2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
40 #include "xvmc_internal.h"
/* Forward declarations of the C reference inverse-quantization routines.
 * ff_dct_common_init() below installs these into the MpegEncContext
 * function pointers (one intra/inter pair per coding standard). */
47 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
56 DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
58 DCTELEM *block, int n, int qscale);
59 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
60 DCTELEM *block, int n, int qscale);
63 /* enable all paranoid tests for rounding, overflows, etc... */
/* Default luma->chroma qscale mapping: the identity (chroma qscale equals
 * luma qscale for all 32 indices). Codecs with a nonlinear mapping install
 * their own table instead (see MPV_common_defaults). */
69 static const uint8_t ff_default_chroma_qscale_table[32] = {
70 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
71 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
72 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC scale: a constant 8 regardless of qscale (all 128 entries).
 * Also reused as entry 0 of ff_mpeg2_dc_scale_table[] below. */
75 const uint8_t ff_mpeg1_dc_scale_table[128] = {
76 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
82 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
83 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
84 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* Constant DC scale of 4 for all qscales; entry 1 of
 * ff_mpeg2_dc_scale_table[] (presumably selected by the MPEG-2
 * intra_dc_precision field — confirm against the callers). */
87 static const uint8_t mpeg2_dc_scale_table1[128] = {
88 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
89 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
94 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
95 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
96 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* Constant DC scale of 2 for all qscales; entry 2 of
 * ff_mpeg2_dc_scale_table[]. */
99 static const uint8_t mpeg2_dc_scale_table2[128] = {
100 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
101 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
106 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
107 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
108 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* Constant DC scale of 1 (no scaling) for all qscales; entry 3 of
 * ff_mpeg2_dc_scale_table[]. */
111 static const uint8_t mpeg2_dc_scale_table3[128] = {
112 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
113 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
117 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
118 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
119 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
120 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Four DC scale tables (8/4/2/1), indexed 0..3 — presumably by the MPEG-2
 * intra_dc_precision field (larger index = finer DC precision); each inner
 * table is further indexed by qscale. */
123 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
124 ff_mpeg1_dc_scale_table,
125 mpeg2_dc_scale_table1,
126 mpeg2_dc_scale_table2,
127 mpeg2_dc_scale_table3,
/* Pixel-format candidate lists for 4:2:0 decoding (plain and hwaccel).
 * NOTE(review): the initializer entries are elided in this excerpt. */
130 const enum PixelFormat ff_pixfmt_list_420[] = {
135 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
/**
 * Scan the buffer for an MPEG start code (00 00 01 xx), carrying a rolling
 * 32-bit byte history in *state so a code split across buffer boundaries is
 * still detected on the next call.
 * NOTE(review): several body lines are elided in this excerpt.
 */
143 const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
145 uint32_t * restrict state)
/* First, feed up to 3 bytes through *state to catch codes that straddle
 * the previous buffer. */
153 for (i = 0; i < 3; i++) {
154 uint32_t tmp = *state << 8;
155 *state = tmp + *(p++);
156 if (tmp == 0x100 || p == end)
/* Fast scan: skip as many bytes as the last three rule out as the start
 * of a 00 00 01 pattern. */
161 if (p[-1] > 1 ) p += 3;
162 else if (p[-2] ) p += 2;
163 else if (p[-3]|(p[-1]-1)) p++;
/* Clamp to the buffer end; the found code starts 4 bytes back. */
170 p = FFMIN(p, end) - 4;
176 /* init common dct for both encoder and decoder */
177 av_cold int ff_dct_common_init(MpegEncContext *s)
179 dsputil_init(&s->dsp, s->avctx);
/* Install the portable C inverse-quantizers as defaults ... */
181 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
182 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
183 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
184 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
185 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
186 if (s->flags & CODEC_FLAG_BITEXACT)
187 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
188 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* ... then let per-architecture init override them with optimized versions.
 * NOTE(review): these calls are presumably wrapped in #if ARCH_*/HAVE_*
 * guards elided from this excerpt — confirm before editing. */
191 MPV_common_init_mmx(s);
193 MPV_common_init_axp(s);
195 MPV_common_init_mmi(s);
197 MPV_common_init_arm(s);
199 MPV_common_init_altivec(s);
201 MPV_common_init_bfin(s);
204 /* load & permutate scantables
205 * note: only wmv uses different ones
/* The scan order depends on alternate_scan; each table is permuted to match
 * the IDCT's coefficient layout via s->dsp.idct_permutation. */
207 if (s->alternate_scan) {
208 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
209 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
211 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
212 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
214 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
215 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Copy src into dst and tag dst as a copy so buffer-release logic knows it
 * does not own the underlying frame data.
 * NOTE(review): the struct copy itself appears elided in this excerpt. */
220 void ff_copy_picture(Picture *dst, Picture *src)
223 dst->f.type = FF_BUFFER_TYPE_COPY;
227 * Release a frame buffer
229 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
231 /* Windows Media Image codecs allocate internal buffers with different
232 * dimensions; ignore user defined callbacks for these
234 if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
235 ff_thread_release_buffer(s->avctx, (AVFrame *) pic);
/* else branch (presumably elided here): WMV3/VC1 image codecs bypass the
 * user callback and use the default release path instead. */
237 avcodec_default_release_buffer(s->avctx, (AVFrame *) pic);
/* Drop any hwaccel private data attached to this picture. */
238 av_freep(&pic->f.hwaccel_picture_private);
242 * Allocate a frame buffer
244 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* If a hwaccel is in use, allocate its per-picture private data first. */
248 if (s->avctx->hwaccel) {
249 assert(!pic->f.hwaccel_picture_private);
250 if (s->avctx->hwaccel->priv_data_size) {
251 pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
252 if (!pic->f.hwaccel_picture_private) {
253 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
/* WMV3/VC1 image codecs use internal buffers with different dimensions,
 * so they take the default allocator instead of the user callback
 * (mirrors the split in free_frame_buffer). */
259 if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
260 r = ff_thread_get_buffer(s->avctx, (AVFrame *) pic);
262 r = avcodec_default_get_buffer(s->avctx, (AVFrame *) pic);
264 if (r < 0 || !pic->f.type || !pic->f.data[0]) {
265 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
266 r, pic->f.type, pic->f.data[0]);
267 av_freep(&pic->f.hwaccel_picture_private);
/* The stride must stay constant for the whole sequence; reject buffers
 * whose linesize differs from what the context already committed to. */
271 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
272 s->uvlinesize != pic->f.linesize[1])) {
273 av_log(s->avctx, AV_LOG_ERROR,
274 "get_buffer() failed (stride changed)\n");
275 free_frame_buffer(s, pic);
/* U and V planes must share one stride. */
279 if (pic->f.linesize[1] != pic->f.linesize[2]) {
280 av_log(s->avctx, AV_LOG_ERROR,
281 "get_buffer() failed (uv stride mismatch)\n");
282 free_frame_buffer(s, pic);
290 * Allocate a Picture.
291 * The pixels are allocated/set by calling get_buffer() if shared = 0
293 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
295 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
297 // the + 1 is needed so memset(,,stride*height) does not sig11
299 const int mb_array_size = s->mb_stride * s->mb_height;
300 const int b8_array_size = s->b8_stride * s->mb_height * 2;
301 const int b4_array_size = s->b4_stride * s->mb_height * 4;
/* shared path: pixels are provided by the caller; only mark the type. */
306 assert(pic->f.data[0]);
307 assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
308 pic->f.type = FF_BUFFER_TYPE_SHARED;
/* non-shared path: pull pixels through get_buffer(). */
310 assert(!pic->f.data[0]);
312 if (alloc_frame_buffer(s, pic) < 0)
/* Commit the strides for the rest of the sequence. */
315 s->linesize = pic->f.linesize[0];
316 s->uvlinesize = pic->f.linesize[1];
/* First-time per-picture side-data allocation (skipped on reuse). */
319 if (pic->f.qscale_table == NULL) {
321 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
322 mb_array_size * sizeof(int16_t), fail)
323 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
324 mb_array_size * sizeof(int16_t), fail)
325 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
326 mb_array_size * sizeof(int8_t ), fail)
329 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
330 mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
331 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
332 (big_mb_num + s->mb_stride) * sizeof(uint8_t),
334 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
335 (big_mb_num + s->mb_stride) * sizeof(uint32_t),
/* Offset by 2*stride+1 so negative-index accesses (top/left neighbours)
 * stay inside the allocated base arrays. */
337 pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
338 pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
/* H.264 stores motion vectors at 4x4 granularity (b4 arrays)... */
339 if (s->out_format == FMT_H264) {
340 for (i = 0; i < 2; i++) {
341 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
342 2 * (b4_array_size + 4) * sizeof(int16_t),
344 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
345 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
346 4 * mb_array_size * sizeof(uint8_t), fail)
348 pic->f.motion_subsample_log2 = 2;
/* ...other codecs (and the encoder / MV debug paths) use 8x8 (b8). */
349 } else if (s->out_format == FMT_H263 || s->encoding ||
350 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
351 for (i = 0; i < 2; i++) {
352 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
353 2 * (b8_array_size + 4) * sizeof(int16_t),
355 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
356 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
357 4 * mb_array_size * sizeof(uint8_t), fail)
359 pic->f.motion_subsample_log2 = 3;
361 if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
362 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
363 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
365 pic->f.qstride = s->mb_stride;
366 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
367 1 * sizeof(AVPanScan), fail)
373 fail: // for the FF_ALLOCZ_OR_GOTO macro
375 free_frame_buffer(s, pic);
380 * Deallocate a picture.
382 static void free_picture(MpegEncContext *s, Picture *pic)
/* Release the pixel buffer only if this picture owns it (not shared). */
386 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
387 free_frame_buffer(s, pic);
/* Free all per-picture side data allocated in ff_alloc_picture(). */
390 av_freep(&pic->mb_var);
391 av_freep(&pic->mc_mb_var);
392 av_freep(&pic->mb_mean);
393 av_freep(&pic->f.mbskip_table);
394 av_freep(&pic->qscale_table_base);
395 av_freep(&pic->mb_type_base);
396 av_freep(&pic->f.dct_coeff);
397 av_freep(&pic->f.pan_scan);
398 pic->f.mb_type = NULL;
399 for (i = 0; i < 2; i++) {
400 av_freep(&pic->motion_val_base[i]);
401 av_freep(&pic->f.ref_index[i]);
/* Shared pictures don't own data[]; just clear the pointers. */
404 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
405 for (i = 0; i < 4; i++) {
407 pic->f.data[i] = NULL;
/* Allocate the per-thread scratch state of an MpegEncContext (each slice
 * thread gets its own duplicate context; see MPV_common_init). Cleanup on
 * failure is deferred to MPV_common_end(). */
413 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
415 int y_size = s->b8_stride * (2 * s->mb_height + 1);
416 int c_size = s->mb_stride * (s->mb_height + 1);
417 int yc_size = y_size + 2 * c_size;
420 // edge emu needs blocksize + filter length - 1
421 // (= 17x17 for halfpel / 21x21 for h264)
422 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
423 (s->width + 64) * 2 * 21 * 2, fail); // (width + edge + align)*interlaced*MBsize*tolerance
425 // FIXME should be linesize instead of s->width * 2
426 // but that is not known before get_buffer()
427 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
428 (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
/* All scratchpads alias one allocation; they are never live at once. */
429 s->me.temp = s->me.scratchpad;
430 s->rd_scratchpad = s->me.scratchpad;
431 s->b_scratchpad = s->me.scratchpad;
432 s->obmc_scratchpad = s->me.scratchpad + 16;
434 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
435 ME_MAP_SIZE * sizeof(uint32_t), fail)
436 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
437 ME_MAP_SIZE * sizeof(uint32_t), fail)
438 if (s->avctx->noise_reduction) {
439 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
440 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 DCTELEMs, doubled — covers 4:2:2/4:4:4 layouts. */
443 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
444 s->block = s->blocks[0];
446 for (i = 0; i < 12; i++) {
447 s->pblocks[i] = &s->block[i];
/* H.263-family AC prediction values: luma plane then two chroma planes. */
450 if (s->out_format == FMT_H263) {
452 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
453 yc_size * sizeof(int16_t) * 16, fail);
454 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
455 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
456 s->ac_val[2] = s->ac_val[1] + c_size;
461 return -1; // free() through MPV_common_end()
/* Free everything allocated by init_duplicate_context(). Safe on partially
 * initialized contexts since av_freep() tolerates NULL. */
464 static void free_duplicate_context(MpegEncContext *s)
469 av_freep(&s->edge_emu_buffer);
470 av_freep(&s->me.scratchpad);
/* The other scratchpads alias me.scratchpad, so only clear the pointer. */
474 s->obmc_scratchpad = NULL;
476 av_freep(&s->dct_error_sum);
477 av_freep(&s->me.map);
478 av_freep(&s->me.score_map);
479 av_freep(&s->blocks);
480 av_freep(&s->ac_val_base);
/* Save the per-thread pointers/state of src into bak so they survive the
 * whole-struct memcpy in ff_update_duplicate_context().
 * NOTE(review): several COPY() lines are elided in this excerpt. */
484 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
486 #define COPY(a) bak->a = src->a
487 COPY(edge_emu_buffer);
492 COPY(obmc_scratchpad);
499 COPY(me.map_generation);
/* Refresh a slice-thread context from the master: copy the whole struct,
 * then restore dst's own per-thread buffers (saved via
 * backup_duplicate_context) and re-point pblocks into dst's block array. */
511 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
515 // FIXME copy only needed parts
517 backup_duplicate_context(&bak, dst);
518 memcpy(dst, src, sizeof(MpegEncContext));
519 backup_duplicate_context(dst, &bak);
520 for (i = 0; i < 12; i++) {
521 dst->pblocks[i] = &dst->block[i];
523 // STOP_TIMER("update_duplicate_context")
524 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading: synchronize decoder state from the source thread's
 * context (s1) into the destination thread's context (s). */
527 int ff_mpeg_update_thread_context(AVCodecContext *dst,
528 const AVCodecContext *src)
530 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
532 if (dst == src || !s1->context_initialized)
535 // FIXME can parameters change on I-frames?
536 // in that case dst may need a reinit
/* First sync: clone the whole context, then give this thread its own
 * picture range and a fresh (empty) bitstream buffer. */
537 if (!s->context_initialized) {
538 memcpy(s, s1, sizeof(MpegEncContext));
541 s->picture_range_start += MAX_PICTURE_COUNT;
542 s->picture_range_end += MAX_PICTURE_COUNT;
543 s->bitstream_buffer = NULL;
544 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
549 s->avctx->coded_height = s1->avctx->coded_height;
550 s->avctx->coded_width = s1->avctx->coded_width;
551 s->avctx->width = s1->avctx->width;
552 s->avctx->height = s1->avctx->height;
554 s->coded_picture_number = s1->coded_picture_number;
555 s->picture_number = s1->picture_number;
556 s->input_picture_number = s1->input_picture_number;
558 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
/* Bulk-copy the field range [last_picture, last_picture_ptr) by address
 * arithmetic — relies on MpegEncContext field layout. */
559 memcpy(&s->last_picture, &s1->last_picture,
560 (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
/* Pointers into s1->picture[] must be rebased into s->picture[]. */
562 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
563 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
564 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
566 // Error/bug resilience
567 s->next_p_frame_damaged = s1->next_p_frame_damaged;
568 s->workaround_bugs = s1->workaround_bugs;
/* Another layout-dependent range copy: [time_increment_bits, shape). */
571 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
572 (char *) &s1->shape - (char *) &s1->time_increment_bits);
575 s->max_b_frames = s1->max_b_frames;
576 s->low_delay = s1->low_delay;
577 s->dropable = s1->dropable;
579 // DivX handling (doesn't work)
580 s->divx_packed = s1->divx_packed;
/* Duplicate any leftover bitstream data (plus zeroed padding) so this
 * thread can keep parsing where s1 stopped. */
582 if (s1->bitstream_buffer) {
583 if (s1->bitstream_buffer_size +
584 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
585 av_fast_malloc(&s->bitstream_buffer,
586 &s->allocated_bitstream_buffer_size,
587 s1->allocated_bitstream_buffer_size);
588 s->bitstream_buffer_size = s1->bitstream_buffer_size;
589 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
590 s1->bitstream_buffer_size);
591 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
592 FF_INPUT_BUFFER_PADDING_SIZE);
595 // MPEG2/interlacing info
596 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
597 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
599 if (!s1->first_field) {
600 s->last_pict_type = s1->pict_type;
601 if (s1->current_picture_ptr)
602 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
604 if (s1->pict_type != AV_PICTURE_TYPE_B) {
605 s->last_non_b_pict_type = s1->pict_type;
613 * Set the given MpegEncContext to common defaults
614 * (same for encoding and decoding).
615 * The changed fields will not depend upon the
616 * prior state of the MpegEncContext.
618 void MPV_common_defaults(MpegEncContext *s)
/* Start from MPEG-1-style DC scaling and identity chroma qscale mapping;
 * codec-specific init overrides these where needed. */
620 s->y_dc_scale_table =
621 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
622 s->chroma_qscale_table = ff_default_chroma_qscale_table;
623 s->progressive_frame = 1;
624 s->progressive_sequence = 1;
625 s->picture_structure = PICT_FRAME;
627 s->coded_picture_number = 0;
628 s->picture_number = 0;
629 s->input_picture_number = 0;
631 s->picture_in_gop_number = 0;
636 s->picture_range_start = 0;
637 s->picture_range_end = MAX_PICTURE_COUNT;
639 s->slice_context_count = 1;
643 * Set the given MpegEncContext to defaults for decoding.
644 * the changed fields will not depend upon
645 * the prior state of the MpegEncContext.
647 void MPV_decode_defaults(MpegEncContext *s)
/* Decoder defaults are currently just the common defaults. */
649 MPV_common_defaults(s);
653 * init common structure for both encoder and decoder.
654 * this assumes that some variables like width/height are already set
656 av_cold int MPV_common_init(MpegEncContext *s)
658 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
/* One slice context per slice thread; otherwise a single context. */
659 int nb_slices = (HAVE_THREADS &&
660 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
661 s->avctx->thread_count : 1;
663 if (s->encoding && s->avctx->slices)
664 nb_slices = s->avctx->slices;
/* Interlaced MPEG-2 rounds mb_height to a multiple of two field MB rows;
 * H.264 manages mb_height itself. */
666 if (s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
667 s->mb_height = (s->height + 31) / 32 * 2;
668 else if (s->codec_id != CODEC_ID_H264)
669 s->mb_height = (s->height + 15) / 16;
671 if (s->avctx->pix_fmt == PIX_FMT_NONE) {
672 av_log(s->avctx, AV_LOG_ERROR,
673 "decoding to PIX_FMT_NONE is not supported.\n");
/* Clamp the slice count to what the dimensions and build allow. */
677 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
680 max_slices = FFMIN(MAX_THREADS, s->mb_height);
682 max_slices = MAX_THREADS;
683 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
684 " reducing to %d\n", nb_slices, max_slices);
685 nb_slices = max_slices;
688 if ((s->width || s->height) &&
689 av_image_check_size(s->width, s->height, 0, s->avctx))
692 ff_dct_common_init(s);
694 s->flags = s->avctx->flags;
695 s->flags2 = s->avctx->flags2;
/* Dimension-dependent setup (skipped when size is not yet known). */
697 if (s->width && s->height) {
698 s->mb_width = (s->width + 15) / 16;
699 s->mb_stride = s->mb_width + 1;
700 s->b8_stride = s->mb_width * 2 + 1;
701 s->b4_stride = s->mb_width * 4 + 1;
702 mb_array_size = s->mb_height * s->mb_stride;
703 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
705 /* set chroma shifts */
706 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift,
709 /* set default edge pos, will be overriden
710 * in decode_header if needed */
711 s->h_edge_pos = s->mb_width * 16;
712 s->v_edge_pos = s->mb_height * 16;
714 s->mb_num = s->mb_width * s->mb_height;
719 s->block_wrap[3] = s->b8_stride;
721 s->block_wrap[5] = s->mb_stride;
723 y_size = s->b8_stride * (2 * s->mb_height + 1);
724 c_size = s->mb_stride * (s->mb_height + 1);
725 yc_size = y_size + 2 * c_size;
727 /* convert fourcc to upper case */
728 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
730 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
732 s->avctx->coded_frame = (AVFrame *)&s->current_picture;
/* mb index -> mb xy lookup; the extra entry simplifies resilience code. */
734 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
735 fail); // error ressilience code looks cleaner with this
736 for (y = 0; y < s->mb_height; y++)
737 for (x = 0; x < s->mb_width; x++)
738 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
740 s->mb_index2xy[s->mb_height * s->mb_width] =
741 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
744 /* Allocate MV tables */
745 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
746 mv_table_size * 2 * sizeof(int16_t), fail);
747 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
748 mv_table_size * 2 * sizeof(int16_t), fail);
749 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
750 mv_table_size * 2 * sizeof(int16_t), fail);
751 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
752 mv_table_size * 2 * sizeof(int16_t), fail);
753 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
754 mv_table_size * 2 * sizeof(int16_t), fail);
755 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
756 mv_table_size * 2 * sizeof(int16_t), fail);
/* NOTE(review): the offset expressions after '+' are elided here. */
757 s->p_mv_table = s->p_mv_table_base +
759 s->b_forw_mv_table = s->b_forw_mv_table_base +
761 s->b_back_mv_table = s->b_back_mv_table_base +
763 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
765 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
767 s->b_direct_mv_table = s->b_direct_mv_table_base +
770 if (s->msmpeg4_version) {
771 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
772 2 * 2 * (MAX_LEVEL + 1) *
773 (MAX_RUN + 1) * 2 * sizeof(int), fail);
775 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
777 /* Allocate MB type table */
778 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
779 sizeof(uint16_t), fail); // needed for encoding
781 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
/* Quant matrices: 64 coeffs x 32 qscales (plus 16-bit variants). */
784 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
785 64 * 32 * sizeof(int), fail);
786 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
787 64 * 32 * sizeof(int), fail);
788 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
789 64 * 32 * 2 * sizeof(uint16_t), fail);
790 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
791 64 * 32 * 2 * sizeof(uint16_t), fail);
792 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
793 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
794 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
795 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
797 if (s->avctx->noise_reduction) {
798 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
799 2 * 64 * sizeof(uint16_t), fail);
/* Picture pool: MAX_PICTURE_COUNT per thread (frame threading). */
804 s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
805 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
806 s->picture_count * sizeof(Picture), fail);
807 for (i = 0; i < s->picture_count; i++) {
808 avcodec_get_frame_defaults((AVFrame *) &s->picture[i]);
811 if (s->width && s->height) {
812 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
813 mb_array_size * sizeof(uint8_t), fail);
815 if (s->codec_id == CODEC_ID_MPEG4 ||
816 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
817 /* interlaced direct mode decoding tables */
818 for (i = 0; i < 2; i++) {
820 for (j = 0; j < 2; j++) {
821 for (k = 0; k < 2; k++) {
822 FF_ALLOCZ_OR_GOTO(s->avctx,
823 s->b_field_mv_table_base[i][j][k],
824 mv_table_size * 2 * sizeof(int16_t),
826 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
829 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
830 mb_array_size * 2 * sizeof(uint8_t),
832 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
833 mv_table_size * 2 * sizeof(int16_t),
835 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
838 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
839 mb_array_size * 2 * sizeof(uint8_t),
843 if (s->out_format == FMT_H263) {
845 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
846 s->coded_block = s->coded_block_base + s->b8_stride + 1;
848 /* cbp, ac_pred, pred_dir */
849 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
850 mb_array_size * sizeof(uint8_t), fail);
851 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
852 mb_array_size * sizeof(uint8_t), fail);
855 if (s->h263_pred || s->h263_plus || !s->encoding) {
857 // MN: we need these for error resilience of intra-frames
858 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
859 yc_size * sizeof(int16_t), fail);
/* dc_val layout: luma plane, then Cb and Cr, each offset to allow
 * top/left neighbour access; 1024 is the DC reset value. */
860 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
861 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
862 s->dc_val[2] = s->dc_val[1] + c_size;
863 for (i = 0; i < yc_size; i++)
864 s->dc_val_base[i] = 1024;
867 /* which mb is a intra block */
868 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
869 memset(s->mbintra_table, 1, mb_array_size);
871 /* init macroblock skip table */
872 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
873 // Note the + 1 is for a quicker mpeg4 slice_end detection
875 s->parse_context.state = -1;
876 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
877 s->avctx->debug_mv) {
878 s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
879 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
880 s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
881 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
882 s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
883 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
887 s->context_initialized = 1;
888 s->thread_context[0] = s;
/* Spin up one duplicate context per extra slice, splitting the MB rows
 * evenly (rounded) between start_mb_y and end_mb_y. */
890 if (s->width && s->height) {
892 for (i = 1; i < nb_slices; i++) {
893 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
894 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
897 for (i = 0; i < nb_slices; i++) {
898 if (init_duplicate_context(s->thread_context[i], s) < 0)
900 s->thread_context[i]->start_mb_y =
901 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
902 s->thread_context[i]->end_mb_y =
903 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
906 if (init_duplicate_context(s, s) < 0)
909 s->end_mb_y = s->mb_height;
911 s->slice_context_count = nb_slices;
920 /* init common structure for both encoder and decoder */
921 void MPV_common_end(MpegEncContext *s)
/* Tear down slice-thread contexts first (index 0 is s itself, so only
 * indices >= 1 are freed as separate allocations). */
925 if (s->slice_context_count > 1) {
926 for (i = 0; i < s->slice_context_count; i++) {
927 free_duplicate_context(s->thread_context[i]);
929 for (i = 1; i < s->slice_context_count; i++) {
930 av_freep(&s->thread_context[i]);
932 s->slice_context_count = 1;
933 } else free_duplicate_context(s);
935 av_freep(&s->parse_context.buffer);
936 s->parse_context.buffer_size = 0;
/* Free everything MPV_common_init() allocated; av_freep tolerates NULL,
 * so this is safe after a partial init. */
938 av_freep(&s->mb_type);
939 av_freep(&s->p_mv_table_base);
940 av_freep(&s->b_forw_mv_table_base);
941 av_freep(&s->b_back_mv_table_base);
942 av_freep(&s->b_bidir_forw_mv_table_base);
943 av_freep(&s->b_bidir_back_mv_table_base);
944 av_freep(&s->b_direct_mv_table_base);
945 s->p_mv_table = NULL;
946 s->b_forw_mv_table = NULL;
947 s->b_back_mv_table = NULL;
948 s->b_bidir_forw_mv_table = NULL;
949 s->b_bidir_back_mv_table = NULL;
950 s->b_direct_mv_table = NULL;
951 for (i = 0; i < 2; i++) {
952 for (j = 0; j < 2; j++) {
953 for (k = 0; k < 2; k++) {
954 av_freep(&s->b_field_mv_table_base[i][j][k]);
955 s->b_field_mv_table[i][j][k] = NULL;
957 av_freep(&s->b_field_select_table[i][j]);
958 av_freep(&s->p_field_mv_table_base[i][j]);
959 s->p_field_mv_table[i][j] = NULL;
961 av_freep(&s->p_field_select_table[i]);
964 av_freep(&s->dc_val_base);
965 av_freep(&s->coded_block_base);
966 av_freep(&s->mbintra_table);
967 av_freep(&s->cbp_table);
968 av_freep(&s->pred_dir_table);
970 av_freep(&s->mbskip_table);
971 av_freep(&s->bitstream_buffer);
972 s->allocated_bitstream_buffer_size = 0;
974 av_freep(&s->avctx->stats_out);
975 av_freep(&s->ac_stats);
976 av_freep(&s->error_status_table);
977 av_freep(&s->mb_index2xy);
978 av_freep(&s->lambda_table);
979 av_freep(&s->q_intra_matrix);
980 av_freep(&s->q_inter_matrix);
981 av_freep(&s->q_intra_matrix16);
982 av_freep(&s->q_inter_matrix16);
983 av_freep(&s->input_picture);
984 av_freep(&s->reordered_input_picture);
985 av_freep(&s->dct_offset);
/* Pictures are shared with frame-thread copies; only the owner frees. */
987 if (s->picture && !s->avctx->internal->is_copy) {
988 for (i = 0; i < s->picture_count; i++) {
989 free_picture(s, &s->picture[i]);
992 av_freep(&s->picture);
993 s->context_initialized = 0;
994 s->last_picture_ptr =
995 s->next_picture_ptr =
996 s->current_picture_ptr = NULL;
997 s->linesize = s->uvlinesize = 0;
999 for (i = 0; i < 3; i++)
1000 av_freep(&s->visualization_buffer[i]);
1002 if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
1003 avcodec_default_free_buffers(s->avctx);
/* Build the derived run/level lookup tables (max_level[], max_run[],
 * index_run[]) for an RL VLC table. With static_store, the results live in
 * the caller-provided static arrays (and the work is done only once);
 * otherwise they are heap-allocated. */
1006 void init_rl(RLTable *rl,
1007 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1009 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1010 uint8_t index_run[MAX_RUN + 1];
1011 int last, run, level, start, end, i;
1013 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1014 if (static_store && rl->max_level[0])
1017 /* compute max_level[], max_run[] and index_run[] */
/* Two passes: last==0 (non-terminal codes) and last==1 (terminal codes);
 * the [start, end) ranges per pass are elided in this excerpt. */
1018 for (last = 0; last < 2; last++) {
1027 memset(max_level, 0, MAX_RUN + 1);
1028 memset(max_run, 0, MAX_LEVEL + 1);
1029 memset(index_run, rl->n, MAX_RUN + 1);
1030 for (i = start; i < end; i++) {
1031 run = rl->table_run[i];
1032 level = rl->table_level[i];
/* index_run keeps the FIRST code index seen for each run value. */
1033 if (index_run[run] == rl->n)
1035 if (level > max_level[run])
1036 max_level[run] = level;
1037 if (run > max_run[level])
1038 max_run[level] = run;
/* Publish into rl->: static storage slices when provided, else malloc. */
1041 rl->max_level[last] = static_store[last];
1043 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1044 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1046 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1048 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1049 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1051 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1053 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1054 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Precompute, for each qscale 0..31, a decode-ready RL VLC table: each VLC
 * table entry is expanded into (len, run, level) with the quant scaling
 * (qmul/qadd) already applied to the level. */
1058 void init_vlc_rl(RLTable *rl)
1062 for (q = 0; q < 32; q++) {
/* qadd rounds the dequantized level; forced odd via "| 1".
 * NOTE(review): the qmul setup line is elided in this excerpt. */
1064 int qadd = (q - 1) | 1;
1070 for (i = 0; i < rl->vlc.table_size; i++) {
1071 int code = rl->vlc.table[i][0];
1072 int len = rl->vlc.table[i][1];
1075 if (len == 0) { // illegal code
1078 } else if (len < 0) { // more bits needed
1082 if (code == rl->n) { // esc
1086 run = rl->table_run[code] + 1;
1087 level = rl->table_level[code] * qmul + qadd;
/* Codes past rl->last flag end-of-block; encoded as run += 192. */
1088 if (code >= rl->last) run += 192;
1091 rl->rl_vlc[q][i].len = len;
1092 rl->rl_vlc[q][i].level = level;
1093 rl->rl_vlc[q][i].run = run;
/* Free the buffers of all pictures that are allocated but no longer
 * referenced (and owned by this context); the current picture is kept
 * unless remove_current is set. */
1098 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1102 /* release non reference frames */
1103 for (i = 0; i < s->picture_count; i++) {
1104 if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1105 (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1106 (remove_current || &s->picture[i] != s->current_picture_ptr)
1107 /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1108 free_frame_buffer(s, &s->picture[i]);
/* Return the index of a free slot in s->picture[] within this context's
 * range, or AVERROR_INVALIDDATA if none. Preference order: for shared
 * pictures, untyped empty slots first; otherwise typed empty slots first,
 * then any empty slot. */
1113 int ff_find_unused_picture(MpegEncContext *s, int shared)
1118 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1119 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1123 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1124 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
1127 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1128 if (s->picture[i].f.data[0] == NULL)
1133 return AVERROR_INVALIDDATA;
/* Refresh the encoder's noise-reduction DCT offsets from the running
 * per-coefficient error statistics, separately for intra and inter. */
1136 static void update_noise_reduction(MpegEncContext *s)
1140 for (intra = 0; intra < 2; intra++) {
/* Halve the accumulators periodically so stats track recent frames
 * instead of growing without bound. */
1141 if (s->dct_count[intra] > (1 << 16)) {
1142 for (i = 0; i < 64; i++) {
1143 s->dct_error_sum[intra][i] >>= 1;
1145 s->dct_count[intra] >>= 1;
/* offset = strength * count / error_sum (rounded; +1 avoids div by 0). */
1148 for (i = 0; i < 64; i++) {
1149 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1150 s->dct_count[intra] +
1151 s->dct_error_sum[intra][i] / 2) /
1152 (s->dct_error_sum[intra][i] + 1);
1158 * generic function for encode/decode called after coding/decoding
1159 * the header and before a frame is coded/decoded.
/* Generic per-frame setup shared by encoder and decoder, called after the
 * header is parsed and before the first macroblock is processed: releases
 * stale reference pictures, allocates and tags the current picture,
 * rotates last/next references, provides dummy references when a stream
 * starts without a usable keyframe, and selects the dequantizer functions.
 * Error handling (returns on allocation failure) is in the elided lines. */
1161 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1167 assert(s->last_picture_ptr == NULL || s->out_format != FMT_H264 ||
1168 s->codec_id == CODEC_ID_SVQ3);
1170 /* mark & release old frames */
1171 if (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3) {
1172 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1173 s->last_picture_ptr != s->next_picture_ptr &&
1174 s->last_picture_ptr->f.data[0]) {
1175 if (s->last_picture_ptr->owner2 == s)
1176 free_frame_buffer(s, s->last_picture_ptr);
1179 /* release forgotten pictures */
1180 /* if (mpeg124/h263) */
1182 for (i = 0; i < s->picture_count; i++) {
1183 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
1184 &s->picture[i] != s->last_picture_ptr &&
1185 &s->picture[i] != s->next_picture_ptr &&
1186 s->picture[i].f.reference) {
1187 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1188 av_log(avctx, AV_LOG_ERROR,
1189 "releasing zombie picture\n");
1190 free_frame_buffer(s, &s->picture[i]);
1197 ff_release_unused_pictures(s, 1);
/* pick the current picture: reuse a pre-set pointer or grab a free slot */
1199 if (s->current_picture_ptr &&
1200 s->current_picture_ptr->f.data[0] == NULL) {
1201 // we already have an unused image
1202 // (maybe it was set before reading the header)
1203 pic = s->current_picture_ptr;
1205 i = ff_find_unused_picture(s, 0);
1206 pic = &s->picture[i];
1209 pic->f.reference = 0;
1211 if (s->codec_id == CODEC_ID_H264)
1212 pic->f.reference = s->picture_structure;
1213 else if (s->pict_type != AV_PICTURE_TYPE_B)
1214 pic->f.reference = 3;
1217 pic->f.coded_picture_number = s->coded_picture_number++;
1219 if (ff_alloc_picture(s, pic, 0) < 0)
1222 s->current_picture_ptr = pic;
1223 // FIXME use only the vars from current_pic
1224 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1225 if (s->codec_id == CODEC_ID_MPEG1VIDEO ||
1226 s->codec_id == CODEC_ID_MPEG2VIDEO) {
1227 if (s->picture_structure != PICT_FRAME)
1228 s->current_picture_ptr->f.top_field_first =
1229 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1231 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1232 !s->progressive_sequence;
1233 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1236 s->current_picture_ptr->f.pict_type = s->pict_type;
1237 // if (s->flags && CODEC_FLAG_QSCALE)
1238 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1239 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1241 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
/* non-B frames become references: rotate next -> last, current -> next */
1243 if (s->pict_type != AV_PICTURE_TYPE_B) {
1244 s->last_picture_ptr = s->next_picture_ptr;
1246 s->next_picture_ptr = s->current_picture_ptr;
1248 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1249 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1250 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1251 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1252 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1253 s->pict_type, s->dropable); */
/* ensure both references exist; allocate dummy frames for broken or
 * keyframe-less streams so motion compensation has something to read */
1255 if (s->codec_id != CODEC_ID_H264) {
1256 if ((s->last_picture_ptr == NULL ||
1257 s->last_picture_ptr->f.data[0] == NULL) &&
1258 (s->pict_type != AV_PICTURE_TYPE_I ||
1259 s->picture_structure != PICT_FRAME)) {
1260 if (s->pict_type != AV_PICTURE_TYPE_I)
1261 av_log(avctx, AV_LOG_ERROR,
1262 "warning: first frame is no keyframe\n");
1263 else if (s->picture_structure != PICT_FRAME)
1264 av_log(avctx, AV_LOG_INFO,
1265 "allocate dummy last picture for field based first keyframe\n");
1267 /* Allocate a dummy frame */
1268 i = ff_find_unused_picture(s, 0);
1269 s->last_picture_ptr = &s->picture[i];
1270 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
1272 ff_thread_report_progress((AVFrame *) s->last_picture_ptr,
1274 ff_thread_report_progress((AVFrame *) s->last_picture_ptr,
1277 if ((s->next_picture_ptr == NULL ||
1278 s->next_picture_ptr->f.data[0] == NULL) &&
1279 s->pict_type == AV_PICTURE_TYPE_B) {
1280 /* Allocate a dummy frame */
1281 i = ff_find_unused_picture(s, 0);
1282 s->next_picture_ptr = &s->picture[i];
1283 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
1285 ff_thread_report_progress((AVFrame *) s->next_picture_ptr,
1287 ff_thread_report_progress((AVFrame *) s->next_picture_ptr,
1292 if (s->last_picture_ptr)
1293 ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1294 if (s->next_picture_ptr)
1295 ff_copy_picture(&s->next_picture, s->next_picture_ptr);
/* frame threading: record which context owns the references */
1297 if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME) &&
1298 (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3)) {
1299 if (s->next_picture_ptr)
1300 s->next_picture_ptr->owner2 = s;
1301 if (s->last_picture_ptr)
1302 s->last_picture_ptr->owner2 = s;
1305 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1306 s->last_picture_ptr->f.data[0]));
/* field pictures: point data at the selected field and double strides */
1308 if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
1310 for (i = 0; i < 4; i++) {
1311 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1312 s->current_picture.f.data[i] +=
1313 s->current_picture.f.linesize[i];
1315 s->current_picture.f.linesize[i] *= 2;
1316 s->last_picture.f.linesize[i] *= 2;
1317 s->next_picture.f.linesize[i] *= 2;
1321 s->err_recognition = avctx->err_recognition;
1323 /* set dequantizer, we can't do it during init as
1324 * it might change for mpeg4 and we can't do it in the header
1325 * decode as init is not called for mpeg4 there yet */
1326 if (s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO) {
1327 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1328 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1329 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1330 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1331 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1333 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1334 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1337 if (s->dct_error_sum) {
1338 assert(s->avctx->noise_reduction && s->encoding);
1339 update_noise_reduction(s);
1342 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1343 return ff_xvmc_field_start(s, avctx);
1348 /* generic function for encode/decode called after a
1349 * frame has been coded/decoded. */
/* Generic per-frame teardown shared by encoder and decoder: draws the
 * edge padding needed by unrestricted motion vectors, records per-type
 * statistics, syncs the current picture back into s->picture[], frees
 * unused frames and reports decode progress for frame threading. */
1350 void MPV_frame_end(MpegEncContext *s)
1353 /* redraw edges for the frame if decoding didn't complete */
1354 // just to make sure that all data is rendered.
1355 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1356 ff_xvmc_field_end(s);
1357 } else if ((s->error_count || s->encoding) &&
1358 !s->avctx->hwaccel &&
1359 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1360 s->unrestricted_mv &&
1361 s->current_picture.f.reference &&
1363 !(s->flags & CODEC_FLAG_EMU_EDGE)) {
1364 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
1365 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
/* pad luma then both chroma planes (chroma scaled by the subsampling shifts) */
1366 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1367 s->h_edge_pos, s->v_edge_pos,
1368 EDGE_WIDTH, EDGE_WIDTH,
1369 EDGE_TOP | EDGE_BOTTOM);
1370 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1371 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1372 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1373 EDGE_TOP | EDGE_BOTTOM);
1374 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1375 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1376 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1377 EDGE_TOP | EDGE_BOTTOM);
1382 s->last_pict_type = s->pict_type;
1383 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1384 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1385 s->last_non_b_pict_type = s->pict_type;
1388 /* copy back current_picture variables */
1389 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1390 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1391 s->picture[i] = s->current_picture;
1395 assert(i < MAX_PICTURE_COUNT);
1399 /* release non-reference frames */
1400 for (i = 0; i < s->picture_count; i++) {
1401 if (s->picture[i].f.data[0] && !s->picture[i].f.reference
1402 /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
1403 free_frame_buffer(s, &s->picture[i]);
1407 // clear copies, to avoid confusion
/* NOTE(review): these memsets zero s->current_picture BEFORE the
 * .reference check below, which would then always be false — upstream
 * keeps these memsets disabled behind #if 0; confirm the elided
 * preprocessor guards around lines 1388-1411. */
1409 memset(&s->last_picture, 0, sizeof(Picture));
1410 memset(&s->next_picture, 0, sizeof(Picture));
1411 memset(&s->current_picture, 0, sizeof(Picture));
1413 s->avctx->coded_frame = (AVFrame *) s->current_picture_ptr;
1415 if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
1416 ff_thread_report_progress((AVFrame *) s->current_picture_ptr,
1417 s->mb_height - 1, 0);
1422 * Draw a line from (ex, ey) -> (sx, sy).
1423 * @param w width of the image
1424 * @param h height of the image
1425 * @param stride stride/linesize of the image
1426 * @param color color of the line
1428 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1429 int w, int h, int stride, int color)
1433 sx = av_clip(sx, 0, w - 1);
1434 sy = av_clip(sy, 0, h - 1);
1435 ex = av_clip(ex, 0, w - 1);
1436 ey = av_clip(ey, 0, h - 1);
1438 buf[sy * stride + sx] += color;
1440 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1442 FFSWAP(int, sx, ex);
1443 FFSWAP(int, sy, ey);
1445 buf += sx + sy * stride;
1447 f = ((ey - sy) << 16) / ex;
1448 for (x = 0; x = ex; x++) {
1450 fr = (x * f) & 0xFFFF;
1451 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1452 buf[(y + 1) * stride + x] += (color * fr ) >> 16;
1456 FFSWAP(int, sx, ex);
1457 FFSWAP(int, sy, ey);
1459 buf += sx + sy * stride;
1462 f = ((ex - sx) << 16) / ey;
1465 for (y = 0; y = ey; y++) {
1467 fr = (y * f) & 0xFFFF;
1468 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1469 buf[y * stride + x + 1] += (color * fr ) >> 16;
1475 * Draw an arrow from (ex, ey) -> (sx, sy).
1476 * @param w width of the image
1477 * @param h height of the image
1478 * @param stride stride/linesize of the image
1479 * @param color color of the arrow
/* Draw a motion-vector arrow; endpoints may lie up to 100 pixels outside
 * the image (draw_line clips them). The two arrowhead barbs are only
 * drawn for vectors longer than 3 pixels. */
1481 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1482 int ey, int w, int h, int stride, int color)
1486 sx = av_clip(sx, -100, w + 100);
1487 sy = av_clip(sy, -100, h + 100);
1488 ex = av_clip(ex, -100, w + 100);
1489 ey = av_clip(ey, -100, h + 100);
1494 if (dx * dx + dy * dy > 3 * 3) {
/* normalize the 45-degree-rotated direction to a fixed barb length */
1497 int length = ff_sqrt((rx * rx + ry * ry) << 8);
1499 // FIXME subpixel accuracy
1500 rx = ROUNDED_DIV(rx * 3 << 4, length);
1501 ry = ROUNDED_DIV(ry * 3 << 4, length);
1503 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1504 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1506 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1510 * Print debugging info for the given picture.
/* Print textual debug info (skip counts, qscale, MB types) and/or paint
 * visual overlays (motion vectors, QP, MB-type colors) onto 'pict',
 * depending on the avctx debug flags. The visual path copies the frame
 * into s->visualization_buffer[] first so the real buffers stay intact. */
1512 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
1514 if (s->avctx->hwaccel || !pict || !pict->mb_type)
1517 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1520 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1521 switch (pict->pict_type) {
1522 case AV_PICTURE_TYPE_I:
1523 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1525 case AV_PICTURE_TYPE_P:
1526 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1528 case AV_PICTURE_TYPE_B:
1529 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1531 case AV_PICTURE_TYPE_S:
1532 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1534 case AV_PICTURE_TYPE_SI:
1535 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1537 case AV_PICTURE_TYPE_SP:
1538 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
/* one ASCII cell per macroblock: skip count, qscale, then type glyphs */
1541 for (y = 0; y < s->mb_height; y++) {
1542 for (x = 0; x < s->mb_width; x++) {
1543 if (s->avctx->debug & FF_DEBUG_SKIP) {
1544 int count = s->mbskip_table[x + y * s->mb_stride];
1547 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1549 if (s->avctx->debug & FF_DEBUG_QP) {
1550 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1551 pict->qscale_table[x + y * s->mb_stride]);
1553 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1554 int mb_type = pict->mb_type[x + y * s->mb_stride];
1555 // Type & MV direction
1556 if (IS_PCM(mb_type))
1557 av_log(s->avctx, AV_LOG_DEBUG, "P");
1558 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1559 av_log(s->avctx, AV_LOG_DEBUG, "A");
1560 else if (IS_INTRA4x4(mb_type))
1561 av_log(s->avctx, AV_LOG_DEBUG, "i");
1562 else if (IS_INTRA16x16(mb_type))
1563 av_log(s->avctx, AV_LOG_DEBUG, "I");
1564 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1565 av_log(s->avctx, AV_LOG_DEBUG, "d");
1566 else if (IS_DIRECT(mb_type))
1567 av_log(s->avctx, AV_LOG_DEBUG, "D");
1568 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1569 av_log(s->avctx, AV_LOG_DEBUG, "g");
1570 else if (IS_GMC(mb_type))
1571 av_log(s->avctx, AV_LOG_DEBUG, "G");
1572 else if (IS_SKIP(mb_type))
1573 av_log(s->avctx, AV_LOG_DEBUG, "S");
1574 else if (!USES_LIST(mb_type, 1))
1575 av_log(s->avctx, AV_LOG_DEBUG, ">");
1576 else if (!USES_LIST(mb_type, 0))
1577 av_log(s->avctx, AV_LOG_DEBUG, "<");
1579 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1580 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* second glyph: partition shape */
1584 if (IS_8X8(mb_type))
1585 av_log(s->avctx, AV_LOG_DEBUG, "+");
1586 else if (IS_16X8(mb_type))
1587 av_log(s->avctx, AV_LOG_DEBUG, "-");
1588 else if (IS_8X16(mb_type))
1589 av_log(s->avctx, AV_LOG_DEBUG, "|");
1590 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1591 av_log(s->avctx, AV_LOG_DEBUG, " ");
1593 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* third glyph: interlacing */
1596 if (IS_INTERLACED(mb_type))
1597 av_log(s->avctx, AV_LOG_DEBUG, "=");
1599 av_log(s->avctx, AV_LOG_DEBUG, " ");
1601 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1603 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* visual overlays: work on a copy of the frame data */
1607 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1608 (s->avctx->debug_mv)) {
1609 const int shift = 1 + s->quarter_sample;
1613 int h_chroma_shift, v_chroma_shift, block_height;
1614 const int width = s->avctx->width;
1615 const int height = s->avctx->height;
1616 const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
1617 const int mv_stride = (s->mb_width << mv_sample_log2) +
1618 (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1619 s->low_delay = 0; // needed to see the vectors without trashing the buffers
1621 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1622 &h_chroma_shift, &v_chroma_shift);
1623 for (i = 0; i < 3; i++) {
1624 memcpy(s->visualization_buffer[i], pict->data[i],
1625 (i == 0) ? pict->linesize[i] * height:
1626 pict->linesize[i] * height >> v_chroma_shift);
1627 pict->data[i] = s->visualization_buffer[i];
1629 pict->type = FF_BUFFER_TYPE_COPY;
1630 ptr = pict->data[0];
1631 block_height = 16 >> v_chroma_shift;
1633 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1635 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1636 const int mb_index = mb_x + mb_y * s->mb_stride;
/* motion vector arrows, per partition shape */
1637 if ((s->avctx->debug_mv) && pict->motion_val) {
1639 for (type = 0; type < 3; type++) {
1643 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1644 (pict->pict_type!= AV_PICTURE_TYPE_P))
1649 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1650 (pict->pict_type!= AV_PICTURE_TYPE_B))
1655 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1656 (pict->pict_type!= AV_PICTURE_TYPE_B))
1661 if (!USES_LIST(pict->mb_type[mb_index], direction))
1664 if (IS_8X8(pict->mb_type[mb_index])) {
1666 for (i = 0; i < 4; i++) {
1667 int sx = mb_x * 16 + 4 + 8 * (i & 1);
1668 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
1669 int xy = (mb_x * 2 + (i & 1) +
1670 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1671 int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
1672 int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
1673 draw_arrow(ptr, sx, sy, mx, my, width,
1674 height, s->linesize, 100);
1676 } else if (IS_16X8(pict->mb_type[mb_index])) {
1678 for (i = 0; i < 2; i++) {
1679 int sx = mb_x * 16 + 8;
1680 int sy = mb_y * 16 + 4 + 8 * i;
1681 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
1682 int mx = (pict->motion_val[direction][xy][0] >> shift);
1683 int my = (pict->motion_val[direction][xy][1] >> shift);
1685 if (IS_INTERLACED(pict->mb_type[mb_index]))
1688 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1689 height, s->linesize, 100);
1691 } else if (IS_8X16(pict->mb_type[mb_index])) {
1693 for (i = 0; i < 2; i++) {
1694 int sx = mb_x * 16 + 4 + 8 * i;
1695 int sy = mb_y * 16 + 8;
1696 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
1697 int mx = pict->motion_val[direction][xy][0] >> shift;
1698 int my = pict->motion_val[direction][xy][1] >> shift;
1700 if (IS_INTERLACED(pict->mb_type[mb_index]))
1703 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1704 height, s->linesize, 100);
1707 int sx = mb_x * 16 + 8;
1708 int sy = mb_y * 16 + 8;
1709 int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
/* NOTE(review): '>> shift + sx' parses as '>> (shift + sx)' — the 8x8
 * case above uses '(mv >> shift) + sx', which is almost certainly the
 * intent here too; this 16x16 branch looks like a precedence bug. */
1710 int mx = pict->motion_val[direction][xy][0] >> shift + sx;
1711 int my = pict->motion_val[direction][xy][1] >> shift + sy;
1712 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
/* QP overlay: fill the MB's chroma block with a gray level from qscale */
1716 if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
1717 uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
1718 0x0101010101010101ULL;
1720 for (y = 0; y < block_height; y++) {
1721 *(uint64_t *)(pict->data[1] + 8 * mb_x +
1722 (block_height * mb_y + y) *
1723 pict->linesize[1]) = c;
1724 *(uint64_t *)(pict->data[2] + 8 * mb_x +
1725 (block_height * mb_y + y) *
1726 pict->linesize[2]) = c;
/* MB-type overlay: color the chroma planes by macroblock type */
1729 if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
1731 int mb_type = pict->mb_type[mb_index];
1734 #define COLOR(theta, r) \
1735 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
1736 v = (int)(128 + r * sin(theta * 3.141592 / 180));
1740 if (IS_PCM(mb_type)) {
1742 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
1743 IS_INTRA16x16(mb_type)) {
1745 } else if (IS_INTRA4x4(mb_type)) {
1747 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
1749 } else if (IS_DIRECT(mb_type)) {
1751 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
1753 } else if (IS_GMC(mb_type)) {
1755 } else if (IS_SKIP(mb_type)) {
1757 } else if (!USES_LIST(mb_type, 1)) {
1759 } else if (!USES_LIST(mb_type, 0)) {
1762 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1766 u *= 0x0101010101010101ULL;
1767 v *= 0x0101010101010101ULL;
1768 for (y = 0; y < block_height; y++) {
1769 *(uint64_t *)(pict->data[1] + 8 * mb_x +
1770 (block_height * mb_y + y) * pict->linesize[1]) = u;
1771 *(uint64_t *)(pict->data[2] + 8 * mb_x +
1772 (block_height * mb_y + y) * pict->linesize[2]) = v;
/* XOR luma pixels to mark the partition boundaries */
1776 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
1777 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
1778 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1779 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
1780 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1782 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
1783 for (y = 0; y < 16; y++)
1784 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
1785 pict->linesize[0]] ^= 0x80;
1787 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
1788 int dm = 1 << (mv_sample_log2 - 2);
1789 for (i = 0; i < 4; i++) {
1790 int sx = mb_x * 16 + 8 * (i & 1);
1791 int sy = mb_y * 16 + 8 * (i >> 1);
1792 int xy = (mb_x * 2 + (i & 1) +
1793 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1795 int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
1796 if (mv[0] != mv[dm] ||
1797 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
1798 for (y = 0; y < 8; y++)
1799 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
1800 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
1801 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
1802 pict->linesize[0]) ^= 0x8080808080808080ULL;
1806 if (IS_INTERLACED(mb_type) &&
1807 s->codec_id == CODEC_ID_H264) {
1811 s->mbskip_table[mb_index] = 0;
/* Half-pel motion compensation for one block of one plane at reduced
 * (lowres) resolution, with edge emulation when the motion vector reads
 * outside the padded picture area. Return value not visible here. */
1817 static inline int hpel_motion_lowres(MpegEncContext *s,
1818 uint8_t *dest, uint8_t *src,
1819 int field_based, int field_select,
1820 int src_x, int src_y,
1821 int width, int height, int stride,
1822 int h_edge_pos, int v_edge_pos,
1823 int w, int h, h264_chroma_mc_func *pix_op,
1824 int motion_x, int motion_y)
1826 const int lowres = s->avctx->lowres;
1827 const int op_index = FFMIN(lowres, 2);
/* s_mask keeps the sub-pel fraction bits for this lowres factor */
1828 const int s_mask = (2 << lowres) - 1;
1832 if (s->quarter_sample) {
1837 sx = motion_x & s_mask;
1838 sy = motion_y & s_mask;
/* '>> lowres + 1' parses as '>> (lowres + 1)' — intended: half-pel
 * vector converted to integer position at the reduced resolution */
1839 src_x += motion_x >> lowres + 1;
1840 src_y += motion_y >> lowres + 1;
1842 src += src_y * stride + src_x;
1844 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
1845 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
1846 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
1847 (h + 1) << field_based, src_x,
1848 src_y << field_based,
1851 src = s->edge_emu_buffer;
/* rescale the sub-pel fraction to the 1/8-pel range pix_op expects */
1855 sx = (sx << 2) >> lowres;
1856 sy = (sy << 2) >> lowres;
1859 pix_op[op_index](dest, src, stride, h, sx, sy);
1863 /* apply one mpeg motion vector to the three components */
/* Apply one MPEG motion vector to all three planes (luma + both chroma)
 * at reduced (lowres) resolution, handling frame/field addressing,
 * per-format chroma vector derivation and edge emulation. */
1864 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
1871 uint8_t **ref_picture,
1872 h264_chroma_mc_func *pix_op,
1873 int motion_x, int motion_y,
1876 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1877 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
1879 const int lowres = s->avctx->lowres;
1880 const int op_index = FFMIN(lowres, 2);
1881 const int block_s = 8>>lowres;
1882 const int s_mask = (2 << lowres) - 1;
1883 const int h_edge_pos = s->h_edge_pos >> lowres;
1884 const int v_edge_pos = s->v_edge_pos >> lowres;
1885 linesize = s->current_picture.f.linesize[0] << field_based;
1886 uvlinesize = s->current_picture.f.linesize[1] << field_based;
1888 // FIXME obviously not perfect but qpel will not work in lowres anyway
1889 if (s->quarter_sample) {
1895 motion_y += (bottom_field - field_select) * (1 << lowres - 1);
/* split vector into integer position and sub-pel fraction */
1898 sx = motion_x & s_mask;
1899 sy = motion_y & s_mask;
1900 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
1901 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
/* chroma source position depends on the output format's chroma MV rules */
1903 if (s->out_format == FMT_H263) {
1904 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
1905 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
1906 uvsrc_x = src_x >> 1;
1907 uvsrc_y = src_y >> 1;
1908 } else if (s->out_format == FMT_H261) {
1909 // even chroma mv's are full pel in H261
1912 uvsx = (2 * mx) & s_mask;
1913 uvsy = (2 * my) & s_mask;
1914 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
1915 uvsrc_y = mb_y * block_s + (my >> lowres);
1921 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
1922 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
1925 ptr_y = ref_picture[0] + src_y * linesize + src_x;
1926 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1927 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* out-of-picture reads: copy through the edge emulation buffer */
1929 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) ||
1930 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
1931 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
1932 s->linesize, 17, 17 + field_based,
1933 src_x, src_y << field_based, h_edge_pos,
1935 ptr_y = s->edge_emu_buffer;
1936 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
1937 uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
1938 s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9,
1940 uvsrc_x, uvsrc_y << field_based,
1941 h_edge_pos >> 1, v_edge_pos >> 1);
1942 s->dsp.emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, 9,
1944 uvsrc_x, uvsrc_y << field_based,
1945 h_edge_pos >> 1, v_edge_pos >> 1);
1947 ptr_cr = uvbuf + 16;
1951 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
1953 dest_y += s->linesize;
1954 dest_cb += s->uvlinesize;
1955 dest_cr += s->uvlinesize;
1959 ptr_y += s->linesize;
1960 ptr_cb += s->uvlinesize;
1961 ptr_cr += s->uvlinesize;
/* rescale sub-pel fractions, then run the MC kernels */
1964 sx = (sx << 2) >> lowres;
1965 sy = (sy << 2) >> lowres;
1966 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
1968 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
1969 uvsx = (uvsx << 2) >> lowres;
1970 uvsy = (uvsy << 2) >> lowres;
1971 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift,
1973 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift,
1976 // FIXME h261 lowres loop filter
/* Chroma motion compensation for 4MV (four luma vectors) macroblocks at
 * reduced resolution: a single averaged chroma vector is derived and
 * applied to both Cb and Cr. */
1979 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
1980 uint8_t *dest_cb, uint8_t *dest_cr,
1981 uint8_t **ref_picture,
1982 h264_chroma_mc_func * pix_op,
1985 const int lowres = s->avctx->lowres;
1986 const int op_index = FFMIN(lowres, 2);
1987 const int block_s = 8 >> lowres;
1988 const int s_mask = (2 << lowres) - 1;
/* chroma edges are at half the luma positions, hence '>> (lowres + 1)' */
1989 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
1990 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
1991 int emu = 0, src_x, src_y, offset, sx, sy;
1994 if (s->quarter_sample) {
1999 /* In case of 8X8, we construct a single chroma motion vector
2000 with a special rounding */
2001 mx = ff_h263_round_chroma(mx);
2002 my = ff_h263_round_chroma(my);
2006 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2007 src_y = s->mb_y * block_s + (my >> lowres + 1);
2009 offset = src_y * s->uvlinesize + src_x;
2010 ptr = ref_picture[1] + offset;
2011 if (s->flags & CODEC_FLAG_EMU_EDGE) {
2012 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2013 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2014 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
2015 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
2016 ptr = s->edge_emu_buffer;
2020 sx = (sx << 2) >> lowres;
2021 sy = (sy << 2) >> lowres;
2022 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr reuses the same offset and sub-pel fraction as Cb */
2024 ptr = ref_picture[2] + offset;
2026 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
2027 src_x, src_y, h_edge_pos, v_edge_pos);
2028 ptr = s->edge_emu_buffer;
2030 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2034 * motion compensation of a single macroblock
2036 * @param dest_y luma destination pointer
2037 * @param dest_cb chroma cb/u destination pointer
2038 * @param dest_cr chroma cr/v destination pointer
2039 * @param dir direction (0->forward, 1->backward)
2040 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2041 * @param pix_op halfpel motion compensation function (average or put normally)
2042 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/* Motion-compensate one macroblock at reduced (lowres) resolution,
 * dispatching on s->mv_type (16x16, 8x8/4MV, field, 16x8, dual-prime).
 * Vectors come from s->mv; 'dir' selects forward/backward prediction. */
2044 static inline void MPV_motion_lowres(MpegEncContext *s,
2045 uint8_t *dest_y, uint8_t *dest_cb,
2047 int dir, uint8_t **ref_picture,
2048 h264_chroma_mc_func *pix_op)
2052 const int lowres = s->avctx->lowres;
2053 const int block_s = 8 >>lowres;
2058 switch (s->mv_type) {
2060 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2062 ref_picture, pix_op,
2063 s->mv[dir][0][0], s->mv[dir][0][1],
/* 4MV: one half-pel MC per 8x8 luma block, then one chroma MC using the
 * averaged vector accumulated in mx/my */
2069 for (i = 0; i < 4; i++) {
2070 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2071 s->linesize) * block_s,
2072 ref_picture[0], 0, 0,
2073 (2 * mb_x + (i & 1)) * block_s,
2074 (2 * mb_y + (i >> 1)) * block_s,
2075 s->width, s->height, s->linesize,
2076 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2077 block_s, block_s, pix_op,
2078 s->mv[dir][i][0], s->mv[dir][i][1]);
2080 mx += s->mv[dir][i][0];
2081 my += s->mv[dir][i][1];
2084 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2085 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
/* field motion: two field-based MCs in a frame picture, or a single MC
 * in a field picture (possibly referencing the current frame) */
2089 if (s->picture_structure == PICT_FRAME) {
2091 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2092 1, 0, s->field_select[dir][0],
2093 ref_picture, pix_op,
2094 s->mv[dir][0][0], s->mv[dir][0][1],
2097 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2098 1, 1, s->field_select[dir][1],
2099 ref_picture, pix_op,
2100 s->mv[dir][1][0], s->mv[dir][1][1],
2103 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2104 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2105 ref_picture = s->current_picture_ptr->f.data;
2108 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2109 0, 0, s->field_select[dir][0],
2110 ref_picture, pix_op,
2112 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
/* 16x8: two MCs per field picture, second half offset vertically */
2116 for (i = 0; i < 2; i++) {
2117 uint8_t **ref2picture;
2119 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2120 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2121 ref2picture = ref_picture;
2123 ref2picture = s->current_picture_ptr->f.data;
2126 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2127 0, 0, s->field_select[dir][i],
2128 ref2picture, pix_op,
2129 s->mv[dir][i][0], s->mv[dir][i][1] +
2130 2 * block_s * i, block_s, mb_y >> 1);
2132 dest_y += 2 * block_s * s->linesize;
2133 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2134 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
/* dual prime: put the first prediction, then average in the second */
2138 if (s->picture_structure == PICT_FRAME) {
2139 for (i = 0; i < 2; i++) {
2141 for (j = 0; j < 2; j++) {
2142 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2144 ref_picture, pix_op,
2145 s->mv[dir][2 * i + j][0],
2146 s->mv[dir][2 * i + j][1],
2149 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
2152 for (i = 0; i < 2; i++) {
2153 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2154 0, 0, s->picture_structure != i + 1,
2155 ref_picture, pix_op,
2156 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2157 2 * block_s, mb_y >> 1);
2159 // after put we make avg of the same block
2160 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
2162 // opposite parity is always in the same
2163 // frame if this is second field
2164 if (!s->first_field) {
2165 ref_picture = s->current_picture_ptr->f.data;
2176 * find the lowest MB row referenced in the MVs
/* Return the lowest (largest-y) macroblock row that the current MB's
 * motion vectors in direction 'dir' can reference — used to wait for
 * reference-frame progress in frame-threaded decoding. Falls back to the
 * last row for unhandled cases (non-frame pictures, unknown mv types). */
2178 int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
2180 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2181 int my, off, i, mvs;
2183 if (s->picture_structure != PICT_FRAME) goto unhandled;
2185 switch (s->mv_type) {
2199 for (i = 0; i < mvs; i++) {
/* normalize all vectors to quarter-pel units */
2200 my = s->mv[dir][i][1]<<qpel_shift;
2201 my_max = FFMAX(my_max, my);
2202 my_min = FFMIN(my_min, my);
/* largest quarter-pel displacement rounded up to MB rows (64 = 16 << 2) */
2205 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2207 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2209 return s->mb_height-1;
2212 /* put block[] to dest[] */
/* Dequantize block i with the intra dequantizer, then IDCT and store the
 * result into dest (overwrites the destination). */
2213 static inline void put_dct(MpegEncContext *s,
2214 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2216 s->dct_unquantize_intra(s, block, i, qscale);
2217 s->dsp.idct_put (dest, line_size, block);
2220 /* add block[] to dest[] */
/* IDCT block i and add the result onto dest; skipped entirely when the
 * block has no coded coefficients (block_last_index < 0). */
2221 static inline void add_dct(MpegEncContext *s,
2222 DCTELEM *block, int i, uint8_t *dest, int line_size)
2224 if (s->block_last_index[i] >= 0) {
2225 s->dsp.idct_add (dest, line_size, block);
/* Dequantize block i with the inter dequantizer, then IDCT and add onto
 * dest; skipped when the block has no coded coefficients. */
2229 static inline void add_dequant_dct(MpegEncContext *s,
2230 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2232 if (s->block_last_index[i] >= 0) {
2233 s->dct_unquantize_inter(s, block, i, qscale);
2235 s->dsp.idct_add (dest, line_size, block);
2240 * Clean dc, ac, coded_block for the current non-intra MB.
/* Reset the intra prediction state (DC values, AC coefficients, coded
 * block flags) for the current non-intra macroblock, so neighbouring
 * intra MBs predict from neutral defaults. */
2242 void ff_clean_intra_table_entries(MpegEncContext *s)
2244 int wrap = s->b8_stride;
2245 int xy = s->block_index[0];
/* luma: reset DC predictors of this MB's four 8x8 blocks to 1024 */
2248 s->dc_val[0][xy + 1 ] =
2249 s->dc_val[0][xy + wrap] =
2250 s->dc_val[0][xy + 1 + wrap] = 1024;
2252 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2253 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2254 if (s->msmpeg4_version>=3) {
2255 s->coded_block[xy ] =
2256 s->coded_block[xy + 1 ] =
2257 s->coded_block[xy + wrap] =
2258 s->coded_block[xy + 1 + wrap] = 0;
/* chroma: one entry per MB, addressed with the MB stride */
2261 wrap = s->mb_stride;
2262 xy = s->mb_x + s->mb_y * wrap;
2264 s->dc_val[2][xy] = 1024;
2266 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2267 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2269 s->mbintra_table[xy]= 0;
2272 /* generic function called after a macroblock has been parsed by the
2273 decoder or after it has been encoded by the encoder.
2275 Important variables used:
2276 s->mb_intra : true if intra macroblock
2277 s->mv_dir : motion vector direction
2278 s->mv_type : motion vector type
2279 s->mv : motion vector
2280 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* Per-macroblock reconstruction: motion compensation + residual add/put.
 * Templated via is_mpeg12/lowres_flag so the compiler can specialize.
 * NOTE(review): this chunk is missing several physical lines (braces /
 * else arms) from the original file; the visible code is kept verbatim. */
2282 static av_always_inline
2283 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
2284 int lowres_flag, int is_mpeg12)
2286 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* XVMC hardware acceleration consumes the blocks itself */
2287 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2288 ff_xvmc_decode_mb(s);//xvmc uses pblocks
/* debug aid: dump all 6 blocks' coefficients in IDCT-permuted order */
2292 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2293 /* save DCT coefficients */
2295 DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
2296 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2298 for(j=0; j<64; j++){
2299 *dct++ = block[i][s->dsp.idct_permutation[j]];
2300 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2302 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2306 s->current_picture.f.qscale_table[mb_xy] = s->qscale;
2308 /* update DC predictors for P macroblocks */
/* h263-style codecs keep per-block intra prediction state that must be
 * reset once a position turns non-intra */
2310 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2311 if(s->mbintra_table[mb_xy])
2312 ff_clean_intra_table_entries(s);
/* 128 << precision is the mid-grey DC reset value */
2316 s->last_dc[2] = 128 << s->intra_dc_precision;
2319 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2320 s->mbintra_table[mb_xy]=1;
/* reconstruct pixels unless encoding a frame type where decoded output
 * is never needed (and PSNR reporting is off) */
2322 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2323 uint8_t *dest_y, *dest_cb, *dest_cr;
2324 int dct_linesize, dct_offset;
2325 op_pixels_func (*op_pix)[4];
2326 qpel_mc_func (*op_qpix)[16];
2327 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2328 const int uvlinesize = s->current_picture.f.linesize[1];
2329 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2330 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2332 /* avoid copy if macroblock skipped in last frame too */
2333 /* skip only during decoding as we might trash the buffers during encoding a bit */
2335 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2337 if (s->mb_skipped) {
2339 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2341 } else if(!s->current_picture.f.reference) {
2344 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT: halve vertical stride, second field one line down */
2348 dct_linesize = linesize << s->interlaced_dct;
2349 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2353 dest_cb= s->dest[1];
2354 dest_cr= s->dest[2];
/* non-readable destination: reconstruct into scratchpad, copy later */
2356 dest_y = s->b_scratchpad;
2357 dest_cb= s->b_scratchpad+16*linesize;
2358 dest_cr= s->b_scratchpad+32*linesize;
2362 /* motion handling */
2363 /* decoding or more than one mb_type (MC was already done otherwise) */
/* frame threading: wait until reference rows needed by MC are decoded */
2366 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2367 if (s->mv_dir & MV_DIR_FORWARD) {
2368 ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
2370 if (s->mv_dir & MV_DIR_BACKWARD) {
2371 ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
/* lowres path: chroma MC helpers; forward MC puts, backward averages */
2376 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
2378 if (s->mv_dir & MV_DIR_FORWARD) {
2379 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2380 op_pix = s->dsp.avg_h264_chroma_pixels_tab;
2382 if (s->mv_dir & MV_DIR_BACKWARD) {
2383 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
/* full-res path: pick rounded vs no-rounding put, then put/avg chain */
2386 op_qpix= s->me.qpel_put;
2387 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2388 op_pix = s->dsp.put_pixels_tab;
2390 op_pix = s->dsp.put_no_rnd_pixels_tab;
2392 if (s->mv_dir & MV_DIR_FORWARD) {
2393 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2394 op_pix = s->dsp.avg_pixels_tab;
2395 op_qpix= s->me.qpel_avg;
2397 if (s->mv_dir & MV_DIR_BACKWARD) {
2398 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2403 /* skip dequant / idct if we are really late ;) */
2404 if(s->avctx->skip_idct){
2405 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2406 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2407 || s->avctx->skip_idct >= AVDISCARD_ALL)
2411 /* add dct residue */
/* codecs without a dedicated unquantizer in the bitstream path need
 * dequant+add; others had dequant done during parsing, so plain add */
2412 if(s->encoding || !( s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
2413 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2414 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2415 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2416 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2417 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2419 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2420 if (s->chroma_y_shift){
2421 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2422 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2426 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2427 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2428 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2429 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2432 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
2433 add_dct(s, block[0], 0, dest_y , dct_linesize);
2434 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2435 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2436 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2438 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2439 if(s->chroma_y_shift){//Chroma420
2440 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2441 add_dct(s, block[5], 5, dest_cr, uvlinesize);
/* 4:2:2 / 4:4:4: chroma gets its own interlace-aware stride/offset */
2444 dct_linesize = uvlinesize << s->interlaced_dct;
2445 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2447 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2448 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2449 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2450 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2451 if(!s->chroma_x_shift){//Chroma444
2452 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2453 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2454 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2455 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2460 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2461 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2464 /* dct only in intra block */
/* intra macroblocks: no prediction to add onto, so put instead of add */
2465 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2466 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2467 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2468 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2469 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2471 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2472 if(s->chroma_y_shift){
2473 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2474 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2478 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2479 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2480 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2481 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* mpeg1/2 while decoding: blocks were already dequantized, bare IDCT put */
2485 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2486 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2487 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2488 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2490 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2491 if(s->chroma_y_shift){
2492 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2493 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2496 dct_linesize = uvlinesize << s->interlaced_dct;
2497 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2499 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2500 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2501 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2502 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2503 if(!s->chroma_x_shift){//Chroma444
2504 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2505 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2506 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2507 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
/* non-readable destination: copy reconstructed MB from scratchpad back */
2515 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2516 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2517 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2522 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2524 if(s->out_format == FMT_MPEG1) {
2525 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2526 else MPV_decode_mb_internal(s, block, 0, 1);
2529 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2530 else MPV_decode_mb_internal(s, block, 0, 0);
/**
 * @param h is the normal height, this will be reduced automatically if needed for the last row
 */
/* Emit a finished horizontal band: pad picture edges for unrestricted MV,
 * then invoke the user's draw_horiz_band callback with per-plane offsets.
 * NOTE(review): this chunk is missing several physical lines (braces /
 * early returns) from the original file; visible code kept verbatim. */
2536 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2537 const int field_pic= s->picture_structure != PICT_FRAME;
/* edge padding only makes sense for software-decoded reference frames
 * without EMU_EDGE (which emulates edges on the fly instead) */
2543 if (!s->avctx->hwaccel
2544 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2545 && s->unrestricted_mv
2546 && s->current_picture.f.reference
2548 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2549 int sides = 0, edge_h;
2550 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
2551 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
2552 if (y==0) sides |= EDGE_TOP;
2553 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
/* clamp band height so we never pad past the coded picture */
2555 edge_h= FFMIN(h, s->v_edge_pos - y);
2557 s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize,
2558 s->linesize, s->h_edge_pos, edge_h,
2559 EDGE_WIDTH, EDGE_WIDTH, sides);
2560 s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
2561 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2562 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2563 s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
2564 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2565 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
/* last row may be shorter than the nominal band height */
2568 h= FFMIN(h, s->avctx->height - y);
2570 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2572 if (s->avctx->draw_horiz_band) {
2574 int offset[AV_NUM_DATA_POINTERS];
/* B-frames / low_delay / coded-order output come from the current
 * picture, otherwise from the last (display-order) picture */
2577 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2578 src= (AVFrame*)s->current_picture_ptr;
2579 else if(s->last_picture_ptr)
2580 src= (AVFrame*)s->last_picture_ptr;
2584 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2585 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2588 offset[0]= y * s->linesize;
2590 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2591 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2597 s->avctx->draw_horiz_band(s->avctx, src, offset,
2598 y, s->picture_structure, h);
2602 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2603 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2604 const int uvlinesize = s->current_picture.f.linesize[1];
2605 const int mb_size= 4 - s->avctx->lowres;
2607 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2608 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2609 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2610 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2611 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2612 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2613 //block_index is not used by mpeg2, so it is not affected by chroma_format
2615 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2616 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2617 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2619 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2621 if(s->picture_structure==PICT_FRAME){
2622 s->dest[0] += s->mb_y * linesize << mb_size;
2623 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2624 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2626 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2627 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2628 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2629 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2634 void ff_mpeg_flush(AVCodecContext *avctx){
2636 MpegEncContext *s = avctx->priv_data;
2638 if(s==NULL || s->picture==NULL)
2641 for(i=0; i<s->picture_count; i++){
2642 if (s->picture[i].f.data[0] &&
2643 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2644 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2645 free_frame_buffer(s, &s->picture[i]);
2647 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2649 s->mb_x= s->mb_y= 0;
2651 s->parse_context.state= -1;
2652 s->parse_context.frame_start_found= 0;
2653 s->parse_context.overread= 0;
2654 s->parse_context.overread_index= 0;
2655 s->parse_context.index= 0;
2656 s->parse_context.last_index= 0;
2657 s->bitstream_buffer_size=0;
2661 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2662 DCTELEM *block, int n, int qscale)
2664 int i, level, nCoeffs;
2665 const uint16_t *quant_matrix;
2667 nCoeffs= s->block_last_index[n];
2670 block[0] = block[0] * s->y_dc_scale;
2672 block[0] = block[0] * s->c_dc_scale;
2673 /* XXX: only mpeg1 */
2674 quant_matrix = s->intra_matrix;
2675 for(i=1;i<=nCoeffs;i++) {
2676 int j= s->intra_scantable.permutated[i];
2681 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2682 level = (level - 1) | 1;
2685 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2686 level = (level - 1) | 1;
2693 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2694 DCTELEM *block, int n, int qscale)
2696 int i, level, nCoeffs;
2697 const uint16_t *quant_matrix;
2699 nCoeffs= s->block_last_index[n];
2701 quant_matrix = s->inter_matrix;
2702 for(i=0; i<=nCoeffs; i++) {
2703 int j= s->intra_scantable.permutated[i];
2708 level = (((level << 1) + 1) * qscale *
2709 ((int) (quant_matrix[j]))) >> 4;
2710 level = (level - 1) | 1;
2713 level = (((level << 1) + 1) * qscale *
2714 ((int) (quant_matrix[j]))) >> 4;
2715 level = (level - 1) | 1;
2722 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2723 DCTELEM *block, int n, int qscale)
2725 int i, level, nCoeffs;
2726 const uint16_t *quant_matrix;
2728 if(s->alternate_scan) nCoeffs= 63;
2729 else nCoeffs= s->block_last_index[n];
2732 block[0] = block[0] * s->y_dc_scale;
2734 block[0] = block[0] * s->c_dc_scale;
2735 quant_matrix = s->intra_matrix;
2736 for(i=1;i<=nCoeffs;i++) {
2737 int j= s->intra_scantable.permutated[i];
2742 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2745 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2752 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2753 DCTELEM *block, int n, int qscale)
2755 int i, level, nCoeffs;
2756 const uint16_t *quant_matrix;
2759 if(s->alternate_scan) nCoeffs= 63;
2760 else nCoeffs= s->block_last_index[n];
2763 block[0] = block[0] * s->y_dc_scale;
2765 block[0] = block[0] * s->c_dc_scale;
2766 quant_matrix = s->intra_matrix;
2767 for(i=1;i<=nCoeffs;i++) {
2768 int j= s->intra_scantable.permutated[i];
2773 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2776 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2785 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2786 DCTELEM *block, int n, int qscale)
2788 int i, level, nCoeffs;
2789 const uint16_t *quant_matrix;
2792 if(s->alternate_scan) nCoeffs= 63;
2793 else nCoeffs= s->block_last_index[n];
2795 quant_matrix = s->inter_matrix;
2796 for(i=0; i<=nCoeffs; i++) {
2797 int j= s->intra_scantable.permutated[i];
2802 level = (((level << 1) + 1) * qscale *
2803 ((int) (quant_matrix[j]))) >> 4;
2806 level = (((level << 1) + 1) * qscale *
2807 ((int) (quant_matrix[j]))) >> 4;
2816 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2817 DCTELEM *block, int n, int qscale)
2819 int i, level, qmul, qadd;
2822 assert(s->block_last_index[n]>=0);
2828 block[0] = block[0] * s->y_dc_scale;
2830 block[0] = block[0] * s->c_dc_scale;
2831 qadd = (qscale - 1) | 1;
2838 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2840 for(i=1; i<=nCoeffs; i++) {
2844 level = level * qmul - qadd;
2846 level = level * qmul + qadd;
2853 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2854 DCTELEM *block, int n, int qscale)
2856 int i, level, qmul, qadd;
2859 assert(s->block_last_index[n]>=0);
2861 qadd = (qscale - 1) | 1;
2864 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2866 for(i=0; i<=nCoeffs; i++) {
2870 level = level * qmul - qadd;
2872 level = level * qmul + qadd;
2880 * set qscale and update qscale dependent variables.
2882 void ff_set_qscale(MpegEncContext * s, int qscale)
2886 else if (qscale > 31)
2890 s->chroma_qscale= s->chroma_qscale_table[qscale];
2892 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2893 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2896 void MPV_report_decode_progress(MpegEncContext *s)
2898 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
2899 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);