2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
40 #include "xvmc_internal.h"
/* Forward declarations for the per-standard inverse-quantization
 * routines; ff_dct_common_init() installs these as the default C
 * implementations on the MpegEncContext function pointers below. */
47 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
/* bitexact variant: selected instead of the _c version when
 * CODEC_FLAG_BITEXACT is set (see ff_dct_common_init). */
53 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
56 DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
58 DCTELEM *block, int n, int qscale);
59 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
60 DCTELEM *block, int n, int qscale);
63 /* enable all paranoid tests for rounding, overflows, etc... */
/* Default luma-qscale -> chroma-qscale mapping: the identity, i.e.
 * chroma uses the same quantizer as luma (installed as the default in
 * MPV_common_defaults). */
69 static const uint8_t ff_default_chroma_qscale_table[32] = {
70 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
71 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
72 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* DC coefficient scale, indexed by qscale: constant 8 for every
 * qscale value (also entry 0 of ff_mpeg2_dc_scale_table below). */
75 const uint8_t ff_mpeg1_dc_scale_table[128] = {
76 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
82 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
83 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
84 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* DC scale table, constant 4 for every qscale
 * (entry 1 of ff_mpeg2_dc_scale_table). */
87 static const uint8_t mpeg2_dc_scale_table1[128] = {
88 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
89 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
94 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
95 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
96 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* DC scale table, constant 2 for every qscale
 * (entry 2 of ff_mpeg2_dc_scale_table). */
99 static const uint8_t mpeg2_dc_scale_table2[128] = {
100 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
101 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
106 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
107 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
108 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* DC scale table, constant 1 for every qscale
 * (entry 3 of ff_mpeg2_dc_scale_table). */
111 static const uint8_t mpeg2_dc_scale_table3[128] = {
112 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
113 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
117 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
118 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
119 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
120 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* DC scale tables selected by the MPEG-2 intra_dc_precision field
 * (0..3): precision 0 reuses the MPEG-1 table (scale 8), higher
 * precisions halve the scale each step (4, 2, 1). */
123 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
124 ff_mpeg1_dc_scale_table,
125 mpeg2_dc_scale_table1,
126 mpeg2_dc_scale_table2,
127 mpeg2_dc_scale_table3,
/* Supported pixel format lists (initializers elided in this view;
 * presumably YUV420P plus, for the hwaccel list, hardware formats —
 * TODO confirm against full source). */
130 const enum PixelFormat ff_pixfmt_list_420[] = {
135 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
/* Scan forward for an MPEG start code (byte sequence 00 00 01 xx),
 * carrying the last 4 bytes seen in *state so that a start code split
 * across buffer boundaries is still detected on the next call.
 * NOTE(review): interior lines are elided in this view; comments
 * annotate only what is visible. */
143 const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
145 uint32_t * restrict state)
/* Drain up to 3 bytes through *state to catch codes straddling the
 * previous buffer. */
153 for (i = 0; i < 3; i++) {
154 uint32_t tmp = *state << 8;
155 *state = tmp + *(p++);
156 if (tmp == 0x100 || p == end)
/* Skip ahead by as many bytes as the tail of the window rules out:
 * a byte > 1 cannot be part of 00 00 01 within the next 3 positions. */
161 if (p[-1] > 1 ) p += 3;
162 else if (p[-2] ) p += 2;
163 else if (p[-3]|(p[-1]-1)) p++;
/* Clamp to the buffer end before rewinding over the 4 window bytes. */
170 p = FFMIN(p, end) - 4;
176 /* init common dct for both encoder and decoder */
/* Initialize the DCT/IDCT related state shared by encoder and decoder:
 * DSP utils, dequantization function pointers, per-arch overrides and
 * the permuted scan tables. */
177 av_cold int ff_dct_common_init(MpegEncContext *s)
179 dsputil_init(&s->dsp, s->avctx);
/* Install the generic C dequantizers; arch-specific init calls below
 * may replace them with optimized versions. */
181 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
182 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
183 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
184 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
185 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
186 if (s->flags & CODEC_FLAG_BITEXACT)
187 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
188 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Per-architecture initialization (guarded by #if in the elided
 * lines of the full source). */
191 MPV_common_init_mmx(s);
193 MPV_common_init_axp(s);
195 MPV_common_init_mlib(s);
197 MPV_common_init_mmi(s);
199 MPV_common_init_arm(s);
201 MPV_common_init_altivec(s);
203 MPV_common_init_bfin(s);
206 /* load & permutate scantables
207 * note: only wmv uses different ones
209 if (s->alternate_scan) {
210 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
211 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
213 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
214 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
216 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
217 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Copy a Picture and mark the destination as a copy (body mostly
 * elided in this view; the visible line tags dst's buffer type). */
222 void ff_copy_picture(Picture *dst, Picture *src)
225 dst->f.type = FF_BUFFER_TYPE_COPY;
229 * Release a frame buffer
231 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
233 /* Windows Media Image codecs allocate internal buffers with different
234 * dimensions; ignore user defined callbacks for these
236 if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
237 ff_thread_release_buffer(s->avctx, (AVFrame *) pic);
/* else-branch (elided guard in this view): WMV3/VC1 image codecs
 * release through the default path. */
239 avcodec_default_release_buffer(s->avctx, (AVFrame *) pic);
/* Also drop any hwaccel private data attached by alloc_frame_buffer. */
240 av_freep(&pic->f.hwaccel_picture_private);
244 * Allocate a frame buffer
246 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* Allocate hwaccel private data first so a later failure can free it. */
250 if (s->avctx->hwaccel) {
251 assert(!pic->f.hwaccel_picture_private);
252 if (s->avctx->hwaccel->priv_data_size) {
253 pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
254 if (!pic->f.hwaccel_picture_private) {
255 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
/* WMV3/VC1 image codecs bypass user get_buffer callbacks (see
 * free_frame_buffer for the matching release path). */
261 if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
262 r = ff_thread_get_buffer(s->avctx, (AVFrame *) pic);
264 r = avcodec_default_get_buffer(s->avctx, (AVFrame *) pic);
/* Validate the buffer returned by get_buffer(). */
266 if (r < 0 || !pic->f.age || !pic->f.type || !pic->f.data[0]) {
267 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n",
268 r, pic->f.age, pic->f.type, pic->f.data[0]);
269 av_freep(&pic->f.hwaccel_picture_private);
/* The stride must match the stride of previously allocated frames;
 * mpegvideo caches s->linesize/s->uvlinesize globally. */
273 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
274 s->uvlinesize != pic->f.linesize[1])) {
275 av_log(s->avctx, AV_LOG_ERROR,
276 "get_buffer() failed (stride changed)\n");
277 free_frame_buffer(s, pic);
/* U and V planes must share one stride. */
281 if (pic->f.linesize[1] != pic->f.linesize[2]) {
282 av_log(s->avctx, AV_LOG_ERROR,
283 "get_buffer() failed (uv stride mismatch)\n");
284 free_frame_buffer(s, pic);
292 * Allocate a Picture.
293 * The pixels are allocated/set by calling get_buffer() if shared = 0
295 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
297 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
299 // the + 1 is needed so memset(,,stride*height) does not sig11
301 const int mb_array_size = s->mb_stride * s->mb_height;
302 const int b8_array_size = s->b8_stride * s->mb_height * 2;
303 const int b4_array_size = s->b4_stride * s->mb_height * 4;
/* shared != 0: caller supplies the pixel buffers, only tag the type. */
308 assert(pic->f.data[0]);
309 assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
310 pic->f.type = FF_BUFFER_TYPE_SHARED;
/* shared == 0: we must get a fresh buffer from get_buffer(). */
312 assert(!pic->f.data[0]);
314 if (alloc_frame_buffer(s, pic) < 0)
/* Cache the strides of the first allocated frame; later allocations
 * are checked against these in alloc_frame_buffer(). */
317 s->linesize = pic->f.linesize[0];
318 s->uvlinesize = pic->f.linesize[1];
/* First-time per-picture side data allocation (skipped on reuse). */
321 if (pic->f.qscale_table == NULL) {
/* encoder-only statistics tables (elided s->encoding guard) */
323 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
324 mb_array_size * sizeof(int16_t), fail)
325 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
326 mb_array_size * sizeof(int16_t), fail)
327 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
328 mb_array_size * sizeof(int8_t ), fail)
331 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
332 mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
333 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
334 (big_mb_num + s->mb_stride) * sizeof(uint8_t),
336 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
337 (big_mb_num + s->mb_stride) * sizeof(uint32_t),
/* Offset into the base arrays so that out-of-frame accesses by the
 * error concealment code stay inside the allocation. */
339 pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
340 pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
/* H.264 uses 4x4 motion granularity (subsample_log2 == 2)... */
341 if (s->out_format == FMT_H264) {
342 for (i = 0; i < 2; i++) {
343 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
344 2 * (b4_array_size + 4) * sizeof(int16_t),
346 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
347 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
348 4 * mb_array_size * sizeof(uint8_t), fail)
350 pic->f.motion_subsample_log2 = 2;
/* ...everything else uses 8x8 granularity (subsample_log2 == 3). */
351 } else if (s->out_format == FMT_H263 || s->encoding ||
352 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
353 for (i = 0; i < 2; i++) {
354 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
355 2 * (b8_array_size + 4) * sizeof(int16_t),
357 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
358 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
359 4 * mb_array_size * sizeof(uint8_t), fail)
361 pic->f.motion_subsample_log2 = 3;
363 if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
364 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
365 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
367 pic->f.qstride = s->mb_stride;
368 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
369 1 * sizeof(AVPanScan), fail)
372 /* It might be nicer if the application would keep track of these
373 * but it would require an API change. */
374 memmove(s->prev_pict_types + 1, s->prev_pict_types,
375 PREV_PICT_TYPES_BUFFER_SIZE-1);
376 s->prev_pict_types[0] = s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
377 if (pic->f.age < PREV_PICT_TYPES_BUFFER_SIZE &&
378 s->prev_pict_types[pic->f.age] == AV_PICTURE_TYPE_B)
379 pic->f.age = INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2
380 // and it is a bit tricky to skip them anyway.
384 fail: // for the FF_ALLOCZ_OR_GOTO macro
386 free_frame_buffer(s, pic);
391 * Deallocate a picture.
393 static void free_picture(MpegEncContext *s, Picture *pic)
/* Release the pixel buffer unless it is caller-owned (shared). */
397 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
398 free_frame_buffer(s, pic);
/* Free every side-data table allocated in ff_alloc_picture().
 * av_freep() NULLs the pointers, so repeated calls are safe. */
401 av_freep(&pic->mb_var);
402 av_freep(&pic->mc_mb_var);
403 av_freep(&pic->mb_mean);
404 av_freep(&pic->f.mbskip_table);
405 av_freep(&pic->qscale_table_base);
406 av_freep(&pic->mb_type_base);
407 av_freep(&pic->f.dct_coeff);
408 av_freep(&pic->f.pan_scan);
409 pic->f.mb_type = NULL;
410 for (i = 0; i < 2; i++) {
411 av_freep(&pic->motion_val_base[i]);
412 av_freep(&pic->f.ref_index[i]);
/* For shared pictures only the data pointers are cleared; the
 * underlying memory belongs to the caller. */
415 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
416 for (i = 0; i < 4; i++) {
418 pic->f.data[i] = NULL;
/* Allocate the per-thread scratch buffers of a (possibly duplicated)
 * MpegEncContext; freed by free_duplicate_context(). Returns 0 on
 * success, -1 on allocation failure (cleanup via MPV_common_end). */
424 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
426 int y_size = s->b8_stride * (2 * s->mb_height + 1);
427 int c_size = s->mb_stride * (s->mb_height + 1);
428 int yc_size = y_size + 2 * c_size;
431 // edge emu needs blocksize + filter length - 1
432 // (= 17x17 for halfpel / 21x21 for h264)
433 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
434 (s->width + 64) * 2 * 21 * 2, fail); // (width + edge + align)*interlaced*MBsize*tolerance
436 // FIXME should be linesize instead of s->width * 2
437 // but that is not known before get_buffer()
438 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
439 (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
/* All scratchpads alias one allocation; obmc is offset by 16 bytes. */
440 s->me.temp = s->me.scratchpad;
441 s->rd_scratchpad = s->me.scratchpad;
442 s->b_scratchpad = s->me.scratchpad;
443 s->obmc_scratchpad = s->me.scratchpad + 16;
445 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
446 ME_MAP_SIZE * sizeof(uint32_t), fail)
447 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
448 ME_MAP_SIZE * sizeof(uint32_t), fail)
449 if (s->avctx->noise_reduction) {
450 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
451 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 DCTELEMs, double-buffered. */
454 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
455 s->block = s->blocks[0];
457 for (i = 0; i < 12; i++) {
458 s->pblocks[i] = &s->block[i];
/* H.263-family needs AC prediction values: one plane of y_size plus
 * two chroma planes of c_size, 16 int16_t coefficients each. */
461 if (s->out_format == FMT_H263) {
463 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
464 yc_size * sizeof(int16_t) * 16, fail);
465 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
466 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
467 s->ac_val[2] = s->ac_val[1] + c_size;
472 return -1; // free() through MPV_common_end()
/* Free everything allocated by init_duplicate_context(). The aliased
 * scratchpads share one allocation, so only me.scratchpad is freed
 * and obmc_scratchpad is just NULLed. */
475 static void free_duplicate_context(MpegEncContext *s)
480 av_freep(&s->edge_emu_buffer);
481 av_freep(&s->me.scratchpad);
485 s->obmc_scratchpad = NULL;
487 av_freep(&s->dct_error_sum);
488 av_freep(&s->me.map);
489 av_freep(&s->me.score_map);
490 av_freep(&s->blocks);
491 av_freep(&s->ac_val_base);
/* Save the per-thread pointers/fields of src into bak so that
 * ff_update_duplicate_context() can restore them after the wholesale
 * memcpy of the context (most COPY() lines are elided in this view). */
495 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
497 #define COPY(a) bak->a = src->a
498 COPY(edge_emu_buffer);
503 COPY(obmc_scratchpad);
510 COPY(me.map_generation);
/* Refresh a slice-thread context from the master: copy the whole
 * struct, then restore dst's own per-thread buffers (saved in bak)
 * and re-point pblocks into dst's block array. */
522 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
526 // FIXME copy only needed parts
528 backup_duplicate_context(&bak, dst);
529 memcpy(dst, src, sizeof(MpegEncContext));
530 backup_duplicate_context(dst, &bak);
531 for (i = 0; i < 12; i++) {
532 dst->pblocks[i] = &dst->block[i];
534 // STOP_TIMER("update_duplicate_context")
535 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading: copy decoding state from the source thread's
 * context into dst. Picture pointers are rebased into dst's own
 * picture array; the bitstream buffer is deep-copied. */
538 int ff_mpeg_update_thread_context(AVCodecContext *dst,
539 const AVCodecContext *src)
541 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
543 if (dst == src || !s1->context_initialized)
546 // FIXME can parameters change on I-frames?
547 // in that case dst may need a reinit
/* First use of this thread context: clone wholesale, then give it
 * its own picture range and an empty bitstream buffer. */
548 if (!s->context_initialized) {
549 memcpy(s, s1, sizeof(MpegEncContext));
552 s->picture_range_start += MAX_PICTURE_COUNT;
553 s->picture_range_end += MAX_PICTURE_COUNT;
554 s->bitstream_buffer = NULL;
555 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
560 s->avctx->coded_height = s1->avctx->coded_height;
561 s->avctx->coded_width = s1->avctx->coded_width;
562 s->avctx->width = s1->avctx->width;
563 s->avctx->height = s1->avctx->height;
565 s->coded_picture_number = s1->coded_picture_number;
566 s->picture_number = s1->picture_number;
567 s->input_picture_number = s1->input_picture_number;
569 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
/* Copies the last/next/current Picture structs as one byte range. */
570 memcpy(&s->last_picture, &s1->last_picture,
571 (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
/* REBASE_PICTURE translates s1-relative pointers to s's array. */
573 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
574 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
575 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
577 memcpy(s->prev_pict_types, s1->prev_pict_types,
578 PREV_PICT_TYPES_BUFFER_SIZE);
580 // Error/bug resilience
581 s->next_p_frame_damaged = s1->next_p_frame_damaged;
582 s->workaround_bugs = s1->workaround_bugs;
583 s->padding_bug_score = s1->padding_bug_score;
/* MPEG-4 timing fields copied as one contiguous byte range. */
586 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
587 (char *) &s1->shape - (char *) &s1->time_increment_bits);
590 s->max_b_frames = s1->max_b_frames;
591 s->low_delay = s1->low_delay;
592 s->dropable = s1->dropable;
594 // DivX handling (doesn't work)
595 s->divx_packed = s1->divx_packed;
/* Deep-copy the leftover bitstream data, growing our buffer first
 * if needed, and zero the padding region for the bit reader. */
597 if (s1->bitstream_buffer) {
598 if (s1->bitstream_buffer_size +
599 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
600 av_fast_malloc(&s->bitstream_buffer,
601 &s->allocated_bitstream_buffer_size,
602 s1->allocated_bitstream_buffer_size);
603 s->bitstream_buffer_size = s1->bitstream_buffer_size;
604 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
605 s1->bitstream_buffer_size);
606 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
607 FF_INPUT_BUFFER_PADDING_SIZE);
610 // MPEG2/interlacing info
611 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
612 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
614 if (!s1->first_field) {
615 s->last_pict_type = s1->pict_type;
616 if (s1->current_picture_ptr)
617 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
619 if (s1->pict_type != AV_PICTURE_TYPE_B) {
620 s->last_non_b_pict_type = s1->pict_type;
628 * Set the given MpegEncContext to common defaults
629 * (same for encoding and decoding).
630 * The changed fields will not depend upon the
631 * prior state of the MpegEncContext.
633 void MPV_common_defaults(MpegEncContext *s)
/* Default DC/chroma quantizer tables: MPEG-1 DC scale, identity
 * chroma mapping (see the tables at the top of this file). */
635 s->y_dc_scale_table =
636 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
637 s->chroma_qscale_table = ff_default_chroma_qscale_table;
638 s->progressive_frame = 1;
639 s->progressive_sequence = 1;
640 s->picture_structure = PICT_FRAME;
642 s->coded_picture_number = 0;
643 s->picture_number = 0;
644 s->input_picture_number = 0;
646 s->picture_in_gop_number = 0;
651 s->picture_range_start = 0;
652 s->picture_range_end = MAX_PICTURE_COUNT;
656 * Set the given MpegEncContext to defaults for decoding.
657 * the changed fields will not depend upon
658 * the prior state of the MpegEncContext.
660 void MPV_decode_defaults(MpegEncContext *s)
662 MPV_common_defaults(s);
666 * init common structure for both encoder and decoder.
667 * this assumes that some variables like width/height are already set
669 av_cold int MPV_common_init(MpegEncContext *s)
671 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y,
/* Slice threading only applies when encoding or when the codec
 * enables FF_THREAD_SLICE; otherwise run with a single context. */
672 threads = (s->encoding ||
674 s->avctx->active_thread_type & FF_THREAD_SLICE)) ?
675 s->avctx->thread_count : 1;
/* Interlaced MPEG-2 rounds the height up to a multiple of 32 so that
 * both fields fit; H.264 manages mb_height itself. */
677 if (s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
678 s->mb_height = (s->height + 31) / 32 * 2;
679 else if (s->codec_id != CODEC_ID_H264)
680 s->mb_height = (s->height + 15) / 16;
682 if (s->avctx->pix_fmt == PIX_FMT_NONE) {
683 av_log(s->avctx, AV_LOG_ERROR,
684 "decoding to PIX_FMT_NONE is not supported.\n");
/* Cap the thread count: at most MAX_THREADS and at most one thread
 * per macroblock row. */
688 if ((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) &&
689 (s->avctx->thread_count > MAX_THREADS ||
690 (s->avctx->thread_count > s->mb_height && s->mb_height))) {
691 int max_threads = FFMIN(MAX_THREADS, s->mb_height);
692 av_log(s->avctx, AV_LOG_WARNING,
693 "too many threads (%d), reducing to %d\n",
694 s->avctx->thread_count, max_threads);
695 threads = max_threads;
698 if ((s->width || s->height) &&
699 av_image_check_size(s->width, s->height, 0, s->avctx))
702 ff_dct_common_init(s);
704 s->flags = s->avctx->flags;
705 s->flags2 = s->avctx->flags2;
/* Derived geometry; the +1 strides leave room for edge macroblocks. */
707 s->mb_width = (s->width + 15) / 16;
708 s->mb_stride = s->mb_width + 1;
709 s->b8_stride = s->mb_width * 2 + 1;
710 s->b4_stride = s->mb_width * 4 + 1;
711 mb_array_size = s->mb_height * s->mb_stride;
712 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
714 /* set chroma shifts */
715 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
716 &(s->chroma_y_shift) );
718 /* set default edge pos, will be overriden in decode_header if needed */
719 s->h_edge_pos = s->mb_width * 16;
720 s->v_edge_pos = s->mb_height * 16;
722 s->mb_num = s->mb_width * s->mb_height;
727 s->block_wrap[3] = s->b8_stride;
729 s->block_wrap[5] = s->mb_stride;
731 y_size = s->b8_stride * (2 * s->mb_height + 1);
732 c_size = s->mb_stride * (s->mb_height + 1);
733 yc_size = y_size + 2 * c_size;
735 /* convert fourcc to upper case */
736 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
737 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
739 s->avctx->coded_frame = (AVFrame*)&s->current_picture;
/* mb index -> mb x/y lookup table. */
741 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
742 for (y = 0; y < s->mb_height; y++)
743 for (x = 0; x < s->mb_width; x++)
744 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
746 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
749 /* Allocate MV tables */
750 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
751 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
752 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
753 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
754 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
755 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
/* Offset so index (0,0) maps to the first real macroblock. */
756 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
757 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
758 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
759 s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
760 s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
761 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
763 if(s->msmpeg4_version){
764 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
766 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
768 /* Allocate MB type table */
769 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
771 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
/* Quantization matrices: 64 coefficients x 32 qscales (+16-bit
 * variants for the fast quantizer). */
773 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix , 64*32 * sizeof(int), fail)
774 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix , 64*32 * sizeof(int), fail)
775 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix , 64*32 * sizeof(int), fail)
776 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16 , 64*32*2 * sizeof(uint16_t), fail)
777 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
778 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16 , 64*32*2 * sizeof(uint16_t), fail)
779 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
780 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
782 if(s->avctx->noise_reduction){
783 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
/* One picture pool per thread for frame threading. */
787 s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
788 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
789 s->picture_count * sizeof(Picture), fail);
790 for (i = 0; i < s->picture_count; i++) {
791 avcodec_get_frame_defaults((AVFrame *) &s->picture[i]);
794 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
796 if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
797 /* interlaced direct mode decoding tables */
798 for (i = 0; i < 2; i++) {
800 for (j = 0; j < 2; j++) {
801 for (k = 0; k < 2; k++) {
802 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
803 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
805 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
806 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
807 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
809 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
812 if (s->out_format == FMT_H263) {
814 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
815 s->coded_block = s->coded_block_base + s->b8_stride + 1;
817 /* cbp, ac_pred, pred_dir */
818 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
819 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
822 if (s->h263_pred || s->h263_plus || !s->encoding) {
824 // MN: we need these for error resilience of intra-frames
825 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
826 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
827 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
828 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the canonical "no prediction yet" DC value (128 << 3). */
829 for (i = 0; i < yc_size; i++)
830 s->dc_val_base[i] = 1024;
833 /* which mb is a intra block */
834 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
835 memset(s->mbintra_table, 1, mb_array_size);
837 /* init macroblock skip table */
838 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
839 // Note the + 1 is for a quicker mpeg4 slice_end detection
840 FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
842 s->parse_context.state = -1;
843 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
844 s->avctx->debug_mv) {
845 s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
846 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
847 s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
848 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
849 s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
850 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
853 s->context_initialized = 1;
854 s->thread_context[0] = s;
/* Slice threading: clone the master context for each extra thread
 * and hand each a contiguous band of macroblock rows. */
856 if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
857 for (i = 1; i < threads; i++) {
858 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
859 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
862 for (i = 0; i < threads; i++) {
863 if (init_duplicate_context(s->thread_context[i], s) < 0)
865 s->thread_context[i]->start_mb_y = (s->mb_height*(i ) + s->avctx->thread_count / 2) / s->avctx->thread_count;
866 s->thread_context[i]->end_mb_y = (s->mb_height*(i+1) + s->avctx->thread_count / 2) / s->avctx->thread_count;
/* Single-context path: this context covers every row itself. */
869 if (init_duplicate_context(s, s) < 0)
872 s->end_mb_y = s->mb_height;
881 /* init common structure for both encoder and decoder */
882 void MPV_common_end(MpegEncContext *s)
/* Tear down in reverse of MPV_common_init: thread contexts first. */
886 if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type & FF_THREAD_SLICE)) {
887 for (i = 0; i < s->avctx->thread_count; i++) {
888 free_duplicate_context(s->thread_context[i]);
/* Context 0 is s itself, so only contexts 1..n-1 are freed here. */
890 for (i = 1; i < s->avctx->thread_count; i++) {
891 av_freep(&s->thread_context[i]);
893 } else free_duplicate_context(s);
895 av_freep(&s->parse_context.buffer);
896 s->parse_context.buffer_size = 0;
898 av_freep(&s->mb_type);
899 av_freep(&s->p_mv_table_base);
900 av_freep(&s->b_forw_mv_table_base);
901 av_freep(&s->b_back_mv_table_base);
902 av_freep(&s->b_bidir_forw_mv_table_base);
903 av_freep(&s->b_bidir_back_mv_table_base);
904 av_freep(&s->b_direct_mv_table_base);
/* The offset aliases into the freed bases must be cleared too. */
905 s->p_mv_table = NULL;
906 s->b_forw_mv_table = NULL;
907 s->b_back_mv_table = NULL;
908 s->b_bidir_forw_mv_table = NULL;
909 s->b_bidir_back_mv_table = NULL;
910 s->b_direct_mv_table = NULL;
911 for (i = 0; i < 2; i++) {
912 for (j = 0; j < 2; j++) {
913 for (k = 0; k < 2; k++) {
914 av_freep(&s->b_field_mv_table_base[i][j][k]);
915 s->b_field_mv_table[i][j][k] = NULL;
917 av_freep(&s->b_field_select_table[i][j]);
918 av_freep(&s->p_field_mv_table_base[i][j]);
919 s->p_field_mv_table[i][j] = NULL;
921 av_freep(&s->p_field_select_table[i]);
924 av_freep(&s->dc_val_base);
925 av_freep(&s->coded_block_base);
926 av_freep(&s->mbintra_table);
927 av_freep(&s->cbp_table);
928 av_freep(&s->pred_dir_table);
930 av_freep(&s->mbskip_table);
931 av_freep(&s->prev_pict_types);
932 av_freep(&s->bitstream_buffer);
933 s->allocated_bitstream_buffer_size = 0;
935 av_freep(&s->avctx->stats_out);
936 av_freep(&s->ac_stats);
937 av_freep(&s->error_status_table);
938 av_freep(&s->mb_index2xy);
939 av_freep(&s->lambda_table);
/* The chroma matrices may alias the luma ones; free them only when
 * they are separate allocations, then drop the aliases. */
940 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
941 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
942 s->q_chroma_intra_matrix= NULL;
943 s->q_chroma_intra_matrix16= NULL;
944 av_freep(&s->q_intra_matrix);
945 av_freep(&s->q_inter_matrix);
946 av_freep(&s->q_intra_matrix16);
947 av_freep(&s->q_inter_matrix16);
948 av_freep(&s->input_picture);
949 av_freep(&s->reordered_input_picture);
950 av_freep(&s->dct_offset);
/* A frame-thread copy shares the picture array with the master and
 * must not free it (is_copy guard). */
952 if (s->picture && !s->avctx->internal->is_copy) {
953 for (i = 0; i < s->picture_count; i++) {
954 free_picture(s, &s->picture[i]);
957 av_freep(&s->picture);
958 s->context_initialized = 0;
959 s->last_picture_ptr =
960 s->next_picture_ptr =
961 s->current_picture_ptr = NULL;
962 s->linesize = s->uvlinesize = 0;
964 for (i = 0; i < 3; i++)
965 av_freep(&s->visualization_buffer[i]);
967 if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
968 avcodec_default_free_buffers(s->avctx);
/* Build the derived run/level lookup tables (max_level, max_run,
 * index_run) of an RLTable, for the "last" and "not last" halves.
 * If static_store is given the tables live there (and the call is a
 * no-op when already built); otherwise they are av_malloc'ed. */
971 void init_rl(RLTable *rl,
972 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
974 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
975 uint8_t index_run[MAX_RUN + 1];
976 int last, run, level, start, end, i;
978 /* If table is static, we can quit if rl->max_level[0] is not NULL */
979 if (static_store && rl->max_level[0])
982 /* compute max_level[], max_run[] and index_run[] */
983 for (last = 0; last < 2; last++) {
/* rl->n marks "no entry" in index_run. */
992 memset(max_level, 0, MAX_RUN + 1);
993 memset(max_run, 0, MAX_LEVEL + 1);
994 memset(index_run, rl->n, MAX_RUN + 1);
995 for (i = start; i < end; i++) {
996 run = rl->table_run[i];
997 level = rl->table_level[i];
998 if (index_run[run] == rl->n)
1000 if (level > max_level[run])
1001 max_level[run] = level;
1002 if (run > max_run[level])
1003 max_run[level] = run;
/* static_store layout: [max_level | max_run | index_run] packed
 * back to back per 'last' value. */
1006 rl->max_level[last] = static_store[last];
1008 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1009 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1011 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1013 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1014 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1016 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1018 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1019 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Precompute, for each qscale q (0..31), an RL_VLC table that folds
 * the dequantization (level * qmul + qadd) into the VLC decode step. */
1023 void init_vlc_rl(RLTable *rl)
1027 for (q = 0; q < 32; q++) {
/* H.263-style dequant rounding term; the q==0 special case is in
 * elided lines of the full source — TODO confirm. */
1029 int qadd = (q - 1) | 1;
1035 for (i = 0; i < rl->vlc.table_size; i++) {
1036 int code = rl->vlc.table[i][0];
1037 int len = rl->vlc.table[i][1];
1040 if (len == 0) { // illegal code
1043 } else if (len < 0) { // more bits needed
1047 if (code == rl->n) { // esc
/* run is stored off by one; codes past rl->last carry the
 * "last coefficient" flag encoded as run += 192. */
1051 run = rl->table_run[code] + 1;
1052 level = rl->table_level[code] * qmul + qadd;
1053 if (code >= rl->last) run += 192;
1056 rl->rl_vlc[q][i].len = len;
1057 rl->rl_vlc[q][i].level = level;
1058 rl->rl_vlc[q][i].run = run;
/* Free the frame buffers of all pictures that are allocated but not
 * referenced and owned by this context; the current picture is kept
 * unless remove_current is set. */
1063 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1067 /* release non reference frames */
1068 for (i = 0; i < s->picture_count; i++) {
1069 if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1070 (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1071 (remove_current || &s->picture[i] != s->current_picture_ptr)
1072 /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1073 free_frame_buffer(s, &s->picture[i]);
/* Return the index of a free slot in s->picture[] within
 * [picture_range_start, picture_range_end). Slots with f.type == 0 are
 * preferred, then typed-but-unallocated slots, then any unallocated slot.
 * NOTE(review): how 'shared' alters the search is not visible in this view —
 * the returns between the loops are elided; confirm against the full source.
 * Never returns failure: running out of slots is treated as an internal
 * error (see the rationale comment below). */
1078 int ff_find_unused_picture(MpegEncContext *s, int shared)
1083 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1084 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1088 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1089 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
1092 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1093 if (s->picture[i].f.data[0] == NULL)
1098 av_log(s->avctx, AV_LOG_FATAL,
1099 "Internal error, picture buffer overflow\n");
1100 /* We could return -1, but the codec would crash trying to draw into a
1101 * non-existing frame anyway. This is safer than waiting for a random crash.
1102 * Also the return of this is never useful, an encoder must only allocate
1103 * as much as allowed in the specification. This has no relationship to how
1104 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1105 * enough for such valid streams).
1106 * Plus, a decoder has to check stream validity and remove frames if too
1107 * many reference frames are around. Waiting for "OOM" is not correct at
1108 * all. Similarly, missing reference frames have to be replaced by
1109 * interpolated/MC frames, anything else is a bug in the codec ...
/* Refresh the per-coefficient noise-reduction offsets used by the encoder.
 * Once a statistics window exceeds 2^16 blocks, both the error sums and the
 * block count are halved, giving an exponentially-decaying average. */
static void update_noise_reduction(MpegEncContext *s){
1118 for(intra=0; intra<2; intra++){
1119 if(s->dct_count[intra] > (1<<16)){
1120 for(i=0; i<64; i++){
1121 s->dct_error_sum[intra][i] >>=1;
1123 s->dct_count[intra] >>= 1;
1126 for(i=0; i<64; i++){
        /* offset per coefficient: strength * count scaled by measured error,
         * with +error/2 for rounding and +1 to avoid division by zero */
1127 s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
1133 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
1135 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1141 assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
1143 /* mark&release old frames */
1144 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->f.data[0]) {
1145 if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
1146 if (s->last_picture_ptr->owner2 == s)
1147 free_frame_buffer(s, s->last_picture_ptr);
1149 /* release forgotten pictures */
1150 /* if(mpeg124/h263) */
1152 for(i=0; i<s->picture_count; i++){
/* a still-referenced picture that is neither next nor last is a leak
 * ("zombie") unless frame threading legitimately keeps it alive */
1153 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].f.reference) {
1154 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1155 av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
1156 free_frame_buffer(s, &s->picture[i]);
1164 ff_release_unused_pictures(s, 1);
1166 if (s->current_picture_ptr && s->current_picture_ptr->f.data[0] == NULL)
1167 pic= s->current_picture_ptr; //we already have an unused image (maybe it was set before reading the header)
1169 i= ff_find_unused_picture(s, 0);
1172 pic= &s->picture[i];
1175 pic->f.reference = 0;
1177 if (s->codec_id == CODEC_ID_H264)
1178 pic->f.reference = s->picture_structure;
1179 else if (s->pict_type != AV_PICTURE_TYPE_B)
/* non-B, non-H264: reference both fields (PICT_TOP_FIELD|PICT_BOTTOM_FIELD) */
1180 pic->f.reference = 3;
1183 pic->f.coded_picture_number = s->coded_picture_number++;
1185 if(ff_alloc_picture(s, pic, 0) < 0)
1188 s->current_picture_ptr= pic;
1189 //FIXME use only the vars from current_pic
1190 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1191 if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
1192 if(s->picture_structure != PICT_FRAME)
/* field pictures: derive top_field_first from which field comes first */
1193 s->current_picture_ptr->f.top_field_first = (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1195 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame && !s->progressive_sequence;
1196 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1199 s->current_picture_ptr->f.pict_type = s->pict_type;
1200 // if(s->flags && CODEC_FLAG_QSCALE)
1201 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
1202 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1204 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
/* advance the last/next reference chain for non-B pictures */
1206 if (s->pict_type != AV_PICTURE_TYPE_B) {
1207 s->last_picture_ptr= s->next_picture_ptr;
1209 s->next_picture_ptr= s->current_picture_ptr;
1211 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1212 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1213 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1214 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1215 s->pict_type, s->dropable);*/
1217 if(s->codec_id != CODEC_ID_H264){
/* a P/B picture without a last reference (stream starts on a non-keyframe,
 * or field-coded first keyframe) gets a dummy gray reference allocated */
1218 if ((s->last_picture_ptr == NULL || s->last_picture_ptr->f.data[0] == NULL) &&
1219 (s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){
1220 if (s->pict_type != AV_PICTURE_TYPE_I)
1221 av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
1222 else if (s->picture_structure != PICT_FRAME)
1223 av_log(avctx, AV_LOG_INFO, "allocate dummy last picture for field based first keyframe\n");
1225 /* Allocate a dummy frame */
1226 i= ff_find_unused_picture(s, 0);
1229 s->last_picture_ptr= &s->picture[i];
1230 s->last_picture_ptr->f.key_frame = 0;
1231 if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
1234 if(s->codec_id == CODEC_ID_FLV1 || s->codec_id == CODEC_ID_H263){
1235 for(i=0; i<avctx->height; i++)
/* fill luma with 16 (black) so concealment from the dummy frame is neutral */
1236 memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
1239 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
1240 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
1242 if ((s->next_picture_ptr == NULL || s->next_picture_ptr->f.data[0] == NULL) && s->pict_type == AV_PICTURE_TYPE_B) {
1243 /* Allocate a dummy frame */
1244 i= ff_find_unused_picture(s, 0);
1247 s->next_picture_ptr= &s->picture[i];
1248 s->next_picture_ptr->f.key_frame = 0;
1249 if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
1251 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 0);
1252 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 1);
1256 if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1257 if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1259 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->f.data[0]));
/* field pictures (non-H264): point data at the selected field and double
 * the linesizes so the rest of the code can address a single field */
1261 if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
1264 if(s->picture_structure == PICT_BOTTOM_FIELD){
1265 s->current_picture.f.data[i] += s->current_picture.f.linesize[i];
1267 s->current_picture.f.linesize[i] *= 2;
1268 s->last_picture.f.linesize[i] *= 2;
1269 s->next_picture.f.linesize[i] *= 2;
1273 s->err_recognition = avctx->err_recognition;
1275 /* set dequantizer, we can't do it during init as it might change for mpeg4
1276 and we can't do it in the header decode as init is not called for mpeg4 there yet */
1277 if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
1278 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1279 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1280 }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
1281 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1282 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1284 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1285 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1288 if(s->dct_error_sum){
1289 assert(s->avctx->noise_reduction && s->encoding);
1291 update_noise_reduction(s);
1294 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1295 return ff_xvmc_field_start(s, avctx);
1300 /* generic function for encode/decode called after a frame has been coded/decoded */
1301 void MPV_frame_end(MpegEncContext *s)
1304 /* redraw edges for the frame if decoding didn't complete */
1305 //just to make sure that all data is rendered.
1306 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1307 ff_xvmc_field_end(s);
/* pad out the edge area of reference frames so unrestricted MVs can read
 * past the picture border; skipped for hwaccel/VDPAU and EMU_EDGE */
1308 }else if((s->error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND))
1309 && !s->avctx->hwaccel
1310 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
1311 && s->unrestricted_mv
1312 && s->current_picture.f.reference
1314 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
1315 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
1316 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
1317 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1318 s->h_edge_pos , s->v_edge_pos,
1319 EDGE_WIDTH , EDGE_WIDTH , EDGE_TOP | EDGE_BOTTOM);
1320 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1321 s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
1322 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
1323 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1324 s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
1325 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
1330 s->last_pict_type = s->pict_type;
1331 s->last_lambda_for[s->pict_type] = s->current_picture_ptr->f.quality;
1332 if(s->pict_type!=AV_PICTURE_TYPE_B){
1333 s->last_non_b_pict_type= s->pict_type;
1336 /* copy back current_picture variables */
1337 for(i=0; i<MAX_PICTURE_COUNT; i++){
1338 if(s->picture[i].f.data[0] == s->current_picture.f.data[0]){
1339 s->picture[i]= s->current_picture;
1343 assert(i<MAX_PICTURE_COUNT);
1347 /* release non-reference frames */
1348 for(i=0; i<s->picture_count; i++){
1349 if (s->picture[i].f.data[0] && !s->picture[i].f.reference /*&& s->picture[i].type != FF_BUFFER_TYPE_SHARED*/) {
1350 free_frame_buffer(s, &s->picture[i]);
1354 // clear copies, to avoid confusion
1356 memset(&s->last_picture, 0, sizeof(Picture));
1357 memset(&s->next_picture, 0, sizeof(Picture));
1358 memset(&s->current_picture, 0, sizeof(Picture));
1360 s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
/* signal frame-threading consumers that this reference is fully decoded */
1362 if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
1363 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0);
1368 * Draw a line from (ex, ey) -> (sx, sy).
1369 * @param w width of the image
1370 * @param h height of the image
1371 * @param stride stride/linesize of the image
1372 * @param color color of the arrow
1374 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1377 sx= av_clip(sx, 0, w-1);
1378 sy= av_clip(sy, 0, h-1);
1379 ex= av_clip(ex, 0, w-1);
1380 ey= av_clip(ey, 0, h-1);
1382 buf[sy*stride + sx]+= color;
/* iterate along the major axis; f is the minor-axis slope in 16.16 fixed
 * point, and each pixel pair is blended by the fractional part (simple
 * anti-aliasing) */
1384 if(FFABS(ex - sx) > FFABS(ey - sy)){
1386 FFSWAP(int, sx, ex);
1387 FFSWAP(int, sy, ey);
1389 buf+= sx + sy*stride;
1391 f= ((ey-sy)<<16)/ex;
1392 for(x= 0; x <= ex; x++){
1395 buf[ y *stride + x]+= (color*(0x10000-fr))>>16;
1396 buf[(y+1)*stride + x]+= (color* fr )>>16;
1400 FFSWAP(int, sx, ex);
1401 FFSWAP(int, sy, ey);
1403 buf+= sx + sy*stride;
/* guard against ey == 0 to avoid division by zero on degenerate lines */
1405 if(ey) f= ((ex-sx)<<16)/ey;
1407 for(y= 0; y <= ey; y++){
1410 buf[y*stride + x ]+= (color*(0x10000-fr))>>16;
1411 buf[y*stride + x+1]+= (color* fr )>>16;
1417 * Draw an arrow from (ex, ey) -> (sx, sy).
1418 * @param w width of the image
1419 * @param h height of the image
1420 * @param stride stride/linesize of the image
1421 * @param color color of the arrow
1423 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* clip generously (±100 px outside the frame) so draw_line's own clipping
 * keeps the visible part of off-screen vectors */
1426 sx= av_clip(sx, -100, w+100);
1427 sy= av_clip(sy, -100, h+100);
1428 ex= av_clip(ex, -100, w+100);
1429 ey= av_clip(ey, -100, h+100);
/* only draw arrowhead wings for vectors longer than 3 pixels */
1434 if(dx*dx + dy*dy > 3*3){
1437 int length= ff_sqrt((rx*rx + ry*ry)<<8);
1439 //FIXME subpixel accuracy
1440 rx= ROUNDED_DIV(rx*3<<4, length);
1441 ry= ROUNDED_DIV(ry*3<<4, length);
1443 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1444 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1446 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1450 * Print debugging info for the given picture.
1452 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
1454 if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
/* Text dump: one character per macroblock showing skip count, qscale and/or
 * MB type depending on the enabled FF_DEBUG_* flags. */
1456 if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1459 av_log(s->avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
1460 av_get_picture_type_char(pict->pict_type));
1461 for(y=0; y<s->mb_height; y++){
1462 for(x=0; x<s->mb_width; x++){
1463 if(s->avctx->debug&FF_DEBUG_SKIP){
1464 int count= s->mbskip_table[x + y*s->mb_stride];
1465 if(count>9) count=9;
1466 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1468 if(s->avctx->debug&FF_DEBUG_QP){
1469 av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
1471 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1472 int mb_type= pict->mb_type[x + y*s->mb_stride];
1473 //Type & MV direction
1475 av_log(s->avctx, AV_LOG_DEBUG, "P");
1476 else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1477 av_log(s->avctx, AV_LOG_DEBUG, "A");
1478 else if(IS_INTRA4x4(mb_type))
1479 av_log(s->avctx, AV_LOG_DEBUG, "i");
1480 else if(IS_INTRA16x16(mb_type))
1481 av_log(s->avctx, AV_LOG_DEBUG, "I");
1482 else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1483 av_log(s->avctx, AV_LOG_DEBUG, "d");
1484 else if(IS_DIRECT(mb_type))
1485 av_log(s->avctx, AV_LOG_DEBUG, "D");
1486 else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1487 av_log(s->avctx, AV_LOG_DEBUG, "g");
1488 else if(IS_GMC(mb_type))
1489 av_log(s->avctx, AV_LOG_DEBUG, "G");
1490 else if(IS_SKIP(mb_type))
1491 av_log(s->avctx, AV_LOG_DEBUG, "S");
1492 else if(!USES_LIST(mb_type, 1))
1493 av_log(s->avctx, AV_LOG_DEBUG, ">");
1494 else if(!USES_LIST(mb_type, 0))
1495 av_log(s->avctx, AV_LOG_DEBUG, "<");
1497 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1498 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* second character: partitioning (+/-/| / space), third: interlacing */
1503 av_log(s->avctx, AV_LOG_DEBUG, "+");
1504 else if(IS_16X8(mb_type))
1505 av_log(s->avctx, AV_LOG_DEBUG, "-");
1506 else if(IS_8X16(mb_type))
1507 av_log(s->avctx, AV_LOG_DEBUG, "|");
1508 else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1509 av_log(s->avctx, AV_LOG_DEBUG, " ");
1511 av_log(s->avctx, AV_LOG_DEBUG, "?");
1514 if(IS_INTERLACED(mb_type))
1515 av_log(s->avctx, AV_LOG_DEBUG, "=");
1517 av_log(s->avctx, AV_LOG_DEBUG, " ");
1519 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1521 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* Visual overlays: motion vectors, qscale and MB type are painted directly
 * into a copy of the picture (visualization_buffer) so the decode buffers
 * themselves are not trashed. */
1525 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1526 s->avctx->debug_mv) {
1527 const int shift= 1 + s->quarter_sample;
1531 int h_chroma_shift, v_chroma_shift, block_height;
1532 const int width = s->avctx->width;
1533 const int height= s->avctx->height;
1534 const int mv_sample_log2= 4 - pict->motion_subsample_log2;
1535 const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1536 s->low_delay=0; //needed to see the vectors without trashing the buffers
1538 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1540 memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
1541 pict->data[i]= s->visualization_buffer[i];
1543 pict->type= FF_BUFFER_TYPE_COPY;
1546 block_height = 16>>v_chroma_shift;
1548 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1550 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1551 const int mb_index= mb_x + mb_y*s->mb_stride;
1552 if (s->avctx->debug_mv && pict->motion_val) {
/* type selects which MV set to draw: 0=P forward, 1=B forward, 2=B backward */
1554 for(type=0; type<3; type++){
1557 case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_P))
1561 case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1565 case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1570 if(!USES_LIST(pict->mb_type[mb_index], direction))
/* one arrow per partition: 4 for 8x8, 2 for 16x8/8x16, 1 for 16x16 */
1573 if(IS_8X8(pict->mb_type[mb_index])){
1576 int sx= mb_x*16 + 4 + 8*(i&1);
1577 int sy= mb_y*16 + 4 + 8*(i>>1);
1578 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1579 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1580 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1581 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1583 }else if(IS_16X8(pict->mb_type[mb_index])){
1587 int sy=mb_y*16 + 4 + 8*i;
1588 int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
1589 int mx=(pict->motion_val[direction][xy][0]>>shift);
1590 int my=(pict->motion_val[direction][xy][1]>>shift);
1592 if(IS_INTERLACED(pict->mb_type[mb_index]))
1595 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1597 }else if(IS_8X16(pict->mb_type[mb_index])){
1600 int sx=mb_x*16 + 4 + 8*i;
1602 int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
1603 int mx=(pict->motion_val[direction][xy][0]>>shift);
1604 int my=(pict->motion_val[direction][xy][1]>>shift);
1606 if(IS_INTERLACED(pict->mb_type[mb_index]))
1609 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1612 int sx= mb_x*16 + 8;
1613 int sy= mb_y*16 + 8;
1614 int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
1615 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1616 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1617 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
/* VIS_QP: paint the MB's chroma planes with a gray level proportional to
 * qscale (scaled from the 1..31 range into 0..255, replicated to 8 bytes) */
1621 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
1622 uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
1624 for(y=0; y<block_height; y++){
1625 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
1626 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
/* VIS_MB_TYPE: color-code each MB type via U/V computed from a hue angle */
1629 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
1630 int mb_type= pict->mb_type[mb_index];
1633 #define COLOR(theta, r)\
1634 u= (int)(128 + r*cos(theta*3.141592/180));\
1635 v= (int)(128 + r*sin(theta*3.141592/180));
1639 if(IS_PCM(mb_type)){
1641 }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
1643 }else if(IS_INTRA4x4(mb_type)){
1645 }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
1647 }else if(IS_DIRECT(mb_type)){
1649 }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
1651 }else if(IS_GMC(mb_type)){
1653 }else if(IS_SKIP(mb_type)){
1655 }else if(!USES_LIST(mb_type, 1)){
1657 }else if(!USES_LIST(mb_type, 0)){
1660 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1664 u*= 0x0101010101010101ULL;
1665 v*= 0x0101010101010101ULL;
1666 for(y=0; y<block_height; y++){
1667 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
1668 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
/* draw partition boundaries by XOR-ing the luma with 0x80 */
1672 if(IS_8X8(mb_type) || IS_16X8(mb_type)){
1673 *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1674 *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1676 if(IS_8X8(mb_type) || IS_8X16(mb_type)){
1678 pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
/* for 8x8 partitions with finer MV sampling, mark 4x4 sub-splits whose
 * sub-block MVs actually differ */
1680 if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
1681 int dm= 1 << (mv_sample_log2-2);
1683 int sx= mb_x*16 + 8*(i&1);
1684 int sy= mb_y*16 + 8*(i>>1);
1685 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1687 int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
1688 if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
1690 pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
1691 if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
1692 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
1696 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
1700 s->mbskip_table[mb_index]=0;
/* Half-pel motion compensation for one block in lowres decoding mode.
 * Splits the motion vector into an integer part (block offset into src) and
 * a sub-pel fraction (sx/sy), emulates edges when the source block would
 * read outside the padded picture, then calls the chroma MC routine
 * (reused here for luma because it supports arbitrary sub-positions). */
1706 static inline int hpel_motion_lowres(MpegEncContext *s,
1707 uint8_t *dest, uint8_t *src,
1708 int field_based, int field_select,
1709 int src_x, int src_y,
1710 int width, int height, int stride,
1711 int h_edge_pos, int v_edge_pos,
1712 int w, int h, h264_chroma_mc_func *pix_op,
1713 int motion_x, int motion_y)
1715 const int lowres= s->avctx->lowres;
1716 const int op_index= FFMIN(lowres, 2);
     /* s_mask selects the sub-pel bits that survive lowres downscaling */
1717 const int s_mask= (2<<lowres)-1;
1721 if(s->quarter_sample){
1726 sx= motion_x & s_mask;
1727 sy= motion_y & s_mask;
1728 src_x += motion_x >> (lowres+1);
1729 src_y += motion_y >> (lowres+1);
1731 src += src_y * stride + src_x;
1733 if( (unsigned)src_x > h_edge_pos - (!!sx) - w
1734 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1735 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
1736 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1737 src= s->edge_emu_buffer;
     /* rescale the sub-pel fraction into the 0..7 range expected by pix_op */
1741 sx= (sx << 2) >> lowres;
1742 sy= (sy << 2) >> lowres;
1745 pix_op[op_index](dest, src, stride, h, sx, sy);
1749 /* apply one mpeg motion vector to the three components */
/* Lowres variant of the standard MPEG MC: computes luma and chroma source
 * positions from one MV (chroma handling depends on out_format and the
 * chroma shifts), emulates edges when needed, and applies the chroma MC
 * functions on all three planes. field_based/bottom_field/field_select
 * implement field-picture addressing via doubled linesizes. */
1750 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
1751 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1752 int field_based, int bottom_field, int field_select,
1753 uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
1754 int motion_x, int motion_y, int h, int mb_y)
1756 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1757 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
1758 const int lowres= s->avctx->lowres;
1759 const int op_index= FFMIN(lowres-1+s->chroma_x_shift, 2);
1760 const int block_s= 8>>lowres;
1761 const int s_mask= (2<<lowres)-1;
1762 const int h_edge_pos = s->h_edge_pos >> lowres;
1763 const int v_edge_pos = s->v_edge_pos >> lowres;
1764 linesize = s->current_picture.f.linesize[0] << field_based;
1765 uvlinesize = s->current_picture.f.linesize[1] << field_based;
1767 if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
/* compensate the vertical field offset lost by lowres downscaling */
1773 motion_y += (bottom_field - field_select)*((1<<lowres)-1);
1776 sx= motion_x & s_mask;
1777 sy= motion_y & s_mask;
1778 src_x = s->mb_x*2*block_s + (motion_x >> (lowres+1));
1779 src_y =( mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
1781 if (s->out_format == FMT_H263) {
1782 uvsx = ((motion_x>>1) & s_mask) | (sx&1);
1783 uvsy = ((motion_y>>1) & s_mask) | (sy&1);
1786 }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
1789 uvsx = (2*mx) & s_mask;
1790 uvsy = (2*my) & s_mask;
1791 uvsrc_x = s->mb_x*block_s + (mx >> lowres);
1792 uvsrc_y = mb_y*block_s + (my >> lowres);
1794 if(s->chroma_y_shift){
1799 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1800 uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
1802 if(s->chroma_x_shift){
1806 uvsy = motion_y & s_mask;
1808 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1811 uvsx = motion_x & s_mask;
1812 uvsy = motion_y & s_mask;
1819 ptr_y = ref_picture[0] + src_y * linesize + src_x;
1820 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1821 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* fall back to the edge-emulation buffer when the MV reads out of bounds */
1823 if( (unsigned)src_x > h_edge_pos - (!!sx) - 2*block_s
1824 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1825 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
1826 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1827 ptr_y = s->edge_emu_buffer;
1828 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1829 uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
1830 s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9+field_based,
1831 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1832 s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
1833 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1839 if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
1840 dest_y += s->linesize;
1841 dest_cb+= s->uvlinesize;
1842 dest_cr+= s->uvlinesize;
1846 ptr_y += s->linesize;
1847 ptr_cb+= s->uvlinesize;
1848 ptr_cr+= s->uvlinesize;
1851 sx= (sx << 2) >> lowres;
1852 sy= (sy << 2) >> lowres;
1853 pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);
1855 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1856 uvsx= (uvsx << 2) >> lowres;
1857 uvsy= (uvsy << 2) >> lowres;
1858 if(h >> s->chroma_y_shift){
1859 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1860 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1863 //FIXME h261 lowres loop filter
/* Chroma motion compensation for 4MV macroblocks in lowres mode: the four
 * luma MVs are combined into a single chroma MV using the H.263 chroma
 * rounding rule, then both chroma planes are motion compensated. */
1866 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
1867 uint8_t *dest_cb, uint8_t *dest_cr,
1868 uint8_t **ref_picture,
1869 h264_chroma_mc_func *pix_op,
1871 const int lowres= s->avctx->lowres;
1872 const int op_index= FFMIN(lowres, 2);
1873 const int block_s= 8>>lowres;
1874 const int s_mask= (2<<lowres)-1;
1875 const int h_edge_pos = s->h_edge_pos >> (lowres+1);
1876 const int v_edge_pos = s->v_edge_pos >> (lowres+1);
1877 int emu=0, src_x, src_y, offset, sx, sy;
1880 if(s->quarter_sample){
1885 /* In case of 8X8, we construct a single chroma motion vector
1886 with a special rounding */
1887 mx= ff_h263_round_chroma(mx);
1888 my= ff_h263_round_chroma(my);
1892 src_x = s->mb_x*block_s + (mx >> (lowres+1));
1893 src_y = s->mb_y*block_s + (my >> (lowres+1));
1895 offset = src_y * s->uvlinesize + src_x;
1896 ptr = ref_picture[1] + offset;
1897 if(s->flags&CODEC_FLAG_EMU_EDGE){
1898 if( (unsigned)src_x > h_edge_pos - (!!sx) - block_s
1899 || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
1900 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1901 ptr= s->edge_emu_buffer;
1905 sx= (sx << 2) >> lowres;
1906 sy= (sy << 2) >> lowres;
1907 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr uses the same offset and (if needed) the same edge-emulated source */
1909 ptr = ref_picture[2] + offset;
1911 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1912 ptr= s->edge_emu_buffer;
1914 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
1918 * motion compensation of a single macroblock
1920 * @param dest_y luma destination pointer
1921 * @param dest_cb chroma cb/u destination pointer
1922 * @param dest_cr chroma cr/v destination pointer
1923 * @param dir direction (0->forward, 1->backward)
1924 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1925 * @param pix_op halfpel motion compensation function (average or put normally)
1926 * the motion vectors are taken from s->mv and the MV type from s->mv_type
1928 static inline void MPV_motion_lowres(MpegEncContext *s,
1929 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1930 int dir, uint8_t **ref_picture,
1931 h264_chroma_mc_func *pix_op)
1935 const int lowres= s->avctx->lowres;
1936 const int block_s= 8>>lowres;
/* dispatch on MV type: 16x16, 8x8 (4MV), field, 16x8 and dual-prime cases */
1941 switch(s->mv_type) {
1943 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1945 ref_picture, pix_op,
1946 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
/* 4MV: one luma MC call per 8x8 sub-block; chroma gets a combined MV below */
1952 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
1953 ref_picture[0], 0, 0,
1954 (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
1955 s->width, s->height, s->linesize,
1956 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
1957 block_s, block_s, pix_op,
1958 s->mv[dir][i][0], s->mv[dir][i][1]);
1960 mx += s->mv[dir][i][0];
1961 my += s->mv[dir][i][1];
1964 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
1965 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
1968 if (s->picture_structure == PICT_FRAME) {
/* frame picture with field MVs: one half-height MC per field */
1970 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1971 1, 0, s->field_select[dir][0],
1972 ref_picture, pix_op,
1973 s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
1975 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1976 1, 1, s->field_select[dir][1],
1977 ref_picture, pix_op,
1978 s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
/* field picture referencing the opposite parity of the current frame */
1980 if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
1981 ref_picture = s->current_picture_ptr->f.data;
1984 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1985 0, 0, s->field_select[dir][0],
1986 ref_picture, pix_op,
1987 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
1992 uint8_t ** ref2picture;
1994 if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
1995 ref2picture= ref_picture;
1997 ref2picture = s->current_picture_ptr->f.data;
2000 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2001 0, 0, s->field_select[dir][i],
2002 ref2picture, pix_op,
2003 s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);
2005 dest_y += 2*block_s*s->linesize;
2006 dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
2007 dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
2011 if(s->picture_structure == PICT_FRAME){
2015 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2017 ref_picture, pix_op,
2018 s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
2020 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
2024 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2025 0, 0, s->picture_structure != i+1,
2026 ref_picture, pix_op,
2027 s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
2029 // after put we make avg of the same block
2030 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
2032 //opposite parity is always in the same frame if this is second field
2033 if(!s->first_field){
2034 ref_picture = s->current_picture_ptr->f.data;
2044 * find the lowest MB row referenced in the MVs
/* Used by frame-threaded decoding to know how far the reference frame must
 * be decoded before this MB row can be motion compensated. Returns the
 * bottom-most referenced MB row, clamped to the picture; field pictures
 * fall through to "whole frame needed". */
2046 int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
2048 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2049 int my, off, i, mvs;
2051 if (s->picture_structure != PICT_FRAME) goto unhandled;
2053 switch (s->mv_type) {
2067 for (i = 0; i < mvs; i++) {
     /* normalize half-pel MVs to quarter-pel units before comparing */
2068 my = s->mv[dir][i][1]<<qpel_shift;
2069 my_max = FFMAX(my_max, my);
2070 my_min = FFMIN(my_min, my);
     /* worst-case vertical excursion in MB rows (64 qpel units per 16 px) */
2073 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2075 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2077 return s->mb_height-1;
2080 /* put block[] to dest[] */
/* Dequantize an intra block and write (overwrite) the IDCT result to dest. */
2081 static inline void put_dct(MpegEncContext *s,
2082 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2084 s->dct_unquantize_intra(s, block, i, qscale);
2085 s->dsp.idct_put (dest, line_size, block);
2088 /* add block[] to dest[] */
/* Add the IDCT of an already-dequantized block to dest; a negative
 * block_last_index means the block has no coded coefficients and is skipped. */
2089 static inline void add_dct(MpegEncContext *s,
2090 DCTELEM *block, int i, uint8_t *dest, int line_size)
2092 if (s->block_last_index[i] >= 0) {
2093 s->dsp.idct_add (dest, line_size, block);
/* Dequantize an inter block and add its IDCT to dest; skipped when the
 * block has no coded coefficients (block_last_index < 0). */
2097 static inline void add_dequant_dct(MpegEncContext *s,
2098 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2100 if (s->block_last_index[i] >= 0) {
2101 s->dct_unquantize_inter(s, block, i, qscale);
2103 s->dsp.idct_add (dest, line_size, block);
2108 * Clean dc, ac, coded_block for the current non-intra MB.
/* Resets the intra prediction state (DC predictors to 1024, AC values to 0,
 * coded_block flags for MSMPEG4 v3+) for the current macroblock position so
 * a following intra MB does not predict from stale non-intra data. */
2110 void ff_clean_intra_table_entries(MpegEncContext *s)
2112 int wrap = s->b8_stride;
2113 int xy = s->block_index[0];
/* luma: four 8x8 blocks of the MB, addressed at b8 resolution */
2116 s->dc_val[0][xy + 1 ] =
2117 s->dc_val[0][xy + wrap] =
2118 s->dc_val[0][xy + 1 + wrap] = 1024;
2120 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2121 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2122 if (s->msmpeg4_version>=3) {
2123 s->coded_block[xy ] =
2124 s->coded_block[xy + 1 ] =
2125 s->coded_block[xy + wrap] =
2126 s->coded_block[xy + 1 + wrap] = 0;
/* chroma: one block per plane, addressed at MB resolution */
2129 wrap = s->mb_stride;
2130 xy = s->mb_x + s->mb_y * wrap;
2132 s->dc_val[2][xy] = 1024;
2134 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2135 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2137 s->mbintra_table[xy]= 0;
2140 /* generic function called after a macroblock has been parsed by the
2141 decoder or after it has been encoded by the encoder.
2143 Important variables used:
2144 s->mb_intra : true if intra macroblock
2145 s->mv_dir : motion vector direction
2146 s->mv_type : motion vector type
2147 s->mv : motion vector
2148 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* Reconstruct one macroblock: motion compensation plus dequant/IDCT of
 * the residue, writing pixels into s->dest[] (or a scratchpad for
 * non-displayed B-frame MBs). Specialized at compile time by the
 * lowres_flag and is_mpeg12 constants (see MPV_decode_mb). */
2150 static av_always_inline
2151 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
2152 int lowres_flag, int is_mpeg12)
2154 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2155 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2156 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2160 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2161 /* save DCT coefficients */
/* dump the (un-permuted) coefficients of all 6 blocks for debugging */
2163 DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
2164 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2166 for(j=0; j<64; j++){
2167 *dct++ = block[i][s->dsp.idct_permutation[j]];
2168 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2170 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2174 s->current_picture.f.qscale_table[mb_xy] = s->qscale;
2176 /* update DC predictors for P macroblocks */
2178 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2179 if(s->mbintra_table[mb_xy])
2180 ff_clean_intra_table_entries(s);
2184 s->last_dc[2] = 128 << s->intra_dc_precision;
2187 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2188 s->mbintra_table[mb_xy]=1;
2190 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2191 uint8_t *dest_y, *dest_cb, *dest_cr;
2192 int dct_linesize, dct_offset;
2193 op_pixels_func (*op_pix)[4];
2194 qpel_mc_func (*op_qpix)[16];
2195 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2196 const int uvlinesize = s->current_picture.f.linesize[1];
2197 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2198 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2200 /* avoid copy if macroblock skipped in last frame too */
2201 /* skip only during decoding as we might trash the buffers during encoding a bit */
2203 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2204 const int age = s->current_picture.f.age;
2208 if (s->mb_skipped) {
2210 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2212 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
2213 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2215 /* if previous was skipped too, then nothing to do ! */
2216 if (*mbskip_ptr >= age && s->current_picture.f.reference){
2219 } else if(!s->current_picture.f.reference) {
2220 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
2221 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2223 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT: double the stride and make the second block pair
 * start one (field) line down instead of block_size lines down */
2227 dct_linesize = linesize << s->interlaced_dct;
2228 dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
2232 dest_cb= s->dest[1];
2233 dest_cr= s->dest[2];
/* non-readable MBs (e.g. undisplayed B-frame output) are rendered
 * into the scratchpad and copied out at the end */
2235 dest_y = s->b_scratchpad;
2236 dest_cb= s->b_scratchpad+16*linesize;
2237 dest_cr= s->b_scratchpad+32*linesize;
2241 /* motion handling */
2242 /* decoding or more than one mb_type (MC was already done otherwise) */
/* frame threading: wait until the reference frames are decoded far
 * enough for the rows this MB's motion vectors reach */
2245 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2246 if (s->mv_dir & MV_DIR_FORWARD) {
2247 ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
2249 if (s->mv_dir & MV_DIR_BACKWARD) {
2250 ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
/* lowres motion compensation path: put for the first direction, then
 * avg to blend in the backward direction of a bidirectional MB */
2255 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
2257 if (s->mv_dir & MV_DIR_FORWARD) {
2258 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2259 op_pix = s->dsp.avg_h264_chroma_pixels_tab;
2261 if (s->mv_dir & MV_DIR_BACKWARD) {
2262 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
/* full-resolution MC: same put-then-avg scheme, with a no-rounding
 * variant when the codec requests it (except for B-frames) */
2265 op_qpix= s->me.qpel_put;
2266 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2267 op_pix = s->dsp.put_pixels_tab;
2269 op_pix = s->dsp.put_no_rnd_pixels_tab;
2271 if (s->mv_dir & MV_DIR_FORWARD) {
2272 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2273 op_pix = s->dsp.avg_pixels_tab;
2274 op_qpix= s->me.qpel_avg;
2276 if (s->mv_dir & MV_DIR_BACKWARD) {
2277 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2282 /* skip dequant / idct if we are really late ;) */
2283 if(s->avctx->skip_idct){
2284 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2285 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2286 || s->avctx->skip_idct >= AVDISCARD_ALL)
2290 /* add dct residue */
/* codecs not covered by the branches below carry the dequantization
 * inside their residue, so dequant+add is needed here */
2291 if(s->encoding || !( s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
2292 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2293 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2294 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2295 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2296 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2298 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2299 if (s->chroma_y_shift){
2300 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2301 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2305 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2306 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2307 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2308 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* blocks are already dequantized by the bitstream decoder: add only */
2311 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
2312 add_dct(s, block[0], 0, dest_y , dct_linesize);
2313 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2314 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2315 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2317 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2318 if(s->chroma_y_shift){//Chroma420
2319 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2320 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2323 dct_linesize = uvlinesize << s->interlaced_dct;
2324 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*block_size;
2326 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2327 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2328 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2329 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2330 if(!s->chroma_x_shift){//Chroma444
2331 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2332 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2333 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2334 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2339 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2340 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2343 /* dct only in intra block */
/* intra MB: no prediction to add onto, so put instead of add */
2344 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2345 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2346 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2347 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2348 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2350 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2351 if(s->chroma_y_shift){
2352 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2353 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2357 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2358 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2359 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2360 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* MPEG-1/2 intra: blocks were already dequantized, straight IDCT put */
2364 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2365 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2366 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2367 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2369 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2370 if(s->chroma_y_shift){
2371 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2372 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2375 dct_linesize = uvlinesize << s->interlaced_dct;
2376 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*block_size;
2378 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2379 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2380 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2381 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2382 if(!s->chroma_x_shift){//Chroma444
2383 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2384 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2385 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2386 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
/* non-readable MB: copy the scratchpad result into the real frame */
2394 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2395 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2396 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public entry point: dispatch to the compile-time specialized variant
 * of MPV_decode_mb_internal, selected by MPEG-1/2 vs other codecs and
 * by whether low-resolution decoding is active. The constant flags let
 * the compiler prune dead branches in each instantiation. */
2401 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2403 if(s->out_format == FMT_MPEG1) {
2404 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2405 else MPV_decode_mb_internal(s, block, 0, 1);
2408 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2409 else MPV_decode_mb_internal(s, block, 0, 0);
2413 * @param h is the normal height, this will be reduced automatically if needed for the last row
2415 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2416 const int field_pic= s->picture_structure != PICT_FRAME;
/* pad the picture edges of this band (needed for unrestricted motion
 * vectors) unless hardware acceleration or EMU_EDGE handles it */
2422 if (!s->avctx->hwaccel
2423 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2424 && s->unrestricted_mv
2425 && s->current_picture.f.reference
2427 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2428 int sides = 0, edge_h;
2429 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
2430 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
2431 if (y==0) sides |= EDGE_TOP;
2432 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
2434 edge_h= FFMIN(h, s->v_edge_pos - y);
2436 s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize,
2437 s->linesize, s->h_edge_pos, edge_h,
2438 EDGE_WIDTH, EDGE_WIDTH, sides);
2439 s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
2440 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2441 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2442 s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
2443 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2444 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
/* clip the band height to the visible picture */
2447 h= FFMIN(h, s->avctx->height - y);
2449 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
/* notify the application that a horizontal band is ready for display */
2451 if (s->avctx->draw_horiz_band) {
2453 int offset[AV_NUM_DATA_POINTERS];
/* pick which picture to show: current for B-frames / low delay /
 * coded order, otherwise the last (already reordered) picture */
2456 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2457 src= (AVFrame*)s->current_picture_ptr;
2458 else if(s->last_picture_ptr)
2459 src= (AVFrame*)s->last_picture_ptr;
2463 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2464 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
/* per-plane byte offsets of this band inside the frame */
2467 offset[0]= y * s->linesize;
2469 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2470 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2476 s->avctx->draw_horiz_band(s->avctx, src, offset,
2477 y, s->picture_structure, h);
/* Set up s->block_index[] (positions of the six blocks of the current
 * MB in the 8x8-block grids) and s->dest[] (pixel pointers for the
 * current MB row) before decoding/encoding a macroblock row. */
2481 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2482 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2483 const int uvlinesize = s->current_picture.f.linesize[1];
2484 const int mb_size= 4 - s->avctx->lowres;
/* luma: four 8x8 blocks addressed in the b8_stride grid; the "-2"/"-1"
 * offsets pre-compensate for the left neighbour used in prediction */
2486 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2487 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2488 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2489 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* chroma: one block per plane, stored after the luma grid */
2490 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2491 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2492 //block_index is not used by mpeg2, so it is not affected by chroma_format
2494 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2495 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2496 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2498 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2500 if(s->picture_structure==PICT_FRAME){
2501 s->dest[0] += s->mb_y * linesize << mb_size;
2502 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2503 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: mb_y counts both fields, so halve it for the offset */
2505 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2506 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2507 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2508 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/* Flush the decoder state (e.g. on seek): release all internally or
 * user-allocated picture buffers, drop the reference-picture pointers,
 * and reset the bitstream parser state. */
2513 void ff_mpeg_flush(AVCodecContext *avctx){
2515 MpegEncContext *s = avctx->priv_data;
2517 if(s==NULL || s->picture==NULL)
2520 for(i=0; i<s->picture_count; i++){
2521 if (s->picture[i].f.data[0] &&
2522 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2523 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2524 free_frame_buffer(s, &s->picture[i]);
2526 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2528 s->mb_x= s->mb_y= 0;
/* reset parser so leftover bytes from before the flush are discarded */
2531 s->parse_context.state= -1;
2532 s->parse_context.frame_start_found= 0;
2533 s->parse_context.overread= 0;
2534 s->parse_context.overread_index= 0;
2535 s->parse_context.index= 0;
2536 s->parse_context.last_index= 0;
2537 s->bitstream_buffer_size=0;
/* MPEG-1 intra dequantization: DC is scaled by the y/c DC scale; each
 * AC level is multiplied by qscale and the intra matrix entry (>>3),
 * then forced odd via (level-1)|1 — MPEG-1 "mismatch control". */
2541 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2542 DCTELEM *block, int n, int qscale)
2544 int i, level, nCoeffs;
2545 const uint16_t *quant_matrix;
2547 nCoeffs= s->block_last_index[n];
/* n selects the plane: luma blocks use y_dc_scale, chroma c_dc_scale */
2550 block[0] = block[0] * s->y_dc_scale;
2552 block[0] = block[0] * s->c_dc_scale;
2553 /* XXX: only mpeg1 */
2554 quant_matrix = s->intra_matrix;
2555 for(i=1;i<=nCoeffs;i++) {
2556 int j= s->intra_scantable.permutated[i];
/* the two branches handle negative and positive levels symmetrically */
2561 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2562 level = (level - 1) | 1;
2565 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2566 level = (level - 1) | 1;
/* MPEG-1 inter dequantization: level' = ((2*|level|+1) * qscale *
 * matrix[j]) >> 4, forced odd via (level-1)|1 (mismatch control).
 * Starts at i=0 because inter blocks have no special DC handling. */
2573 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2574 DCTELEM *block, int n, int qscale)
2576 int i, level, nCoeffs;
2577 const uint16_t *quant_matrix;
2579 nCoeffs= s->block_last_index[n];
2581 quant_matrix = s->inter_matrix;
2582 for(i=0; i<=nCoeffs; i++) {
2583 int j= s->intra_scantable.permutated[i];
/* negative and positive levels are processed symmetrically */
2588 level = (((level << 1) + 1) * qscale *
2589 ((int) (quant_matrix[j]))) >> 4;
2590 level = (level - 1) | 1;
2593 level = (((level << 1) + 1) * qscale *
2594 ((int) (quant_matrix[j]))) >> 4;
2595 level = (level - 1) | 1;
/* MPEG-2 intra dequantization: like the MPEG-1 variant but without the
 * per-coefficient oddification, and with alternate_scan forcing all 63
 * AC coefficients to be processed. */
2602 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2603 DCTELEM *block, int n, int qscale)
2605 int i, level, nCoeffs;
2606 const uint16_t *quant_matrix;
2608 if(s->alternate_scan) nCoeffs= 63;
2609 else nCoeffs= s->block_last_index[n];
/* n selects the plane: luma blocks use y_dc_scale, chroma c_dc_scale */
2612 block[0] = block[0] * s->y_dc_scale;
2614 block[0] = block[0] * s->c_dc_scale;
2615 quant_matrix = s->intra_matrix;
2616 for(i=1;i<=nCoeffs;i++) {
2617 int j= s->intra_scantable.permutated[i];
/* symmetric handling of negative and positive levels */
2622 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2625 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of MPEG-2 intra dequantization; same arithmetic as
 * dct_unquantize_mpeg2_intra_c on the visible lines (the bitexact
 * difference is presumably in mismatch-control accumulation — TODO
 * confirm against the full source). */
2632 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2633 DCTELEM *block, int n, int qscale)
2635 int i, level, nCoeffs;
2636 const uint16_t *quant_matrix;
2639 if(s->alternate_scan) nCoeffs= 63;
2640 else nCoeffs= s->block_last_index[n];
/* n selects the plane: luma blocks use y_dc_scale, chroma c_dc_scale */
2643 block[0] = block[0] * s->y_dc_scale;
2645 block[0] = block[0] * s->c_dc_scale;
2646 quant_matrix = s->intra_matrix;
2647 for(i=1;i<=nCoeffs;i++) {
2648 int j= s->intra_scantable.permutated[i];
/* symmetric handling of negative and positive levels */
2653 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2656 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* MPEG-2 inter dequantization: level' = ((2*|level|+1) * qscale *
 * matrix[j]) >> 4, no per-coefficient oddification (unlike MPEG-1).
 * alternate_scan forces processing of all 64 coefficients. */
2665 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2666 DCTELEM *block, int n, int qscale)
2668 int i, level, nCoeffs;
2669 const uint16_t *quant_matrix;
2672 if(s->alternate_scan) nCoeffs= 63;
2673 else nCoeffs= s->block_last_index[n];
2675 quant_matrix = s->inter_matrix;
2676 for(i=0; i<=nCoeffs; i++) {
2677 int j= s->intra_scantable.permutated[i];
/* symmetric handling of negative and positive levels */
2682 level = (((level << 1) + 1) * qscale *
2683 ((int) (quant_matrix[j]))) >> 4;
2686 level = (((level << 1) + 1) * qscale *
2687 ((int) (quant_matrix[j]))) >> 4;
/* H.263-style intra dequantization: DC scaled by y/c DC scale; AC
 * reconstructed as level*qmul +/- qadd, where qadd = (qscale-1)|1 is
 * the standard H.263 odd offset. */
2696 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2697 DCTELEM *block, int n, int qscale)
2699 int i, level, qmul, qadd;
2702 assert(s->block_last_index[n]>=0);
/* n selects the plane: luma blocks use y_dc_scale, chroma c_dc_scale */
2708 block[0] = block[0] * s->y_dc_scale;
2710 block[0] = block[0] * s->c_dc_scale;
2711 qadd = (qscale - 1) | 1;
/* coefficients are in raster order here, hence raster_end lookup */
2718 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2720 for(i=1; i<=nCoeffs; i++) {
/* subtract qadd for negative levels, add it for positive ones */
2724 level = level * qmul - qadd;
2726 level = level * qmul + qadd;
/* H.263-style inter dequantization: identical AC reconstruction to the
 * intra variant (level*qmul +/- qadd with qadd = (qscale-1)|1) but the
 * loop starts at i=0 since there is no separate DC handling. */
2733 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2734 DCTELEM *block, int n, int qscale)
2736 int i, level, qmul, qadd;
2739 assert(s->block_last_index[n]>=0);
2741 qadd = (qscale - 1) | 1;
/* coefficients are in raster order here, hence raster_end lookup */
2744 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2746 for(i=0; i<=nCoeffs; i++) {
/* subtract qadd for negative levels, add it for positive ones */
2750 level = level * qmul - qadd;
2752 level = level * qmul + qadd;
2760 * set qscale and update qscale dependent variables.
2762 void ff_set_qscale(MpegEncContext * s, int qscale)
/* clamp qscale to the legal range (upper bound 31 visible here;
 * NOTE(review): the lower-bound clamp is outside this view) */
2766 else if (qscale > 31)
/* derive the chroma quantizer and the DC scale factors via the
 * codec-provided lookup tables */
2770 s->chroma_qscale= s->chroma_qscale_table[qscale];
2772 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2773 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2776 void MPV_report_decode_progress(MpegEncContext *s)
2778 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
2779 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);