2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
40 #include "xvmc_internal.h"
/* Forward declarations: per-standard DCT coefficient dequantizers
 * (MPEG-1, MPEG-2, H.263; intra/inter variants). ff_dct_common_init()
 * below installs these into MpegEncContext as function pointers, which
 * architecture-specific init code may later override. */
47 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
56 DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
58 DCTELEM *block, int n, int qscale);
59 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
60 DCTELEM *block, int n, int qscale);
63 /* enable all paranoid tests for rounding, overflows, etc... */
/* Default chroma qscale table: the identity mapping, i.e. chroma uses
 * the same qscale as luma for all 32 possible qscale values.
 * NOTE(review): the closing "};" of this initializer is on a line not
 * visible in this excerpt. */
69 static const uint8_t ff_default_chroma_qscale_table[32] = {
70 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
71 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
72 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC scale table: constant 8 for every qscale (128 entries).
 * Also reused as entry 0 of ff_mpeg2_dc_scale_table below.
 * NOTE(review): the closing "};" is on a line not visible here. */
75 const uint8_t ff_mpeg1_dc_scale_table[128] = {
76 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
82 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
83 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
84 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale table, constant 4 (used via ff_mpeg2_dc_scale_table
 * entry 1 below). NOTE(review): closing "};" not visible here. */
87 static const uint8_t mpeg2_dc_scale_table1[128] = {
88 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
89 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
94 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
95 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
96 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale table, constant 2 (used via ff_mpeg2_dc_scale_table
 * entry 2 below). NOTE(review): closing "};" not visible here. */
99 static const uint8_t mpeg2_dc_scale_table2[128] = {
100 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
101 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
106 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
107 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
108 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale table, constant 1 (used via ff_mpeg2_dc_scale_table
 * entry 3 below). NOTE(review): closing "};" not visible here. */
111 static const uint8_t mpeg2_dc_scale_table3[128] = {
112 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
113 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
117 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
118 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
119 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
120 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Table of the four DC scale tables above (scales 8, 4, 2, 1).
 * NOTE(review): presumably indexed by the MPEG-2 intra_dc_precision
 * field (0..3) — confirm against callers; closing "};" not visible
 * in this excerpt. */
123 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
124 ff_mpeg1_dc_scale_table,
125 mpeg2_dc_scale_table1,
126 mpeg2_dc_scale_table2,
127 mpeg2_dc_scale_table3,
/* Pixel-format lists advertised by 4:2:0 decoders (plain and
 * hwaccel-capable variants).
 * NOTE(review): both initializer bodies and closing "};" lines are
 * elided from this excerpt — only the array heads are visible. */
130 const enum PixelFormat ff_pixfmt_list_420[] = {
135 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
/* Scan the buffer for an MPEG start code (0x000001xx), keeping the
 * most recently seen bytes in *state so that a start code split across
 * two consecutive buffers is still detected on the next call.
 * The p[-1]/p[-2]/p[-3] tests skip ahead past bytes that cannot begin
 * a start code. On failure the return is clamped via FFMIN(p, end).
 * NOTE(review): several interior lines of this function are elided in
 * this excerpt; the exact return contract should be confirmed against
 * the full source. */
143 const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
145 uint32_t * restrict state)
153 for (i = 0; i < 3; i++) {
154 uint32_t tmp = *state << 8;
155 *state = tmp + *(p++);
156 if (tmp == 0x100 || p == end)
161 if (p[-1] > 1 ) p += 3;
162 else if (p[-2] ) p += 2;
163 else if (p[-3]|(p[-1]-1)) p++;
170 p = FFMIN(p, end) - 4;
176 /* init common dct for both encoder and decoder */
177 av_cold int ff_dct_common_init(MpegEncContext *s)
/* Install the portable C dequantizers; the MPEG-2 intra variant is
 * swapped for the bitexact one when CODEC_FLAG_BITEXACT is set. */
179 dsputil_init(&s->dsp, s->avctx);
181 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
182 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
183 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
184 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
185 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
186 if (s->flags & CODEC_FLAG_BITEXACT)
187 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
188 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Per-architecture overrides of the C defaults.
 * NOTE(review): the surrounding #if HAVE_.../ARCH_... guards are on
 * lines elided from this excerpt. */
191 MPV_common_init_mmx(s);
193 MPV_common_init_axp(s);
195 MPV_common_init_mlib(s);
197 MPV_common_init_mmi(s);
199 MPV_common_init_arm(s);
201 MPV_common_init_altivec(s);
203 MPV_common_init_bfin(s);
206 /* load & permutate scantables
207 * note: only wmv uses different ones
/* alternate_scan selects the alternate-vertical order for both
 * inter and intra tables; otherwise zigzag is used. */
209 if (s->alternate_scan) {
210 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
211 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
213 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
214 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
216 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
217 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Copy src into dst and mark the result as a copy buffer so it is not
 * released through the regular buffer-release path.
 * NOTE(review): the copying statement itself is on a line elided from
 * this excerpt; only the type tagging is visible. */
222 void ff_copy_picture(Picture *dst, Picture *src)
225 dst->f.type = FF_BUFFER_TYPE_COPY;
229 * Release a frame buffer
231 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
233 /* Windows Media Image codecs allocate internal buffers with different
234 * dimensions; ignore user defined callbacks for these
/* For all codecs except WMV3IMAGE/VC1IMAGE release via the
 * thread-aware path; the image codecs fall through to the default
 * release (the else line sits on an elided line of this excerpt). */
236 if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
237 ff_thread_release_buffer(s->avctx, (AVFrame *) pic);
239 avcodec_default_release_buffer(s->avctx, (AVFrame *) pic);
/* hwaccel private data is owned by us, not by the buffer callbacks. */
240 av_freep(&pic->f.hwaccel_picture_private);
244 * Allocate a frame buffer
246 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* Allocate hwaccel per-picture private data first, if the hwaccel
 * declares any. */
250 if (s->avctx->hwaccel) {
251 assert(!pic->f.hwaccel_picture_private);
252 if (s->avctx->hwaccel->priv_data_size) {
253 pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
254 if (!pic->f.hwaccel_picture_private) {
255 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
/* Same WMV3IMAGE/VC1IMAGE split as in free_frame_buffer(): those
 * codecs bypass the thread-aware get_buffer. */
261 if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
262 r = ff_thread_get_buffer(s->avctx, (AVFrame *) pic);
264 r = avcodec_default_get_buffer(s->avctx, (AVFrame *) pic);
/* Sanity-check what the (possibly user-supplied) get_buffer returned. */
266 if (r < 0 || !pic->f.age || !pic->f.type || !pic->f.data[0]) {
267 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n",
268 r, pic->f.age, pic->f.type, pic->f.data[0]);
269 av_freep(&pic->f.hwaccel_picture_private);
/* Strides must stay constant across frames once established ... */
273 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
274 s->uvlinesize != pic->f.linesize[1])) {
275 av_log(s->avctx, AV_LOG_ERROR,
276 "get_buffer() failed (stride changed)\n");
277 free_frame_buffer(s, pic);
/* ... and both chroma planes must share one stride. */
281 if (pic->f.linesize[1] != pic->f.linesize[2]) {
282 av_log(s->avctx, AV_LOG_ERROR,
283 "get_buffer() failed (uv stride mismatch)\n");
284 free_frame_buffer(s, pic);
292 * allocates a Picture
293 * The pixels are allocated/set by calling get_buffer() if shared = 0
295 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
297 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
299 // the + 1 is needed so memset(,,stride*height) does not sig11
301 const int mb_array_size = s->mb_stride * s->mb_height;
302 const int b8_array_size = s->b8_stride * s->mb_height * 2;
303 const int b4_array_size = s->b4_stride * s->mb_height * 4;
/* shared != 0: caller supplies the pixel buffers; just tag the type.
 * Otherwise pixels come from alloc_frame_buffer() below (the branch
 * structure sits on elided lines of this excerpt). */
308 assert(pic->f.data[0]);
309 assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
310 pic->f.type = FF_BUFFER_TYPE_SHARED;
312 assert(!pic->f.data[0]);
314 if (alloc_frame_buffer(s, pic) < 0)
317 s->linesize = pic->f.linesize[0];
318 s->uvlinesize = pic->f.linesize[1];
/* First-time per-picture table allocation (skipped when
 * qscale_table already exists). */
321 if (pic->f.qscale_table == NULL) {
/* NOTE(review): in the full source these three encoder statistics
 * tables are conditional on s->encoding — the guard line is elided
 * here; verify before relying on this. */
323 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
324 mb_array_size * sizeof(int16_t), fail)
325 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
326 mb_array_size * sizeof(int16_t), fail)
327 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
328 mb_array_size * sizeof(int8_t ), fail)
331 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
332 mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
333 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
334 (big_mb_num + s->mb_stride) * sizeof(uint8_t),
336 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
337 (big_mb_num + s->mb_stride) * sizeof(uint32_t),
/* Public pointers are offset into the bases so that -1 row/column
 * accesses during prediction stay inside the allocation. */
339 pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
340 pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
/* H.264 stores motion vectors at 4x4 granularity (subsample_log2 == 2),
 * everything else at 8x8 (subsample_log2 == 3). */
341 if (s->out_format == FMT_H264) {
342 for (i = 0; i < 2; i++) {
343 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
344 2 * (b4_array_size + 4) * sizeof(int16_t),
346 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
347 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
348 4 * mb_array_size * sizeof(uint8_t), fail)
350 pic->f.motion_subsample_log2 = 2;
351 } else if (s->out_format == FMT_H263 || s->encoding ||
352 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
353 for (i = 0; i < 2; i++) {
354 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
355 2 * (b8_array_size + 4) * sizeof(int16_t),
357 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
358 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
359 4 * mb_array_size * sizeof(uint8_t), fail)
361 pic->f.motion_subsample_log2 = 3;
363 if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
364 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
365 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
367 pic->f.qstride = s->mb_stride;
368 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
369 1 * sizeof(AVPanScan), fail)
372 /* It might be nicer if the application would keep track of these
373 * but it would require an API change. */
/* Shift the picture-type history ring and use it to raise f.age for
 * pictures only preceded by B-frames. */
374 memmove(s->prev_pict_types + 1, s->prev_pict_types,
375 PREV_PICT_TYPES_BUFFER_SIZE-1);
376 s->prev_pict_types[0] = s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
377 if (pic->f.age < PREV_PICT_TYPES_BUFFER_SIZE &&
378 s->prev_pict_types[pic->f.age] == AV_PICTURE_TYPE_B)
379 pic->f.age = INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2
380 // and it is a bit tricky to skip them anyway.
384 fail: // for the FF_ALLOCZ_OR_GOTO macro
386 free_frame_buffer(s, pic);
391 * deallocates a picture
393 static void free_picture(MpegEncContext *s, Picture *pic)
/* Release pixel buffers only for pictures we allocated ourselves;
 * shared pictures get their data[] pointers cleared below instead. */
397 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
398 free_frame_buffer(s, pic);
/* Free every per-picture table allocated in ff_alloc_picture(). */
401 av_freep(&pic->mb_var);
402 av_freep(&pic->mc_mb_var);
403 av_freep(&pic->mb_mean);
404 av_freep(&pic->f.mbskip_table);
405 av_freep(&pic->qscale_table_base);
406 av_freep(&pic->mb_type_base);
407 av_freep(&pic->f.dct_coeff);
408 av_freep(&pic->f.pan_scan);
409 pic->f.mb_type = NULL;
410 for (i = 0; i < 2; i++) {
411 av_freep(&pic->motion_val_base[i]);
412 av_freep(&pic->f.ref_index[i]);
415 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
416 for (i = 0; i < 4; i++) {
418 pic->f.data[i] = NULL;
/* Allocate the per-thread scratch buffers of a (possibly duplicated)
 * MpegEncContext. Returns 0 on success, -1 on allocation failure
 * (partial allocations are cleaned up later via MPV_common_end()). */
424 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
426 int y_size = s->b8_stride * (2 * s->mb_height + 1);
427 int c_size = s->mb_stride * (s->mb_height + 1);
428 int yc_size = y_size + 2 * c_size;
431 // edge emu needs blocksize + filter length - 1
432 // (= 17x17 for halfpel / 21x21 for h264)
433 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
434 (s->width + 64) * 2 * 21 * 2, fail); // (width + edge + align)*interlaced*MBsize*tolerance
436 // FIXME should be linesize instead of s->width * 2
437 // but that is not known before get_buffer()
438 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
439 (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
/* All four scratchpads alias one allocation; only me.scratchpad is
 * freed in free_duplicate_context(). */
440 s->me.temp = s->me.scratchpad;
441 s->rd_scratchpad = s->me.scratchpad;
442 s->b_scratchpad = s->me.scratchpad;
443 s->obmc_scratchpad = s->me.scratchpad + 16;
445 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
446 ME_MAP_SIZE * sizeof(uint32_t), fail)
447 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
448 ME_MAP_SIZE * sizeof(uint32_t), fail)
449 if (s->avctx->noise_reduction) {
450 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
451 2 * 64 * sizeof(int), fail)
454 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
455 s->block = s->blocks[0];
457 for (i = 0; i < 12; i++) {
458 s->pblocks[i] = &s->block[i];
/* H.263-family AC prediction values: [0] = luma, [1]/[2] = chroma. */
461 if (s->out_format == FMT_H263) {
463 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
464 yc_size * sizeof(int16_t) * 16, fail);
465 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
466 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
467 s->ac_val[2] = s->ac_val[1] + c_size;
472 return -1; // free() through MPV_common_end()
/* Free everything allocated by init_duplicate_context(). The aliased
 * scratchpads (rd/b/obmc) are covered by freeing me.scratchpad; only
 * obmc_scratchpad is explicitly NULLed here. */
475 static void free_duplicate_context(MpegEncContext *s)
480 av_freep(&s->edge_emu_buffer);
481 av_freep(&s->me.scratchpad);
485 s->obmc_scratchpad = NULL;
487 av_freep(&s->dct_error_sum);
488 av_freep(&s->me.map);
489 av_freep(&s->me.score_map);
490 av_freep(&s->blocks);
491 av_freep(&s->ac_val_base);
/* Copy the thread-local fields from src into bak so they survive the
 * wholesale memcpy in ff_update_duplicate_context().
 * NOTE(review): most COPY() lines are elided from this excerpt. */
495 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
497 #define COPY(a) bak->a = src->a
498 COPY(edge_emu_buffer);
503 COPY(obmc_scratchpad);
510 COPY(me.map_generation);
/* Refresh a duplicated (slice-thread) context from src: back up dst's
 * per-thread fields, copy the whole struct, restore the backup, then
 * re-point pblocks into dst's own block array. */
522 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
526 // FIXME copy only needed parts
528 backup_duplicate_context(&bak, dst);
529 memcpy(dst, src, sizeof(MpegEncContext));
530 backup_duplicate_context(dst, &bak);
531 for (i = 0; i < 12; i++) {
532 dst->pblocks[i] = &dst->block[i];
534 // STOP_TIMER("update_duplicate_context")
535 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading: synchronize decoder state from the source thread's
 * context (s1) into this thread's context (s). Pointers into s1's
 * picture array are rebased into s's own array. */
538 int ff_mpeg_update_thread_context(AVCodecContext *dst,
539 const AVCodecContext *src)
541 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
543 if (dst == src || !s1->context_initialized)
546 // FIXME can parameters change on I-frames?
547 // in that case dst may need a reinit
/* First use: clone the whole context, then give this thread its own
 * picture index range and a fresh bitstream buffer. */
548 if (!s->context_initialized) {
549 memcpy(s, s1, sizeof(MpegEncContext));
552 s->picture_range_start += MAX_PICTURE_COUNT;
553 s->picture_range_end += MAX_PICTURE_COUNT;
554 s->bitstream_buffer = NULL;
555 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
560 s->avctx->coded_height = s1->avctx->coded_height;
561 s->avctx->coded_width = s1->avctx->coded_width;
562 s->avctx->width = s1->avctx->width;
563 s->avctx->height = s1->avctx->height;
565 s->coded_picture_number = s1->coded_picture_number;
566 s->picture_number = s1->picture_number;
567 s->input_picture_number = s1->input_picture_number;
/* Field-range memcpys below rely on the struct layout: they copy every
 * member between the two named fields in one go. */
569 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
570 memcpy(&s->last_picture, &s1->last_picture, (char*)&s1->last_picture_ptr - (char*)&s1->last_picture);
572 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
573 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
574 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
576 memcpy(s->prev_pict_types, s1->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
578 //Error/bug resilience
579 s->next_p_frame_damaged = s1->next_p_frame_damaged;
580 s->workaround_bugs = s1->workaround_bugs;
583 memcpy(&s->time_increment_bits, &s1->time_increment_bits, (char*)&s1->shape - (char*)&s1->time_increment_bits);
586 s->max_b_frames = s1->max_b_frames;
587 s->low_delay = s1->low_delay;
588 s->dropable = s1->dropable;
590 //DivX handling (doesn't work)
591 s->divx_packed = s1->divx_packed;
/* Duplicate any pending bitstream data, padded with zeros as required
 * by the bitstream reader. */
593 if(s1->bitstream_buffer){
594 if (s1->bitstream_buffer_size + FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
595 av_fast_malloc(&s->bitstream_buffer, &s->allocated_bitstream_buffer_size, s1->allocated_bitstream_buffer_size);
596 s->bitstream_buffer_size = s1->bitstream_buffer_size;
597 memcpy(s->bitstream_buffer, s1->bitstream_buffer, s1->bitstream_buffer_size);
598 memset(s->bitstream_buffer+s->bitstream_buffer_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
601 //MPEG2/interlacing info
602 memcpy(&s->progressive_sequence, &s1->progressive_sequence, (char*)&s1->rtp_mode - (char*)&s1->progressive_sequence);
604 if(!s1->first_field){
605 s->last_pict_type= s1->pict_type;
606 if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
608 if (s1->pict_type != AV_PICTURE_TYPE_B) {
609 s->last_non_b_pict_type= s1->pict_type;
617 * sets the given MpegEncContext to common defaults (same for encoding and decoding).
618 * the changed fields will not depend upon the prior state of the MpegEncContext.
620 void MPV_common_defaults(MpegEncContext *s){
622 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
623 s->chroma_qscale_table= ff_default_chroma_qscale_table;
624 s->progressive_frame= 1;
625 s->progressive_sequence= 1;
626 s->picture_structure= PICT_FRAME;
628 s->coded_picture_number = 0;
629 s->picture_number = 0;
630 s->input_picture_number = 0;
632 s->picture_in_gop_number = 0;
/* Default picture index range; shifted per-thread for frame threading
 * in ff_mpeg_update_thread_context(). */
637 s->picture_range_start = 0;
638 s->picture_range_end = MAX_PICTURE_COUNT;
642 * sets the given MpegEncContext to defaults for decoding.
643 * the changed fields will not depend upon the prior state of the MpegEncContext.
645 void MPV_decode_defaults(MpegEncContext *s){
646 MPV_common_defaults(s);
650 * init common structure for both encoder and decoder.
651 * this assumes that some variables like width/height are already set
653 av_cold int MPV_common_init(MpegEncContext *s)
655 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y,
656 threads = (s->encoding ||
658 s->avctx->active_thread_type & FF_THREAD_SLICE)) ?
659 s->avctx->thread_count : 1;
/* Interlaced MPEG-2 rounds mb_height to an even number of 16-line
 * macroblock rows; H.264 manages mb_height itself. */
661 if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
662 s->mb_height = (s->height + 31) / 32 * 2;
663 else if (s->codec_id != CODEC_ID_H264)
664 s->mb_height = (s->height + 15) / 16;
666 if(s->avctx->pix_fmt == PIX_FMT_NONE){
667 av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
/* Clamp the slice-thread count to MAX_THREADS and to one thread per
 * macroblock row. */
671 if((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) &&
672 (s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height))){
673 int max_threads = FFMIN(MAX_THREADS, s->mb_height);
674 av_log(s->avctx, AV_LOG_WARNING, "too many threads (%d), reducing to %d\n",
675 s->avctx->thread_count, max_threads);
676 threads = max_threads;
679 if((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx))
682 ff_dct_common_init(s);
684 s->flags= s->avctx->flags;
685 s->flags2= s->avctx->flags2;
/* Dimension-dependent setup; skipped while width/height are unknown. */
687 if (s->width && s->height) {
688 s->mb_width = (s->width + 15) / 16;
689 s->mb_stride = s->mb_width + 1;
690 s->b8_stride = s->mb_width*2 + 1;
691 s->b4_stride = s->mb_width*4 + 1;
692 mb_array_size= s->mb_height * s->mb_stride;
693 mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
695 /* set chroma shifts */
696 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
697 &(s->chroma_y_shift) );
699 /* set default edge pos, will be overriden in decode_header if needed */
700 s->h_edge_pos= s->mb_width*16;
701 s->v_edge_pos= s->mb_height*16;
703 s->mb_num = s->mb_width * s->mb_height;
708 s->block_wrap[3]= s->b8_stride;
710 s->block_wrap[5]= s->mb_stride;
712 y_size = s->b8_stride * (2 * s->mb_height + 1);
713 c_size = s->mb_stride * (s->mb_height + 1);
714 yc_size = y_size + 2 * c_size;
716 /* convert fourcc to upper case */
717 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
719 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
721 s->avctx->coded_frame= (AVFrame*)&s->current_picture;
723 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error ressilience code looks cleaner with this
724 for(y=0; y<s->mb_height; y++){
725 for(x=0; x<s->mb_width; x++){
726 s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
729 s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
732 /* Allocate MV tables */
733 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
734 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
735 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
736 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
737 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
738 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
/* Public MV table pointers are offset so index -1 rows are valid. */
739 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
740 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
741 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
742 s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
743 s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
744 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
746 if(s->msmpeg4_version){
747 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
749 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
751 /* Allocate MB type table */
752 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
754 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
756 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix , 64*32 * sizeof(int), fail)
757 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix , 64*32 * sizeof(int), fail)
758 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
759 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
760 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
761 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
763 if(s->avctx->noise_reduction){
764 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
/* One MAX_PICTURE_COUNT range per thread (see picture_range_start/end
 * rebasing in ff_mpeg_update_thread_context()). */
769 s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
770 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, s->picture_count * sizeof(Picture), fail)
771 for(i = 0; i < s->picture_count; i++) {
772 avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
775 if (s->width && s->height) {
776 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
778 if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
779 /* interlaced direct mode decoding tables */
/* NOTE(review): the surrounding i/j/k loops for these field MV tables
 * sit on lines elided from this excerpt. */
784 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
785 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
787 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
788 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
789 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
791 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
794 if (s->out_format == FMT_H263) {
796 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
797 s->coded_block= s->coded_block_base + s->b8_stride + 1;
799 /* cbp, ac_pred, pred_dir */
800 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail)
801 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
804 if (s->h263_pred || s->h263_plus || !s->encoding) {
806 //MN: we need these for error resilience of intra-frames
/* DC prediction values: [0] = luma, [1]/[2] = chroma, reset to 1024. */
807 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
808 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
809 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
810 s->dc_val[2] = s->dc_val[1] + c_size;
811 for(i=0;i<yc_size;i++)
812 s->dc_val_base[i] = 1024;
815 /* which mb is a intra block */
816 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
817 memset(s->mbintra_table, 1, mb_array_size);
819 /* init macroblock skip table */
820 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
821 //Note the +1 is for a quicker mpeg4 slice_end detection
822 FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
824 s->parse_context.state= -1;
825 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
826 s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
827 s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
828 s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
832 s->context_initialized = 1;
833 s->thread_context[0]= s;
/* Slice threading / encoding: clone the context per thread and assign
 * each thread a contiguous band of macroblock rows. */
835 if (s->width && s->height) {
836 if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
837 for(i=1; i<threads; i++){
838 s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
839 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
842 for(i=0; i<threads; i++){
843 if(init_duplicate_context(s->thread_context[i], s) < 0)
845 s->thread_context[i]->start_mb_y= (s->mb_height*(i ) + s->avctx->thread_count/2) / s->avctx->thread_count;
846 s->thread_context[i]->end_mb_y = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
849 if(init_duplicate_context(s, s) < 0) goto fail;
851 s->end_mb_y = s->mb_height;
861 /* init common structure for both encoder and decoder */
/* Tear down everything MPV_common_init() (and helpers) allocated.
 * av_freep() NULLs each pointer, so a partially-initialized context
 * is safe to pass here. */
862 void MPV_common_end(MpegEncContext *s)
866 if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
867 for(i=0; i<s->avctx->thread_count; i++){
868 free_duplicate_context(s->thread_context[i]);
/* thread_context[0] is s itself — only contexts 1..n were malloc'ed. */
870 for(i=1; i<s->avctx->thread_count; i++){
871 av_freep(&s->thread_context[i]);
873 } else free_duplicate_context(s);
875 av_freep(&s->parse_context.buffer);
876 s->parse_context.buffer_size=0;
878 av_freep(&s->mb_type);
879 av_freep(&s->p_mv_table_base);
880 av_freep(&s->b_forw_mv_table_base);
881 av_freep(&s->b_back_mv_table_base);
882 av_freep(&s->b_bidir_forw_mv_table_base);
883 av_freep(&s->b_bidir_back_mv_table_base);
884 av_freep(&s->b_direct_mv_table_base);
886 s->b_forw_mv_table= NULL;
887 s->b_back_mv_table= NULL;
888 s->b_bidir_forw_mv_table= NULL;
889 s->b_bidir_back_mv_table= NULL;
890 s->b_direct_mv_table= NULL;
/* NOTE(review): the i/j/k loops enclosing these field-table frees sit
 * on lines elided from this excerpt. */
894 av_freep(&s->b_field_mv_table_base[i][j][k]);
895 s->b_field_mv_table[i][j][k]=NULL;
897 av_freep(&s->b_field_select_table[i][j]);
898 av_freep(&s->p_field_mv_table_base[i][j]);
899 s->p_field_mv_table[i][j]=NULL;
901 av_freep(&s->p_field_select_table[i]);
904 av_freep(&s->dc_val_base);
905 av_freep(&s->coded_block_base);
906 av_freep(&s->mbintra_table);
907 av_freep(&s->cbp_table);
908 av_freep(&s->pred_dir_table);
910 av_freep(&s->mbskip_table);
911 av_freep(&s->prev_pict_types);
912 av_freep(&s->bitstream_buffer);
913 s->allocated_bitstream_buffer_size=0;
915 av_freep(&s->avctx->stats_out);
916 av_freep(&s->ac_stats);
917 av_freep(&s->error_status_table);
918 av_freep(&s->mb_index2xy);
919 av_freep(&s->lambda_table);
920 av_freep(&s->q_intra_matrix);
921 av_freep(&s->q_inter_matrix);
922 av_freep(&s->q_intra_matrix16);
923 av_freep(&s->q_inter_matrix16);
924 av_freep(&s->input_picture);
925 av_freep(&s->reordered_input_picture);
926 av_freep(&s->dct_offset);
928 if(s->picture && !s->avctx->internal->is_copy){
929 for(i=0; i<s->picture_count; i++){
930 free_picture(s, &s->picture[i]);
933 av_freep(&s->picture);
934 s->context_initialized = 0;
937 s->current_picture_ptr= NULL;
938 s->linesize= s->uvlinesize= 0;
941 av_freep(&s->visualization_buffer[i]);
943 if(!(s->avctx->active_thread_type&FF_THREAD_FRAME))
944 avcodec_default_free_buffers(s->avctx);
/* Initialize an RLTable's derived lookup arrays (max_level, max_run,
 * index_run) from its run/level tables. When static_store is non-NULL
 * the arrays live in that static buffer and the call is idempotent;
 * otherwise they are av_malloc'ed. */
947 void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
949 int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
950 uint8_t index_run[MAX_RUN+1];
951 int last, run, level, start, end, i;
953 /* If table is static, we can quit if rl->max_level[0] is not NULL */
954 if(static_store && rl->max_level[0])
957 /* compute max_level[], max_run[] and index_run[] */
958 for(last=0;last<2;last++) {
/* NOTE(review): the lines choosing start/end for last==0 vs last==1
 * are elided from this excerpt. */
967 memset(max_level, 0, MAX_RUN + 1);
968 memset(max_run, 0, MAX_LEVEL + 1);
969 memset(index_run, rl->n, MAX_RUN + 1);
970 for(i=start;i<end;i++) {
971 run = rl->table_run[i];
972 level = rl->table_level[i];
973 if (index_run[run] == rl->n)
975 if (level > max_level[run])
976 max_level[run] = level;
977 if (run > max_run[level])
978 max_run[level] = run;
/* Publish the computed arrays, either into the static buffer (at
 * fixed offsets) or into fresh allocations. */
981 rl->max_level[last] = static_store[last];
983 rl->max_level[last] = av_malloc(MAX_RUN + 1);
984 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
986 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
988 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
989 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
991 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
993 rl->index_run[last] = av_malloc(MAX_RUN + 1);
994 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Build per-qscale RL_VLC tables from an RLTable's VLC: for each of 32
 * qscale values, pre-multiply the level by qmul/qadd so the decoder can
 * skip the dequantization step per coefficient.
 * NOTE(review): the lines computing qmul/qadd and handling the
 * escape/illegal cases are elided from this excerpt. */
998 void init_vlc_rl(RLTable *rl)
1002 for(q=0; q<32; q++){
1010 for(i=0; i<rl->vlc.table_size; i++){
1011 int code= rl->vlc.table[i][0];
1012 int len = rl->vlc.table[i][1];
1015 if(len==0){ // illegal code
1018 }else if(len<0){ //more bits needed
1022 if(code==rl->n){ //esc
1026 run= rl->table_run [code] + 1;
1027 level= rl->table_level[code] * qmul + qadd;
1028 if(code >= rl->last) run+=192;
1031 rl->rl_vlc[q][i].len= len;
1032 rl->rl_vlc[q][i].level= level;
1033 rl->rl_vlc[q][i].run= run;
/* Free the pixel buffers of every picture that is allocated but no
 * longer referenced and owned by this context; the current picture is
 * kept unless remove_current is set. */
1038 void ff_release_unused_pictures(MpegEncContext *s, int remove_current)
1042 /* release non reference frames */
1043 for(i=0; i<s->picture_count; i++){
1044 if (s->picture[i].f.data[0] && !s->picture[i].f.reference
1045 && (!s->picture[i].owner2 || s->picture[i].owner2 == s)
1046 && (remove_current || &s->picture[i] != s->current_picture_ptr)
1047 /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1048 free_frame_buffer(s, &s->picture[i]);
/* Return the index of a free slot in this context's picture range.
 * Preference order: completely untyped free slot, then typed-but-empty,
 * then any slot without data. Exhaustion is treated as an internal
 * error (see the comment block below) rather than returning -1.
 * NOTE(review): the shared/!shared branch structure and return
 * statements sit on lines elided from this excerpt. */
1053 int ff_find_unused_picture(MpegEncContext *s, int shared){
1057 for(i=s->picture_range_start; i<s->picture_range_end; i++){
1058 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1062 for(i=s->picture_range_start; i<s->picture_range_end; i++){
1063 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
1066 for(i=s->picture_range_start; i<s->picture_range_end; i++){
1067 if (s->picture[i].f.data[0] == NULL)
1072 av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
1073 /* We could return -1, but the codec would crash trying to draw into a
1074 * non-existing frame anyway. This is safer than waiting for a random crash.
1075 * Also the return of this is never useful, an encoder must only allocate
1076 * as much as allowed in the specification. This has no relationship to how
1077 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1078 * enough for such valid streams).
1079 * Plus, a decoder has to check stream validity and remove frames if too
1080 * many reference frames are around. Waiting for "OOM" is not correct at
1081 * all. Similarly, missing reference frames have to be replaced by
1082 * interpolated/MC frames, anything else is a bug in the codec ...
/**
 * Updates the per-coefficient noise-reduction offsets (intra and inter)
 * from the accumulated DCT error statistics. Error sums and counts are
 * halved once the count exceeds 2^16 to form an exponentially decaying
 * average.
 */
1088 static void update_noise_reduction(MpegEncContext *s){
1091 for(intra=0; intra<2; intra++){
1092 if(s->dct_count[intra] > (1<<16)){
1093 for(i=0; i<64; i++){
1094 s->dct_error_sum[intra][i] >>=1;
1096 s->dct_count[intra] >>= 1;
/* offset = noise_reduction * count / error_sum, with rounding;
 * +1 in the divisor avoids division by zero */
1099 for(i=0; i<64; i++){
1100 s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
1106 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
1108 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1114 assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
1116 /* mark&release old frames */
1117 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->f.data[0]) {
1118 if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
1119 if (s->last_picture_ptr->owner2 == s)
1120 free_frame_buffer(s, s->last_picture_ptr);
1122 /* release forgotten pictures */
1123 /* if(mpeg124/h263) */
1125 for(i=0; i<s->picture_count; i++){
/* a reference picture that is neither last nor next should not exist;
 * with frame threading another thread may legitimately own it, so only
 * warn when frame threading is off */
1126 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].f.reference) {
1127 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1128 av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
1129 free_frame_buffer(s, &s->picture[i]);
1137 ff_release_unused_pictures(s, 1);
1139 if (s->current_picture_ptr && s->current_picture_ptr->f.data[0] == NULL)
1140 pic= s->current_picture_ptr; //we already have an unused image (maybe it was set before reading the header)
1142 i= ff_find_unused_picture(s, 0);
1143 pic= &s->picture[i];
/* reference flag: H.264 marks by picture structure, other codecs mark
 * all non-B pictures as full-frame references (3 == both fields) */
1146 pic->f.reference = 0;
1148 if (s->codec_id == CODEC_ID_H264)
1149 pic->f.reference = s->picture_structure;
1150 else if (s->pict_type != AV_PICTURE_TYPE_B)
1151 pic->f.reference = 3;
1154 pic->f.coded_picture_number = s->coded_picture_number++;
1156 if(ff_alloc_picture(s, pic, 0) < 0)
1159 s->current_picture_ptr= pic;
1160 //FIXME use only the vars from current_pic
1161 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1162 if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
/* for field pictures, derive top_field_first from which field comes first */
1163 if(s->picture_structure != PICT_FRAME)
1164 s->current_picture_ptr->f.top_field_first = (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1166 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame && !s->progressive_sequence;
1167 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1170 s->current_picture_ptr->f.pict_type = s->pict_type;
1171 // if(s->flags && CODEC_FLAG_QSCALE)
1172 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
1173 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1175 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
/* advance the reference chain: last <- next <- current (non-B only) */
1177 if (s->pict_type != AV_PICTURE_TYPE_B) {
1178 s->last_picture_ptr= s->next_picture_ptr;
1180 s->next_picture_ptr= s->current_picture_ptr;
1182 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1183 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1184 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1185 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1186 s->pict_type, s->dropable);*/
/* non-H.264: synthesize dummy reference frames for broken/field-first
 * streams so motion compensation has something to read from */
1188 if(s->codec_id != CODEC_ID_H264){
1189 if ((s->last_picture_ptr == NULL || s->last_picture_ptr->f.data[0] == NULL) &&
1190 (s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){
1191 if (s->pict_type != AV_PICTURE_TYPE_I)
1192 av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
1193 else if (s->picture_structure != PICT_FRAME)
1194 av_log(avctx, AV_LOG_INFO, "allocate dummy last picture for field based first keyframe\n");
1196 /* Allocate a dummy frame */
1197 i= ff_find_unused_picture(s, 0);
1198 s->last_picture_ptr= &s->picture[i];
1199 if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
1201 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
1202 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
1204 if ((s->next_picture_ptr == NULL || s->next_picture_ptr->f.data[0] == NULL) && s->pict_type == AV_PICTURE_TYPE_B) {
1205 /* Allocate a dummy frame */
1206 i= ff_find_unused_picture(s, 0);
1207 s->next_picture_ptr= &s->picture[i];
1208 if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
1210 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 0);
1211 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 1);
1215 if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1216 if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1218 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->f.data[0]));
/* field pictures (non-H.264): double linesizes and offset the data
 * pointers so the selected field is addressed as a half-height frame */
1220 if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
1223 if(s->picture_structure == PICT_BOTTOM_FIELD){
1224 s->current_picture.f.data[i] += s->current_picture.f.linesize[i];
1226 s->current_picture.f.linesize[i] *= 2;
1227 s->last_picture.f.linesize[i] *= 2;
1228 s->next_picture.f.linesize[i] *= 2;
1232 s->error_recognition= avctx->error_recognition;
1234 /* set dequantizer, we can't do it during init as it might change for mpeg4
1235 and we can't do it in the header decode as init is not called for mpeg4 there yet */
1236 if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
1237 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1238 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1239 }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
1240 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1241 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1243 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1244 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1247 if(s->dct_error_sum){
1248 assert(s->avctx->noise_reduction && s->encoding);
1250 update_noise_reduction(s);
1253 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1254 return ff_xvmc_field_start(s, avctx);
1259 /* generic function for encode/decode called after a frame has been coded/decoded */
1260 void MPV_frame_end(MpegEncContext *s)
1263 /* redraw edges for the frame if decoding didn't complete */
1264 //just to make sure that all data is rendered.
1265 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1266 ff_xvmc_field_end(s);
/* pad the picture borders (draw_edges) so unrestricted MVs can read
 * outside the coded area; skipped for hwaccel/VDPAU output and when the
 * caller asked for EMU_EDGE (edges emulated at MC time instead) */
1267 }else if((s->error_count || s->encoding)
1268 && !s->avctx->hwaccel
1269 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
1270 && s->unrestricted_mv
1271 && s->current_picture.f.reference
1273 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
1274 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
1275 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
1276 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1277 s->h_edge_pos , s->v_edge_pos,
1278 EDGE_WIDTH , EDGE_WIDTH , EDGE_TOP | EDGE_BOTTOM);
1279 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1280 s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
1281 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
1282 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1283 s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
1284 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
/* remember per-type state used by rate control / next frame's decisions */
1289 s->last_pict_type = s->pict_type;
1290 s->last_lambda_for[s->pict_type] = s->current_picture_ptr->f.quality;
1291 if(s->pict_type!=AV_PICTURE_TYPE_B){
1292 s->last_non_b_pict_type= s->pict_type;
1295 /* copy back current_picture variables */
1296 for(i=0; i<MAX_PICTURE_COUNT; i++){
1297 if(s->picture[i].f.data[0] == s->current_picture.f.data[0]){
1298 s->picture[i]= s->current_picture;
1302 assert(i<MAX_PICTURE_COUNT);
1306 /* release non-reference frames */
1307 for(i=0; i<s->picture_count; i++){
1308 if (s->picture[i].f.data[0] && !s->picture[i].f.reference /*&& s->picture[i].type != FF_BUFFER_TYPE_SHARED*/) {
1309 free_frame_buffer(s, &s->picture[i]);
1313 // clear copies, to avoid confusion
1315 memset(&s->last_picture, 0, sizeof(Picture));
1316 memset(&s->next_picture, 0, sizeof(Picture));
1317 memset(&s->current_picture, 0, sizeof(Picture));
1319 s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
/* frame threading: signal that this reference frame is fully decoded */
1321 if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
1322 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0);
1327 * Draws a line from (ex, ey) -> (sx, sy) into an 8-bit plane.
1328 * @param w width of the image
1329 * @param h height of the image
1330 * @param stride stride/linesize of the image
1331 * @param color color (additive intensity) of the line
1333 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* clip both endpoints into the image */
1336 sx= av_clip(sx, 0, w-1);
1337 sy= av_clip(sy, 0, h-1);
1338 ex= av_clip(ex, 0, w-1);
1339 ey= av_clip(ey, 0, h-1);
1341 buf[sy*stride + sx]+= color;
/* walk along the major axis; 'f' is a 16.16 fixed-point slope and 'fr'
 * the fractional part, used to anti-alias across the two nearest pixels */
1343 if(FFABS(ex - sx) > FFABS(ey - sy)){
1345 FFSWAP(int, sx, ex);
1346 FFSWAP(int, sy, ey);
1348 buf+= sx + sy*stride;
1350 f= ((ey-sy)<<16)/ex;
1351 for(x= 0; x <= ex; x++){
1354 buf[ y *stride + x]+= (color*(0x10000-fr))>>16;
1355 buf[(y+1)*stride + x]+= (color* fr )>>16;
/* mostly-vertical case: iterate over y instead */
1359 FFSWAP(int, sx, ex);
1360 FFSWAP(int, sy, ey);
1362 buf+= sx + sy*stride;
1364 if(ey) f= ((ex-sx)<<16)/ey;
1366 for(y= 0; y <= ey; y++){
1369 buf[y*stride + x ]+= (color*(0x10000-fr))>>16;
1370 buf[y*stride + x+1]+= (color* fr )>>16;
1376 * Draws an arrow from (ex, ey) -> (sx, sy): a line plus two short head
1376b* strokes at the (sx, sy) end when the arrow is long enough.
1377 * @param w width of the image
1378 * @param h height of the image
1379 * @param stride stride/linesize of the image
1380 * @param color color of the arrow
1382 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* loose clip: endpoints may lie slightly outside; draw_line clips hard */
1385 sx= av_clip(sx, -100, w+100);
1386 sy= av_clip(sy, -100, h+100);
1387 ex= av_clip(ex, -100, w+100);
1388 ey= av_clip(ey, -100, h+100);
/* only draw the head if the arrow is longer than 3 pixels */
1393 if(dx*dx + dy*dy > 3*3){
1396 int length= ff_sqrt((rx*rx + ry*ry)<<8);
1398 //FIXME subpixel accuracy
/* scale (rx, ry) to a fixed head length */
1399 rx= ROUNDED_DIV(rx*3<<4, length);
1400 ry= ROUNDED_DIV(ry*3<<4, length);
1402 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1403 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1405 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1409 * Prints debugging info for the given picture (textual MB maps on the log,
1409b* and optional visualizations drawn into a copy of the picture).
1411 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
1413 if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
/* part 1: textual per-macroblock dump (skip count / qscale / MB type) */
1415 if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1418 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1419 switch (pict->pict_type) {
1420 case AV_PICTURE_TYPE_I: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
1421 case AV_PICTURE_TYPE_P: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
1422 case AV_PICTURE_TYPE_B: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
1423 case AV_PICTURE_TYPE_S: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
1424 case AV_PICTURE_TYPE_SI: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
1425 case AV_PICTURE_TYPE_SP: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
1427 for(y=0; y<s->mb_height; y++){
1428 for(x=0; x<s->mb_width; x++){
1429 if(s->avctx->debug&FF_DEBUG_SKIP){
1430 int count= s->mbskip_table[x + y*s->mb_stride];
1431 if(count>9) count=9;
1432 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1434 if(s->avctx->debug&FF_DEBUG_QP){
1435 av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
1437 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1438 int mb_type= pict->mb_type[x + y*s->mb_stride];
1439 //Type & MV direction (one character per MB)
1441 av_log(s->avctx, AV_LOG_DEBUG, "P");
1442 else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1443 av_log(s->avctx, AV_LOG_DEBUG, "A");
1444 else if(IS_INTRA4x4(mb_type))
1445 av_log(s->avctx, AV_LOG_DEBUG, "i");
1446 else if(IS_INTRA16x16(mb_type))
1447 av_log(s->avctx, AV_LOG_DEBUG, "I");
1448 else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1449 av_log(s->avctx, AV_LOG_DEBUG, "d");
1450 else if(IS_DIRECT(mb_type))
1451 av_log(s->avctx, AV_LOG_DEBUG, "D");
1452 else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1453 av_log(s->avctx, AV_LOG_DEBUG, "g");
1454 else if(IS_GMC(mb_type))
1455 av_log(s->avctx, AV_LOG_DEBUG, "G");
1456 else if(IS_SKIP(mb_type))
1457 av_log(s->avctx, AV_LOG_DEBUG, "S");
1458 else if(!USES_LIST(mb_type, 1))
1459 av_log(s->avctx, AV_LOG_DEBUG, ">");
1460 else if(!USES_LIST(mb_type, 0))
1461 av_log(s->avctx, AV_LOG_DEBUG, "<");
1463 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1464 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* second character: partitioning (+/-/| for 8x8, 16x8, 8x16) */
1469 av_log(s->avctx, AV_LOG_DEBUG, "+");
1470 else if(IS_16X8(mb_type))
1471 av_log(s->avctx, AV_LOG_DEBUG, "-");
1472 else if(IS_8X16(mb_type))
1473 av_log(s->avctx, AV_LOG_DEBUG, "|");
1474 else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1475 av_log(s->avctx, AV_LOG_DEBUG, " ");
1477 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* third character: '=' marks interlaced MBs */
1480 if(IS_INTERLACED(mb_type))
1481 av_log(s->avctx, AV_LOG_DEBUG, "=");
1483 av_log(s->avctx, AV_LOG_DEBUG, " ");
1485 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1487 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* part 2: visual overlays (MV arrows, QP shading, MB-type coloring),
 * drawn into a private copy so the real output frame is untouched */
1491 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
1492 const int shift= 1 + s->quarter_sample;
1496 int h_chroma_shift, v_chroma_shift, block_height;
1497 const int width = s->avctx->width;
1498 const int height= s->avctx->height;
1499 const int mv_sample_log2= 4 - pict->motion_subsample_log2;
1500 const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1501 s->low_delay=0; //needed to see the vectors without trashing the buffers
1503 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1505 memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
1506 pict->data[i]= s->visualization_buffer[i];
1508 pict->type= FF_BUFFER_TYPE_COPY;
1510 block_height = 16>>v_chroma_shift;
1512 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1514 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1515 const int mb_index= mb_x + mb_y*s->mb_stride;
1516 if((s->avctx->debug_mv) && pict->motion_val){
/* type 0 = P forward, 1 = B forward, 2 = B backward; each is drawn
 * only when the matching FF_DEBUG_VIS_MV_* flag is set */
1518 for(type=0; type<3; type++){
1521 case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_P))
1525 case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1529 case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1534 if(!USES_LIST(pict->mb_type[mb_index], direction))
/* one arrow per partition: 8x8 (4 arrows), 16x8 / 8x16 (2), else one */
1537 if(IS_8X8(pict->mb_type[mb_index])){
1540 int sx= mb_x*16 + 4 + 8*(i&1);
1541 int sy= mb_y*16 + 4 + 8*(i>>1);
1542 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1543 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1544 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1545 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1547 }else if(IS_16X8(pict->mb_type[mb_index])){
1551 int sy=mb_y*16 + 4 + 8*i;
1552 int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
1553 int mx=(pict->motion_val[direction][xy][0]>>shift);
1554 int my=(pict->motion_val[direction][xy][1]>>shift);
1556 if(IS_INTERLACED(pict->mb_type[mb_index]))
1559 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1561 }else if(IS_8X16(pict->mb_type[mb_index])){
1564 int sx=mb_x*16 + 4 + 8*i;
1566 int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
1567 int mx=(pict->motion_val[direction][xy][0]>>shift);
1568 int my=(pict->motion_val[direction][xy][1]>>shift);
1570 if(IS_INTERLACED(pict->mb_type[mb_index]))
1573 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1576 int sx= mb_x*16 + 8;
1577 int sy= mb_y*16 + 8;
1578 int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
1579 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1580 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1581 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
/* QP visualization: shade chroma planes proportionally to qscale */
1585 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
1586 uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
1588 for(y=0; y<block_height; y++){
1589 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
1590 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
/* MB-type visualization: pick a chroma color (hue encoded via COLOR)
 * per macroblock class and fill the MB's chroma area with it */
1593 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
1594 int mb_type= pict->mb_type[mb_index];
1597 #define COLOR(theta, r)\
1598 u= (int)(128 + r*cos(theta*3.141592/180));\
1599 v= (int)(128 + r*sin(theta*3.141592/180));
1603 if(IS_PCM(mb_type)){
1605 }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
1607 }else if(IS_INTRA4x4(mb_type)){
1609 }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
1611 }else if(IS_DIRECT(mb_type)){
1613 }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
1615 }else if(IS_GMC(mb_type)){
1617 }else if(IS_SKIP(mb_type)){
1619 }else if(!USES_LIST(mb_type, 1)){
1621 }else if(!USES_LIST(mb_type, 0)){
1624 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1628 u*= 0x0101010101010101ULL;
1629 v*= 0x0101010101010101ULL;
1630 for(y=0; y<block_height; y++){
1631 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
1632 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
/* segmentation overlays on luma: XOR lines at partition boundaries */
1636 if(IS_8X8(mb_type) || IS_16X8(mb_type)){
1637 *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1638 *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1640 if(IS_8X8(mb_type) || IS_8X16(mb_type)){
1642 pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
1644 if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
1645 int dm= 1 << (mv_sample_log2-2);
1647 int sx= mb_x*16 + 8*(i&1);
1648 int sy= mb_y*16 + 8*(i>>1);
1649 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
/* mark sub-8x8 blocks whose MVs differ within the 8x8 partition */
1651 int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
1652 if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
1654 pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
1655 if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
1656 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
1660 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
/* reset the skip counter so the overlay is regenerated next frame */
1664 s->mbskip_table[mb_index]=0;
/**
 * Half-pel motion compensation for one block in lowres decoding mode.
 * Splits the motion vector into an integer source offset and a sub-pel
 * fraction (sx, sy), emulating picture edges when the source block reads
 * outside the valid area. Uses the h264 chroma MC functions as generic
 * bilinear interpolators.
 */
1670 static inline int hpel_motion_lowres(MpegEncContext *s,
1671 uint8_t *dest, uint8_t *src,
1672 int field_based, int field_select,
1673 int src_x, int src_y,
1674 int width, int height, int stride,
1675 int h_edge_pos, int v_edge_pos,
1676 int w, int h, h264_chroma_mc_func *pix_op,
1677 int motion_x, int motion_y)
1679 const int lowres= s->avctx->lowres;
1680 const int op_index= FFMIN(lowres, 2);
/* sub-pel mask: low (lowres+1) bits of the MV are the fractional part */
1681 const int s_mask= (2<<lowres)-1;
1685 if(s->quarter_sample){
1690 sx= motion_x & s_mask;
1691 sy= motion_y & s_mask;
1692 src_x += motion_x >> (lowres+1);
1693 src_y += motion_y >> (lowres+1);
1695 src += src_y * stride + src_x;
/* source reads past the padded edge: fall back to edge emulation */
1697 if( (unsigned)src_x > h_edge_pos - (!!sx) - w
1698 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1699 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
1700 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1701 src= s->edge_emu_buffer;
/* rescale the sub-pel fraction to the 1/8-pel range pix_op expects */
1705 sx= (sx << 2) >> lowres;
1706 sy= (sy << 2) >> lowres;
1709 pix_op[op_index](dest, src, stride, h, sx, sy);
1713 /* apply one mpeg motion vector to the three components (lowres mode) */
1714 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
1715 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1716 int field_based, int bottom_field, int field_select,
1717 uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
1718 int motion_x, int motion_y, int h, int mb_y)
1720 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1721 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
1722 const int lowres= s->avctx->lowres;
1723 const int op_index= FFMIN(lowres, 2);
1724 const int block_s= 8>>lowres;
1725 const int s_mask= (2<<lowres)-1;
1726 const int h_edge_pos = s->h_edge_pos >> lowres;
1727 const int v_edge_pos = s->v_edge_pos >> lowres;
1728 linesize = s->current_picture.f.linesize[0] << field_based;
1729 uvlinesize = s->current_picture.f.linesize[1] << field_based;
1731 if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
1737 motion_y += (bottom_field - field_select)*((1<<lowres)-1);
/* split luma MV into integer source position and sub-pel fraction */
1740 sx= motion_x & s_mask;
1741 sy= motion_y & s_mask;
1742 src_x = s->mb_x*2*block_s + (motion_x >> (lowres+1));
1743 src_y =( mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
/* chroma MV derivation differs per container format */
1745 if (s->out_format == FMT_H263) {
1746 uvsx = ((motion_x>>1) & s_mask) | (sx&1);
1747 uvsy = ((motion_y>>1) & s_mask) | (sy&1);
1750 }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
1753 uvsx = (2*mx) & s_mask;
1754 uvsy = (2*my) & s_mask;
1755 uvsrc_x = s->mb_x*block_s + (mx >> lowres);
1756 uvsrc_y = mb_y*block_s + (my >> lowres);
1762 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1763 uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
1766 ptr_y = ref_picture[0] + src_y * linesize + src_x;
1767 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1768 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* edge emulation when the block reads outside the padded picture */
1770 if( (unsigned)src_x > h_edge_pos - (!!sx) - 2*block_s
1771 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1772 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
1773 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1774 ptr_y = s->edge_emu_buffer;
1775 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1776 uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
1777 s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9+field_based,
1778 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1779 s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
1780 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1786 if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
1787 dest_y += s->linesize;
1788 dest_cb+= s->uvlinesize;
1789 dest_cr+= s->uvlinesize;
1793 ptr_y += s->linesize;
1794 ptr_cb+= s->uvlinesize;
1795 ptr_cr+= s->uvlinesize;
/* rescale sub-pel fractions to the 1/8-pel range pix_op expects */
1798 sx= (sx << 2) >> lowres;
1799 sy= (sy << 2) >> lowres;
1800 pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);
1802 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1803 uvsx= (uvsx << 2) >> lowres;
1804 uvsy= (uvsy << 2) >> lowres;
1805 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1806 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1808 //FIXME h261 lowres loop filter
/**
 * Chroma motion compensation for a 4MV (four 8x8 luma vectors) macroblock
 * in lowres mode: the four luma MVs are combined into one chroma MV with
 * the H.263 rounding rule, then both chroma planes are interpolated.
 */
1811 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
1812 uint8_t *dest_cb, uint8_t *dest_cr,
1813 uint8_t **ref_picture,
1814 h264_chroma_mc_func *pix_op,
1816 const int lowres= s->avctx->lowres;
1817 const int op_index= FFMIN(lowres, 2);
1818 const int block_s= 8>>lowres;
1819 const int s_mask= (2<<lowres)-1;
1820 const int h_edge_pos = s->h_edge_pos >> (lowres+1);
1821 const int v_edge_pos = s->v_edge_pos >> (lowres+1);
1822 int emu=0, src_x, src_y, offset, sx, sy;
1825 if(s->quarter_sample){
1830 /* In case of 8X8, we construct a single chroma motion vector
1831 with a special rounding */
1832 mx= ff_h263_round_chroma(mx);
1833 my= ff_h263_round_chroma(my);
1837 src_x = s->mb_x*block_s + (mx >> (lowres+1));
1838 src_y = s->mb_y*block_s + (my >> (lowres+1));
1840 offset = src_y * s->uvlinesize + src_x;
1841 ptr = ref_picture[1] + offset;
1842 if(s->flags&CODEC_FLAG_EMU_EDGE){
1843 if( (unsigned)src_x > h_edge_pos - (!!sx) - block_s
1844 || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
1845 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1846 ptr= s->edge_emu_buffer;
/* rescale sub-pel fractions to the 1/8-pel range pix_op expects */
1850 sx= (sx << 2) >> lowres;
1851 sy= (sy << 2) >> lowres;
1852 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr plane: same offset and sub-pel position as Cb */
1854 ptr = ref_picture[2] + offset;
1856 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1857 ptr= s->edge_emu_buffer;
1859 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
1863 * motion compensation of a single macroblock (lowres mode)
1865 * @param dest_y luma destination pointer
1866 * @param dest_cb chroma cb/u destination pointer
1867 * @param dest_cr chroma cr/v destination pointer
1868 * @param dir direction (0->forward, 1->backward)
1869 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1870 * @param pix_op halfpel motion compensation function (average or put normally)
1871 * the motion vectors are taken from s->mv and the MV type from s->mv_type
1873 static inline void MPV_motion_lowres(MpegEncContext *s,
1874 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1875 int dir, uint8_t **ref_picture,
1876 h264_chroma_mc_func *pix_op)
1880 const int lowres= s->avctx->lowres;
1881 const int block_s= 8>>lowres;
/* dispatch on MV type: 16x16, 8x8 (4MV), field, 16x8, dual-prime */
1886 switch(s->mv_type) {
1888 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1890 ref_picture, pix_op,
1891 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
/* 4MV: one half-pel MC per 8x8 luma block, chroma handled jointly below */
1897 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
1898 ref_picture[0], 0, 0,
1899 (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
1900 s->width, s->height, s->linesize,
1901 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
1902 block_s, block_s, pix_op,
1903 s->mv[dir][i][0], s->mv[dir][i][1]);
1905 mx += s->mv[dir][i][0];
1906 my += s->mv[dir][i][1];
1909 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
1910 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
/* field MVs in a frame picture: one MC per field (top then bottom) */
1913 if (s->picture_structure == PICT_FRAME) {
1915 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1916 1, 0, s->field_select[dir][0],
1917 ref_picture, pix_op,
1918 s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
1920 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1921 1, 1, s->field_select[dir][1],
1922 ref_picture, pix_op,
1923 s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
/* field picture: opposite-parity reference may be the current frame */
1925 if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
1926 ref_picture = s->current_picture_ptr->f.data;
1929 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1930 0, 0, s->field_select[dir][0],
1931 ref_picture, pix_op,
1932 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
1937 uint8_t ** ref2picture;
1939 if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
1940 ref2picture= ref_picture;
1942 ref2picture = s->current_picture_ptr->f.data;
1945 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1946 0, 0, s->field_select[dir][i],
1947 ref2picture, pix_op,
1948 s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);
1950 dest_y += 2*block_s*s->linesize;
1951 dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
1952 dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
/* dual prime: put the first prediction, then average the second onto it */
1956 if(s->picture_structure == PICT_FRAME){
1960 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1962 ref_picture, pix_op,
1963 s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
1965 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1969 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1970 0, 0, s->picture_structure != i+1,
1971 ref_picture, pix_op,
1972 s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
1974 // after put we make avg of the same block
1975 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1977 //opposite parity is always in the same frame if this is second field
1978 if(!s->first_field){
1979 ref_picture = s->current_picture_ptr->f.data;
1989 * Find the lowest MB row referenced in the MVs of the current macroblock,
1989b* used by frame threading to wait only as far into the reference frame
1989c* as motion compensation actually needs.
1991 int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
1993 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1994 int my, off, i, mvs;
/* field pictures are not handled; be conservative (wait for the whole frame) */
1996 if (s->picture_structure != PICT_FRAME) goto unhandled;
1998 switch (s->mv_type) {
2012 for (i = 0; i < mvs; i++) {
/* normalize to quarter-pel so the >>6 below converts to MB rows */
2013 my = s->mv[dir][i][1]<<qpel_shift;
2014 my_max = FFMAX(my_max, my);
2015 my_min = FFMIN(my_min, my);
2018 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2020 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2022 return s->mb_height-1;
2025 /* put block[] to dest[]: intra path — dequantize then IDCT-put */
2026 static inline void put_dct(MpegEncContext *s,
2027 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2029 s->dct_unquantize_intra(s, block, i, qscale);
2030 s->dsp.idct_put (dest, line_size, block);
2033 /* add block[] to dest[]: already-dequantized residual — IDCT-add only.
2033b* Skipped entirely when the block has no coded coefficients. */
2034 static inline void add_dct(MpegEncContext *s,
2035 DCTELEM *block, int i, uint8_t *dest, int line_size)
2037 if (s->block_last_index[i] >= 0) {
2038 s->dsp.idct_add (dest, line_size, block);
/* inter path: dequantize the residual, then IDCT-add onto the prediction.
 * Skipped when the block has no coded coefficients. */
2042 static inline void add_dequant_dct(MpegEncContext *s,
2043 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2045 if (s->block_last_index[i] >= 0) {
2046 s->dct_unquantize_inter(s, block, i, qscale);
2048 s->dsp.idct_add (dest, line_size, block);
2053 * Cleans dc, ac, coded_block for the current non-intra MB: resets the
2053b* DC predictors to the neutral value 1024 and zeroes the AC prediction
2053c* buffers so later intra MBs don't predict from this block.
2055 void ff_clean_intra_table_entries(MpegEncContext *s)
2057 int wrap = s->b8_stride;
2058 int xy = s->block_index[0];
/* luma: reset the four 8x8 DC predictors of this macroblock */
2061 s->dc_val[0][xy + 1 ] =
2062 s->dc_val[0][xy + wrap] =
2063 s->dc_val[0][xy + 1 + wrap] = 1024;
2065 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2066 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2067 if (s->msmpeg4_version>=3) {
2068 s->coded_block[xy ] =
2069 s->coded_block[xy + 1 ] =
2070 s->coded_block[xy + wrap] =
2071 s->coded_block[xy + 1 + wrap] = 0;
/* chroma: one DC/AC entry per plane at macroblock resolution */
2074 wrap = s->mb_stride;
2075 xy = s->mb_x + s->mb_y * wrap;
2077 s->dc_val[2][xy] = 1024;
2079 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2080 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2082 s->mbintra_table[xy]= 0;
2085 /* generic function called after a macroblock has been parsed by the
2086 decoder or after it has been encoded by the encoder.
2088 Important variables used:
2089 s->mb_intra : true if intra macroblock
2090 s->mv_dir : motion vector direction
2091 s->mv_type : motion vector type
2092 s->mv : motion vector
2093 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* Forced inline with compile-time-constant lowres_flag/is_mpeg12 so the
 * compiler can drop the unused code paths in each instantiation (see the
 * four calls in MPV_decode_mb()). */
2095 static av_always_inline
2096 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
2097 int lowres_flag, int is_mpeg12)
2099 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* XvMC hardware acceleration consumes the coefficient blocks itself. */
2100 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2101 ff_xvmc_decode_mb(s);//xvmc uses pblocks
/* Debug aid: store and print the IDCT-permuted DCT coefficients of this MB. */
2105 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2106 /* save DCT coefficients */
2108 DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
2109 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2111 for(j=0; j<64; j++){
2112 *dct++ = block[i][s->dsp.idct_permutation[j]];
2113 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2115 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2119 s->current_picture.f.qscale_table[mb_xy] = s->qscale;
2121 /* update DC predictors for P macroblocks */
2123 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2124 if(s->mbintra_table[mb_xy])
2125 ff_clean_intra_table_entries(s);
/* reset DC predictor to the mid-point for the current DC precision */
2129 s->last_dc[2] = 128 << s->intra_dc_precision;
2132 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2133 s->mbintra_table[mb_xy]=1;
/* Reconstruct pixels unless we are encoding and the decoded frame will not
 * be needed again (no PSNR requested, intra-only or B picture, and MB
 * decision mode is not rate-distortion based). */
2135 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2136 uint8_t *dest_y, *dest_cb, *dest_cr;
2137 int dct_linesize, dct_offset;
2138 op_pixels_func (*op_pix)[4];
2139 qpel_mc_func (*op_qpix)[16];
2140 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2141 const int uvlinesize = s->current_picture.f.linesize[1];
2142 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2143 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2145 /* avoid copy if macroblock skipped in last frame too */
2146 /* skip only during decoding as we might trash the buffers during encoding a bit */
2148 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2149 const int age = s->current_picture.f.age;
2153 if (s->mb_skipped) {
2155 assert(s->pict_type!=AV_PICTURE_TYPE_I);
/* skip counter saturates at 99 so it fits the table's byte entries */
2157 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
2158 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2160 /* if previous was skipped too, then nothing to do ! */
2161 if (*mbskip_ptr >= age && s->current_picture.f.reference){
2164 } else if(!s->current_picture.f.reference) {
2165 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
2166 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2168 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT: fields are interleaved, so double the stride and start
 * the lower blocks one (field) line down instead of block_size lines down */
2172 dct_linesize = linesize << s->interlaced_dct;
2173 dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
2177 dest_cb= s->dest[1];
2178 dest_cr= s->dest[2];
/* non-readable destination: reconstruct into the scratchpad instead */
2180 dest_y = s->b_scratchpad;
2181 dest_cb= s->b_scratchpad+16*linesize;
2182 dest_cr= s->b_scratchpad+32*linesize;
2186 /* motion handling */
2187 /* decoding or more than one mb_type (MC was already done otherwise) */
/* frame-threaded decoding: wait until the rows of the reference frames
 * that this MB's vectors point at have been decoded */
2190 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2191 if (s->mv_dir & MV_DIR_FORWARD) {
2192 ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
2194 if (s->mv_dir & MV_DIR_BACKWARD) {
2195 ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
/* lowres MC path: reuses the H.264 chroma MC functions */
2200 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
2202 if (s->mv_dir & MV_DIR_FORWARD) {
2203 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
/* after the first (put) prediction, subsequent ones are averaged in */
2204 op_pix = s->dsp.avg_h264_chroma_pixels_tab;
2206 if (s->mv_dir & MV_DIR_BACKWARD) {
2207 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
/* full-resolution MC path */
2210 op_qpix= s->me.qpel_put;
2211 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2212 op_pix = s->dsp.put_pixels_tab;
2214 op_pix = s->dsp.put_no_rnd_pixels_tab;
2216 if (s->mv_dir & MV_DIR_FORWARD) {
2217 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
/* switch to averaging ops so a backward prediction blends with the forward one */
2218 op_pix = s->dsp.avg_pixels_tab;
2219 op_qpix= s->me.qpel_avg;
2221 if (s->mv_dir & MV_DIR_BACKWARD) {
2222 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2227 /* skip dequant / idct if we are really late ;) */
2228 if(s->avctx->skip_idct){
2229 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2230 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2231 || s->avctx->skip_idct >= AVDISCARD_ALL)
2235 /* add dct residue */
/* Path 1: codecs whose coefficients are still quantized at this point —
 * dequantize and add the residue (blocks 0-3 luma, 4+ chroma). */
2236 if(s->encoding || !( s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
2237 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2238 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2239 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2240 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2241 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2243 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2244 if (s->chroma_y_shift){
2245 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2246 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2250 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2251 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2252 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2253 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* Path 2: already-dequantized coefficients — just IDCT-and-add. */
2256 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
2257 add_dct(s, block[0], 0, dest_y , dct_linesize);
2258 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2259 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2260 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2262 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2263 if(s->chroma_y_shift){//Chroma420
2264 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2265 add_dct(s, block[5], 5, dest_cr, uvlinesize);
/* 4:2:2 / 4:4:4: chroma can be interlaced too, recompute stride/offset */
2268 dct_linesize = uvlinesize << s->interlaced_dct;
2269 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
2271 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2272 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2273 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2274 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2275 if(!s->chroma_x_shift){//Chroma444
2276 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2277 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2278 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2279 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
/* Path 3: WMV2 has its own residue-add implementation. */
2284 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2285 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2288 /* dct only in intra block */
/* Intra macroblocks: no prediction to add, so put (overwrite) the IDCT
 * output, dequantizing first where still required. */
2289 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2290 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2291 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2292 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2293 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2295 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2296 if(s->chroma_y_shift){
2297 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2298 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2302 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2303 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2304 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2305 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* already dequantized (MPEG-1/2): direct idct_put */
2309 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2310 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2311 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2312 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2314 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2315 if(s->chroma_y_shift){
2316 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2317 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2320 dct_linesize = uvlinesize << s->interlaced_dct;
2321 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
2323 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2324 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2325 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2326 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2327 if(!s->chroma_x_shift){//Chroma444
2328 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2329 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2330 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2331 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
/* MB was rendered into the scratchpad: copy it to the real destination now. */
2339 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2340 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2341 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/**
 * Reconstruct one macroblock. Thin dispatcher that calls
 * MPV_decode_mb_internal() with compile-time-constant lowres/is_mpeg12
 * flags so each of the four instantiations is specialized.
 */
2346 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2348 if(s->out_format == FMT_MPEG1) {
2349 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2350 else MPV_decode_mb_internal(s, block, 0, 1);
2353 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2354 else MPV_decode_mb_internal(s, block, 0, 0);
2359  * @param h the normal height; it will be reduced automatically if needed for the last row
/**
 * Draw a horizontal band of decoded rows: pad the picture edges for
 * unrestricted motion vectors, then invoke the user's draw_horiz_band
 * callback (if set) with per-plane offsets for rows [y, y+h).
 */
2361 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2362 const int field_pic= s->picture_structure != PICT_FRAME;
/* Edge emulation: replicate border pixels of reference frames so MC may
 * read outside the picture. Skipped for hwaccel/VDPAU and EMU_EDGE. */
2368 if (!s->avctx->hwaccel
2369 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2370 && s->unrestricted_mv
2371 && s->current_picture.f.reference
2373 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2374 int sides = 0, edge_h;
2375 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
2376 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
/* also extend the top/bottom edge when this band touches it */
2377 if (y==0) sides |= EDGE_TOP;
2378 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
/* clip the band height to the coded picture height */
2380 edge_h= FFMIN(h, s->v_edge_pos - y);
2382 s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize,
2383 s->linesize, s->h_edge_pos, edge_h,
2384 EDGE_WIDTH, EDGE_WIDTH, sides);
/* chroma planes: positions and sizes scaled by the subsampling shifts */
2385 s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
2386 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2387 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2388 s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
2389 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2390 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
/* clip against the user-visible height for the callback */
2393 h= FFMIN(h, s->avctx->height - y);
2395 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2397 if (s->avctx->draw_horiz_band) {
2399 int offset[AV_NUM_DATA_POINTERS];
/* in coded order (or low delay / B picture) hand out the current picture,
 * otherwise the previous one which is the next in display order */
2402 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2403 src= (AVFrame*)s->current_picture_ptr;
2404 else if(s->last_picture_ptr)
2405 src= (AVFrame*)s->last_picture_ptr;
2409 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2410 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
/* per-plane byte offsets of the band start within the frame */
2413 offset[0]= y * s->linesize;
2415 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2416 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2422 s->avctx->draw_horiz_band(s->avctx, src, offset,
2423 y, s->picture_structure, h);
/**
 * Set up per-macroblock block indices and destination pointers
 * (s->block_index[], s->dest[]) for the current s->mb_x/s->mb_y.
 * Uses the current picture's linesizes, which differ from s->linesize
 * for field pictures.
 */
2427 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2428 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2429 const int uvlinesize = s->current_picture.f.linesize[1];
/* log2 of the MB size in pixels; reduced when lowres decoding */
2430 const int mb_size= 4 - s->avctx->lowres;
/* indices 0-3: the four 8x8 luma blocks on the b8 grid */
2432 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2433 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2434 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2435 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* indices 4-5: chroma blocks, stored after the luma area on the mb grid */
2436 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2437 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2438 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* x component of the destination; (mb_x - 1) because block_index above is
 * also offset — advanced per MB elsewhere */
2440 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2441 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2442 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2444 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2446 if(s->picture_structure==PICT_FRAME){
2447 s->dest[0] += s->mb_y * linesize << mb_size;
2448 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2449 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: only every second MB row belongs to this field */
2451 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2452 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2453 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2454 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/**
 * Flush/reset the decoder state: release all internally or user allocated
 * picture buffers, drop the reference-picture pointers and reset the
 * bitstream parse context. Called e.g. on seek.
 */
2459 void ff_mpeg_flush(AVCodecContext *avctx){
2461 MpegEncContext *s = avctx->priv_data;
/* nothing to flush before the context is fully initialized */
2463 if(s==NULL || s->picture==NULL)
2466 for(i=0; i<s->picture_count; i++){
2467 if (s->picture[i].f.data[0] &&
2468 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2469 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2470 free_frame_buffer(s, &s->picture[i]);
2472 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2474 s->mb_x= s->mb_y= 0;
/* reset the parser so stale partial-frame data cannot leak into the
 * next decode call */
2476 s->parse_context.state= -1;
2477 s->parse_context.frame_start_found= 0;
2478 s->parse_context.overread= 0;
2479 s->parse_context.overread_index= 0;
2480 s->parse_context.index= 0;
2481 s->parse_context.last_index= 0;
2482 s->bitstream_buffer_size=0;
/**
 * MPEG-1 intra dequantization for block n of the macroblock.
 * The DC coefficient (block[0]) is scaled by the luma or chroma DC scale;
 * AC coefficients are scaled by qscale and the intra quant matrix, with
 * (level - 1) | 1 forcing the result odd (MPEG-1 mismatch control).
 */
2486 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2487 DCTELEM *block, int n, int qscale)
2489 int i, level, nCoeffs;
2490 const uint16_t *quant_matrix;
2492 nCoeffs= s->block_last_index[n];
2495 block[0] = block[0] * s->y_dc_scale;
2497 block[0] = block[0] * s->c_dc_scale;
2498 /* XXX: only mpeg1 */
2499 quant_matrix = s->intra_matrix;
/* walk coefficients in scan order up to the last nonzero one */
2500 for(i=1;i<=nCoeffs;i++) {
2501 int j= s->intra_scantable.permutated[i];
2506 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2507 level = (level - 1) | 1;
2510 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2511 level = (level - 1) | 1;
/**
 * MPEG-1 inter (non-intra) dequantization for block n.
 * Uses the formula ((2*|level| + 1) * qscale * matrix[j]) >> 4 with the
 * inter quant matrix, and (level - 1) | 1 mismatch control; no special
 * DC handling (coefficient 0 is included in the loop).
 */
2518 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2519 DCTELEM *block, int n, int qscale)
2521 int i, level, nCoeffs;
2522 const uint16_t *quant_matrix;
2524 nCoeffs= s->block_last_index[n];
2526 quant_matrix = s->inter_matrix;
2527 for(i=0; i<=nCoeffs; i++) {
2528 int j= s->intra_scantable.permutated[i];
2533 level = (((level << 1) + 1) * qscale *
2534 ((int) (quant_matrix[j]))) >> 4;
2535 level = (level - 1) | 1;
2538 level = (((level << 1) + 1) * qscale *
2539 ((int) (quant_matrix[j]))) >> 4;
2540 level = (level - 1) | 1;
/**
 * MPEG-2 intra dequantization for block n.
 * Like the MPEG-1 intra variant but without the (level - 1) | 1
 * oddification; with alternate_scan all 63 AC coefficients are processed
 * regardless of block_last_index.
 */
2547 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2548 DCTELEM *block, int n, int qscale)
2550 int i, level, nCoeffs;
2551 const uint16_t *quant_matrix;
2553 if(s->alternate_scan) nCoeffs= 63;
2554 else nCoeffs= s->block_last_index[n];
2557 block[0] = block[0] * s->y_dc_scale;
2559 block[0] = block[0] * s->c_dc_scale;
2560 quant_matrix = s->intra_matrix;
2561 for(i=1;i<=nCoeffs;i++) {
2562 int j= s->intra_scantable.permutated[i];
2567 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2570 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * Bitexact variant of MPEG-2 intra dequantization for block n.
 * Same per-coefficient formula as dct_unquantize_mpeg2_intra_c; kept as a
 * separate function for the bitexact code path (elided details presumably
 * include the mismatch-control sum — confirm against the full source).
 */
2577 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2578 DCTELEM *block, int n, int qscale)
2580 int i, level, nCoeffs;
2581 const uint16_t *quant_matrix;
2584 if(s->alternate_scan) nCoeffs= 63;
2585 else nCoeffs= s->block_last_index[n];
2588 block[0] = block[0] * s->y_dc_scale;
2590 block[0] = block[0] * s->c_dc_scale;
2591 quant_matrix = s->intra_matrix;
2592 for(i=1;i<=nCoeffs;i++) {
2593 int j= s->intra_scantable.permutated[i];
2598 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2601 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * MPEG-2 inter (non-intra) dequantization for block n.
 * ((2*|level| + 1) * qscale * matrix[j]) >> 4 with the inter quant matrix;
 * unlike MPEG-1 there is no per-coefficient oddification. With
 * alternate_scan all 63 coefficients are processed.
 */
2610 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2611 DCTELEM *block, int n, int qscale)
2613 int i, level, nCoeffs;
2614 const uint16_t *quant_matrix;
2617 if(s->alternate_scan) nCoeffs= 63;
2618 else nCoeffs= s->block_last_index[n];
2620 quant_matrix = s->inter_matrix;
2621 for(i=0; i<=nCoeffs; i++) {
2622 int j= s->intra_scantable.permutated[i];
2627 level = (((level << 1) + 1) * qscale *
2628 ((int) (quant_matrix[j]))) >> 4;
2631 level = (((level << 1) + 1) * qscale *
2632 ((int) (quant_matrix[j]))) >> 4;
/**
 * H.263-style intra dequantization for block n:
 * |level| * (2*qscale) +/- qadd, where qadd = (qscale - 1) | 1.
 * DC (block[0]) is scaled by the luma/chroma DC scale and AC coefficients
 * are processed in raster order up to the last nonzero one.
 */
2641 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2642 DCTELEM *block, int n, int qscale)
2644 int i, level, qmul, qadd;
2647 assert(s->block_last_index[n]>=0);
2653 block[0] = block[0] * s->y_dc_scale;
2655 block[0] = block[0] * s->c_dc_scale;
/* qadd is forced odd */
2656 qadd = (qscale - 1) | 1;
2663 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2665 for(i=1; i<=nCoeffs; i++) {
/* negative levels: subtract qadd so the sign-symmetric mapping holds */
2669 level = level * qmul - qadd;
2671 level = level * qmul + qadd;
/**
 * H.263-style inter dequantization for block n.
 * Same qmul/qadd mapping as the intra variant but with no DC special
 * case: the loop starts at coefficient 0.
 */
2678 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2679 DCTELEM *block, int n, int qscale)
2681 int i, level, qmul, qadd;
2684 assert(s->block_last_index[n]>=0);
/* qadd is forced odd */
2686 qadd = (qscale - 1) | 1;
2689 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2691 for(i=0; i<=nCoeffs; i++) {
2695 level = level * qmul - qadd;
2697 level = level * qmul + qadd;
2705  * Set qscale and update the qscale-dependent variables: the clamped
 * qscale itself, chroma_qscale (via the chroma qscale table) and the
 * luma/chroma DC scale factors.
2707 void ff_set_qscale(MpegEncContext * s, int qscale)
/* clamp qscale to the valid range (upper bound 31 visible here) */
2711 else if (qscale > 31)
2715 s->chroma_qscale= s->chroma_qscale_table[qscale];
2717 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2718 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/**
 * Report decode progress (rows up to s->mb_y of the current picture) to
 * other frame-decoding threads. Suppressed for B pictures, partitioned
 * frames and after an error, where row completion cannot be guaranteed.
 */
2721 void MPV_report_decode_progress(MpegEncContext *s)
2723 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
2724 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);