2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
40 #include "xvmc_internal.h"
47 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
56 DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
58 DCTELEM *block, int n, int qscale);
59 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
60 DCTELEM *block, int n, int qscale);
63 /* enable all paranoid tests for rounding, overflows, etc... */
/* Default chroma qscale table: identity mapping (chroma qscale equals the
 * luma qscale for every value 0..31). */
69 static const uint8_t ff_default_chroma_qscale_table[32] = {
70 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
71 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
72 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* Constant DC quantizer scale of 8 for every qscale value (MPEG-1 style). */
75 const uint8_t ff_mpeg1_dc_scale_table[128] = {
76 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
82 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
83 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
84 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* Constant DC scale of 4 for every qscale value. */
87 static const uint8_t mpeg2_dc_scale_table1[128] = {
88 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
89 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
94 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
95 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
96 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* Constant DC scale of 2 for every qscale value. */
99 static const uint8_t mpeg2_dc_scale_table2[128] = {
100 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
101 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
106 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
107 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
108 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* Constant DC scale of 1 (lossless DC) for every qscale value. */
111 static const uint8_t mpeg2_dc_scale_table3[128] = {
112 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
113 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
117 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
118 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
119 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
120 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Selector: maps an index to a DC scale table with divisors 8/4/2/1.
 * NOTE(review): index is presumably the MPEG-2 intra_dc_precision field
 * (0..3) — confirm against the header parser. */
123 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
124 ff_mpeg1_dc_scale_table,
125 mpeg2_dc_scale_table1,
126 mpeg2_dc_scale_table2,
127 mpeg2_dc_scale_table3,
/* Candidate pixel-format lists offered during format negotiation for 4:2:0
 * content; the second adds hardware-accelerated formats. Initializer
 * contents are defined below (not shown in this excerpt). */
130 const enum PixelFormat ff_pixfmt_list_420[] = {
135 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
/* Scan [p, end) for an MPEG start code (byte sequence 00 00 01 xx).
 * *state carries the last 32 bits seen across calls, so a start code that
 * straddles two buffers is still detected. Returns a pointer just past a
 * found start code, or a position near 'end' when none was found. */
143 const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
145 uint32_t * restrict state)
/* Feed up to 3 bytes through the rolling state to catch a code that began
 * in the previous buffer (tmp == 0x100 means 00 00 01 just completed). */
153 for (i = 0; i < 3; i++) {
154 uint32_t tmp = *state << 8;
155 *state = tmp + *(p++);
156 if (tmp == 0x100 || p == end)
/* Skip ahead by as many bytes as the tail of the window proves cannot end
 * a 00 00 01 prefix (classic Boyer–Moore-style start-code skipping). */
161 if (p[-1] > 1 ) p += 3;
162 else if (p[-2] ) p += 2;
163 else if (p[-3]|(p[-1]-1)) p++;
/* Clamp to the buffer and step back so the final bytes are re-examined
 * byte-by-byte (the exact loop around this is elided in this excerpt). */
170 p = FFMIN(p, end) - 4;
176 /* init common dct for both encoder and decoder */
/* Installs the portable C dct_unquantize implementations, lets per-arch
 * init hooks override them with optimized versions, then builds the
 * IDCT-permutation-aware scantables. Shared by encoder and decoder. */
177 av_cold int ff_dct_common_init(MpegEncContext *s)
179 dsputil_init(&s->dsp, s->avctx);
/* Default (C) unquantizers; the bitexact MPEG-2 intra variant is chosen
 * when CODEC_FLAG_BITEXACT is requested. */
181 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
182 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
183 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
184 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
185 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
186 if (s->flags & CODEC_FLAG_BITEXACT)
187 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
188 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Architecture-specific overrides (each guarded by build-time conditionals
 * elided from this excerpt). */
191 MPV_common_init_mmx(s);
193 MPV_common_init_axp(s);
195 MPV_common_init_mlib(s);
197 MPV_common_init_mmi(s);
199 MPV_common_init_arm(s);
201 MPV_common_init_altivec(s);
203 MPV_common_init_bfin(s);
206 /* load & permutate scantables
207 * note: only wmv uses different ones
/* Scantables are permuted to match the IDCT's coefficient ordering. */
209 if (s->alternate_scan) {
210 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
211 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
213 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
214 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
216 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
217 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Duplicate src into dst (copy itself elided in this excerpt) and tag dst
 * as FF_BUFFER_TYPE_COPY — presumably so release logic knows dst does not
 * own the underlying buffers; confirm against free paths. */
222 void ff_copy_picture(Picture *dst, Picture *src)
225 dst->f.type = FF_BUFFER_TYPE_COPY;
229 * Release a frame buffer
/* Returns the picture's buffer to its allocator and drops any hwaccel
 * private data attached to it. */
231 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
233 /* Windows Media Image codecs allocate internal buffers with different
234 * dimensions; ignore user defined callbacks for these
236 if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
237 ff_thread_release_buffer(s->avctx, (AVFrame *) pic);
/* WMV3IMAGE/VC1IMAGE path: release through the default callback instead. */
239 avcodec_default_release_buffer(s->avctx, (AVFrame *) pic);
240 av_freep(&pic->f.hwaccel_picture_private);
244 * Allocate a frame buffer
/* Allocates pic's data planes via the (thread-aware) get_buffer path and,
 * when a hwaccel is active, its per-picture private data. Validates that
 * the returned buffer is usable and that strides match the context.
 * Returns 0 on success, negative on failure (error paths elided here). */
246 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
250 if (s->avctx->hwaccel) {
251 assert(!pic->f.hwaccel_picture_private);
252 if (s->avctx->hwaccel->priv_data_size) {
253 pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
254 if (!pic->f.hwaccel_picture_private) {
255 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
/* Same WMV3IMAGE/VC1IMAGE special case as in free_frame_buffer: those
 * codecs bypass the user/thread buffer callbacks. */
261 if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
262 r = ff_thread_get_buffer(s->avctx, (AVFrame *) pic);
264 r = avcodec_default_get_buffer(s->avctx, (AVFrame *) pic);
/* Reject buffers the callback returned in a broken state. */
266 if (r < 0 || !pic->f.age || !pic->f.type || !pic->f.data[0]) {
267 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n",
268 r, pic->f.age, pic->f.type, pic->f.data[0]);
269 av_freep(&pic->f.hwaccel_picture_private);
/* Strides must stay constant for the lifetime of the context. */
273 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
274 s->uvlinesize != pic->f.linesize[1])) {
275 av_log(s->avctx, AV_LOG_ERROR,
276 "get_buffer() failed (stride changed)\n");
277 free_frame_buffer(s, pic);
/* Both chroma planes must share one stride. */
281 if (pic->f.linesize[1] != pic->f.linesize[2]) {
282 av_log(s->avctx, AV_LOG_ERROR,
283 "get_buffer() failed (uv stride mismatch)\n");
284 free_frame_buffer(s, pic);
292 * Allocate a Picture.
293 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* Allocates a Picture plus all its per-macroblock side tables (qscale,
 * mb_type, motion vectors, ref indices, ...). With shared != 0 the pixel
 * data must already be present and is only re-tagged; otherwise
 * alloc_frame_buffer() fetches it. Returns 0 on success, frees everything
 * via the fail label otherwise. */
295 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
297 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
299 // the + 1 is needed so memset(,,stride*height) does not sig11
301 const int mb_array_size = s->mb_stride * s->mb_height;
302 const int b8_array_size = s->b8_stride * s->mb_height * 2;
303 const int b4_array_size = s->b4_stride * s->mb_height * 4;
/* Shared path: pixels provided by the caller, just mark the type. */
308 assert(pic->f.data[0]);
309 assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
310 pic->f.type = FF_BUFFER_TYPE_SHARED;
/* Non-shared path: must not already own pixels. */
312 assert(!pic->f.data[0]);
314 if (alloc_frame_buffer(s, pic) < 0)
/* Record the strides the whole context will use from now on. */
317 s->linesize = pic->f.linesize[0];
318 s->uvlinesize = pic->f.linesize[1];
/* Side tables are allocated once per Picture and reused afterwards. */
321 if (pic->f.qscale_table == NULL) {
/* Encoder-only statistics tables (mb variance / mean). */
323 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
324 mb_array_size * sizeof(int16_t), fail)
325 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
326 mb_array_size * sizeof(int16_t), fail)
327 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
328 mb_array_size * sizeof(int8_t ), fail)
331 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
332 mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
333 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
334 (big_mb_num + s->mb_stride) * sizeof(uint8_t),
336 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
337 (big_mb_num + s->mb_stride) * sizeof(uint32_t),
/* Public pointers start one row + one column into the base allocation so
 * code may safely index neighbours of edge macroblocks. */
339 pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
340 pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
/* H.264 uses 4x4 motion granularity (subsample_log2 == 2, b4 stride)... */
341 if (s->out_format == FMT_H264) {
342 for (i = 0; i < 2; i++) {
343 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
344 2 * (b4_array_size + 4) * sizeof(int16_t),
346 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
347 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
348 4 * mb_array_size * sizeof(uint8_t), fail)
350 pic->f.motion_subsample_log2 = 2;
/* ...everything else (and encoding / MV debug) uses 8x8 granularity. */
351 } else if (s->out_format == FMT_H263 || s->encoding ||
352 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
353 for (i = 0; i < 2; i++) {
354 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
355 2 * (b8_array_size + 4) * sizeof(int16_t),
357 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
358 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
359 4 * mb_array_size * sizeof(uint8_t), fail)
361 pic->f.motion_subsample_log2 = 3;
363 if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
364 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
365 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
367 pic->f.qstride = s->mb_stride;
368 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
369 1 * sizeof(AVPanScan), fail)
372 /* It might be nicer if the application would keep track of these
373 * but it would require an API change. */
/* Shift the picture-type history and use it to decide whether the buffer
 * "age" optimisation (skipped-MB copy from an old frame) is valid. */
374 memmove(s->prev_pict_types + 1, s->prev_pict_types,
375 PREV_PICT_TYPES_BUFFER_SIZE-1);
376 s->prev_pict_types[0] = s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
377 if (pic->f.age < PREV_PICT_TYPES_BUFFER_SIZE &&
378 s->prev_pict_types[pic->f.age] == AV_PICTURE_TYPE_B)
379 pic->f.age = INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2
380 // and it is a bit tricky to skip them anyway.
384 fail: // for the FF_ALLOCZ_OR_GOTO macro
386 free_frame_buffer(s, pic);
391 * Deallocate a picture.
/* Frees a Picture's pixel buffer (unless shared) and every side table
 * allocated by ff_alloc_picture(); shared pictures only get their data
 * pointers cleared since the pixels belong to the caller. */
393 static void free_picture(MpegEncContext *s, Picture *pic)
397 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
398 free_frame_buffer(s, pic);
401 av_freep(&pic->mb_var);
402 av_freep(&pic->mc_mb_var);
403 av_freep(&pic->mb_mean);
404 av_freep(&pic->f.mbskip_table);
405 av_freep(&pic->qscale_table_base);
406 av_freep(&pic->mb_type_base);
407 av_freep(&pic->f.dct_coeff);
408 av_freep(&pic->f.pan_scan);
409 pic->f.mb_type = NULL;
410 for (i = 0; i < 2; i++) {
411 av_freep(&pic->motion_val_base[i]);
412 av_freep(&pic->f.ref_index[i]);
/* Shared pixels are not ours to free — just forget the pointers. */
415 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
416 for (i = 0; i < 4; i++) {
418 pic->f.data[i] = NULL;
/* Allocates the per-thread scratch state of an MpegEncContext: edge
 * emulation buffer, ME scratchpads/maps, optional noise-reduction error
 * sums, DCT block storage, and (for FMT_H263) AC prediction values.
 * Returns 0 on success, -1 on allocation failure (cleanup is deferred to
 * MPV_common_end()). */
424 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
426 int y_size = s->b8_stride * (2 * s->mb_height + 1);
427 int c_size = s->mb_stride * (s->mb_height + 1);
428 int yc_size = y_size + 2 * c_size;
431 // edge emu needs blocksize + filter length - 1
432 // (= 17x17 for halfpel / 21x21 for h264)
433 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
434 (s->width + 64) * 2 * 21 * 2, fail); // (width + edge + align)*interlaced*MBsize*tolerance
436 // FIXME should be linesize instead of s->width * 2
437 // but that is not known before get_buffer()
438 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
439 (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
/* All scratchpads intentionally alias one allocation; obmc is offset. */
440 s->me.temp = s->me.scratchpad;
441 s->rd_scratchpad = s->me.scratchpad;
442 s->b_scratchpad = s->me.scratchpad;
443 s->obmc_scratchpad = s->me.scratchpad + 16;
445 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
446 ME_MAP_SIZE * sizeof(uint32_t), fail)
447 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
448 ME_MAP_SIZE * sizeof(uint32_t), fail)
449 if (s->avctx->noise_reduction) {
450 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
451 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 DCTELEMs (up to 4:4:4 with two fields), doubled. */
454 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
455 s->block = s->blocks[0];
457 for (i = 0; i < 12; i++) {
458 s->pblocks[i] = &s->block[i];
461 if (s->out_format == FMT_H263) {
/* AC prediction values: one luma plane followed by two chroma planes,
 * offset past the first row/column for neighbour access. */
463 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
464 yc_size * sizeof(int16_t) * 16, fail);
465 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
466 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
467 s->ac_val[2] = s->ac_val[1] + c_size;
472 return -1; // free() through MPV_common_end()
/* Frees everything init_duplicate_context() allocated for one thread
 * context. The aliasing scratchpads share one allocation, so only
 * me.scratchpad is freed and obmc_scratchpad is just cleared. */
475 static void free_duplicate_context(MpegEncContext *s)
480 av_freep(&s->edge_emu_buffer);
481 av_freep(&s->me.scratchpad);
485 s->obmc_scratchpad = NULL;
487 av_freep(&s->dct_error_sum);
488 av_freep(&s->me.map);
489 av_freep(&s->me.score_map);
490 av_freep(&s->blocks);
491 av_freep(&s->ac_val_base);
/* Copies only the per-thread (duplicate) fields from src to bak; used by
 * ff_update_duplicate_context() to preserve a thread's private buffers
 * across a whole-struct memcpy. */
495 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
497 #define COPY(a) bak->a = src->a
498 COPY(edge_emu_buffer);
503 COPY(obmc_scratchpad);
510 COPY(me.map_generation);
/* Re-synchronizes a slice-thread context with the master: copy the whole
 * struct, then restore dst's own per-thread buffers (saved in bak) and
 * re-point pblocks into dst's block storage. */
522 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
526 // FIXME copy only needed parts
528 backup_duplicate_context(&bak, dst);
529 memcpy(dst, src, sizeof(MpegEncContext));
530 backup_duplicate_context(dst, &bak);
531 for (i = 0; i < 12; i++) {
532 dst->pblocks[i] = &dst->block[i];
534 // STOP_TIMER("update_duplicate_context")
535 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading: copy decoding state from the previous thread's context
 * (src) into this thread's context (dst). On the first call the whole
 * struct is cloned and per-thread resources reset; afterwards only the
 * fields that may legitimately change between frames are copied. */
538 int ff_mpeg_update_thread_context(AVCodecContext *dst,
539 const AVCodecContext *src)
541 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
543 if (dst == src || !s1->context_initialized)
546 // FIXME can parameters change on I-frames?
547 // in that case dst may need a reinit
548 if (!s->context_initialized) {
549 memcpy(s, s1, sizeof(MpegEncContext));
/* Each thread owns a disjoint picture range and its own bitstream buffer;
 * do not share the pointers just memcpy'd from s1. */
552 s->picture_range_start += MAX_PICTURE_COUNT;
553 s->picture_range_end += MAX_PICTURE_COUNT;
554 s->bitstream_buffer = NULL;
555 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
560 s->avctx->coded_height = s1->avctx->coded_height;
561 s->avctx->coded_width = s1->avctx->coded_width;
562 s->avctx->width = s1->avctx->width;
563 s->avctx->height = s1->avctx->height;
565 s->coded_picture_number = s1->coded_picture_number;
566 s->picture_number = s1->picture_number;
567 s->input_picture_number = s1->input_picture_number;
/* Copy pictures and the last/next/current block of pointers; REBASE_PICTURE
 * translates s1's Picture pointers into s's own picture array. */
569 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
570 memcpy(&s->last_picture, &s1->last_picture,
571 (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
573 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
574 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
575 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
577 memcpy(s->prev_pict_types, s1->prev_pict_types,
578 PREV_PICT_TYPES_BUFFER_SIZE);
580 // Error/bug resilience
581 s->next_p_frame_damaged = s1->next_p_frame_damaged;
582 s->workaround_bugs = s1->workaround_bugs;
/* MPEG-4 field block copy: relies on struct layout between
 * time_increment_bits and shape staying contiguous. */
585 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
586 (char *) &s1->shape - (char *) &s1->time_increment_bits);
589 s->max_b_frames = s1->max_b_frames;
590 s->low_delay = s1->low_delay;
591 s->dropable = s1->dropable;
593 // DivX handling (doesn't work)
594 s->divx_packed = s1->divx_packed;
/* Carry over any buffered bitstream data, keeping padding zeroed. */
596 if (s1->bitstream_buffer) {
597 if (s1->bitstream_buffer_size +
598 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
599 av_fast_malloc(&s->bitstream_buffer,
600 &s->allocated_bitstream_buffer_size,
601 s1->allocated_bitstream_buffer_size);
602 s->bitstream_buffer_size = s1->bitstream_buffer_size;
603 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
604 s1->bitstream_buffer_size);
605 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
606 FF_INPUT_BUFFER_PADDING_SIZE);
609 // MPEG2/interlacing info
610 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
611 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
613 if (!s1->first_field) {
614 s->last_pict_type = s1->pict_type;
615 if (s1->current_picture_ptr)
616 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
618 if (s1->pict_type != AV_PICTURE_TYPE_B) {
619 s->last_non_b_pict_type = s1->pict_type;
627 * Set the given MpegEncContext to common defaults
628 * (same for encoding and decoding).
629 * The changed fields will not depend upon the
630 * prior state of the MpegEncContext.
632 void MPV_common_defaults(MpegEncContext *s)
/* MPEG-1 DC scale (all 8s) and identity chroma qscale are the baseline;
 * codecs that need different tables override these later. */
634 s->y_dc_scale_table =
635 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
636 s->chroma_qscale_table = ff_default_chroma_qscale_table;
637 s->progressive_frame = 1;
638 s->progressive_sequence = 1;
639 s->picture_structure = PICT_FRAME;
641 s->coded_picture_number = 0;
642 s->picture_number = 0;
643 s->input_picture_number = 0;
645 s->picture_in_gop_number = 0;
/* Full picture range; frame threads get shifted ranges of their own. */
650 s->picture_range_start = 0;
651 s->picture_range_end = MAX_PICTURE_COUNT;
655 * Set the given MpegEncContext to defaults for decoding.
656 * the changed fields will not depend upon
657 * the prior state of the MpegEncContext.
659 void MPV_decode_defaults(MpegEncContext *s)
661 MPV_common_defaults(s);
665 * init common structure for both encoder and decoder.
666 * this assumes that some variables like width/height are already set
/* Main one-time initializer: derives macroblock geometry from width/height,
 * allocates every global table (MV tables, mb_type, qscale, skip tables,
 * picture array, ...) and sets up per-thread duplicate contexts. Returns 0
 * on success; failure paths (elided in this excerpt) unwind allocations. */
668 av_cold int MPV_common_init(MpegEncContext *s)
670 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y,
671 threads = (s->encoding ||
673 s->avctx->active_thread_type & FF_THREAD_SLICE)) ?
674 s->avctx->thread_count : 1;
/* Interlaced MPEG-2 rounds mb_height to pairs of 16-pixel field rows;
 * H.264 manages its own mb_height. */
676 if (s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
677 s->mb_height = (s->height + 31) / 32 * 2;
678 else if (s->codec_id != CODEC_ID_H264)
679 s->mb_height = (s->height + 15) / 16;
681 if (s->avctx->pix_fmt == PIX_FMT_NONE) {
682 av_log(s->avctx, AV_LOG_ERROR,
683 "decoding to PIX_FMT_NONE is not supported.\n");
/* Slice threading cannot use more threads than macroblock rows. */
687 if ((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) &&
688 (s->avctx->thread_count > MAX_THREADS ||
689 (s->avctx->thread_count > s->mb_height && s->mb_height))) {
690 int max_threads = FFMIN(MAX_THREADS, s->mb_height);
691 av_log(s->avctx, AV_LOG_WARNING,
692 "too many threads (%d), reducing to %d\n",
693 s->avctx->thread_count, max_threads);
694 threads = max_threads;
697 if ((s->width || s->height) &&
698 av_image_check_size(s->width, s->height, 0, s->avctx))
701 ff_dct_common_init(s);
703 s->flags = s->avctx->flags;
704 s->flags2 = s->avctx->flags2;
706 if (s->width && s->height) {
/* Strides are one unit wider than the picture so edge macroblocks have a
 * valid right-neighbour slot. */
707 s->mb_width = (s->width + 15) / 16;
708 s->mb_stride = s->mb_width + 1;
709 s->b8_stride = s->mb_width * 2 + 1;
710 s->b4_stride = s->mb_width * 4 + 1;
711 mb_array_size = s->mb_height * s->mb_stride;
712 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
714 /* set chroma shifts */
715 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
716 &(s->chroma_y_shift) );
718 /* set default edge pos, will be overriden
719 * in decode_header if needed */
720 s->h_edge_pos = s->mb_width * 16;
721 s->v_edge_pos = s->mb_height * 16;
723 s->mb_num = s->mb_width * s->mb_height;
728 s->block_wrap[3] = s->b8_stride;
730 s->block_wrap[5] = s->mb_stride;
732 y_size = s->b8_stride * (2 * s->mb_height + 1);
733 c_size = s->mb_stride * (s->mb_height + 1);
734 yc_size = y_size + 2 * c_size;
736 /* convert fourcc to upper case */
737 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
739 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
741 s->avctx->coded_frame = (AVFrame *)&s->current_picture;
/* mb_index2xy maps a linear mb index to its (strided) xy position. */
743 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
744 fail); // error ressilience code looks cleaner with this
745 for (y = 0; y < s->mb_height; y++)
746 for (x = 0; x < s->mb_width; x++)
747 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
749 s->mb_index2xy[s->mb_height * s->mb_width] =
750 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
753 /* Allocate MV tables */
754 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
755 mv_table_size * 2 * sizeof(int16_t), fail);
756 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
757 mv_table_size * 2 * sizeof(int16_t), fail);
758 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
759 mv_table_size * 2 * sizeof(int16_t), fail);
760 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
761 mv_table_size * 2 * sizeof(int16_t), fail);
762 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
763 mv_table_size * 2 * sizeof(int16_t), fail);
764 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
765 mv_table_size * 2 * sizeof(int16_t), fail);
/* Working pointers are offset into the base allocations (offsets elided
 * in this excerpt). */
766 s->p_mv_table = s->p_mv_table_base +
768 s->b_forw_mv_table = s->b_forw_mv_table_base +
770 s->b_back_mv_table = s->b_back_mv_table_base +
772 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
774 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
776 s->b_direct_mv_table = s->b_direct_mv_table_base +
779 if (s->msmpeg4_version) {
780 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
781 2 * 2 * (MAX_LEVEL + 1) *
782 (MAX_RUN + 1) * 2 * sizeof(int), fail);
784 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
786 /* Allocate MB type table */
787 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
788 sizeof(uint16_t), fail); // needed for encoding
790 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
/* Quantization matrices: 32 qscales x 64 coefficients (int and int16
 * variants for the different quantizer implementations). */
793 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
794 64 * 32 * sizeof(int), fail);
795 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
796 64 * 32 * sizeof(int), fail);
797 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
798 64 * 32 * 2 * sizeof(uint16_t), fail);
799 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
800 64 * 32 * 2 * sizeof(uint16_t), fail);
801 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
802 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
803 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
804 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
806 if (s->avctx->noise_reduction) {
807 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
808 2 * 64 * sizeof(uint16_t), fail);
/* One picture pool large enough for all frame threads. */
813 s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
814 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
815 s->picture_count * sizeof(Picture), fail);
816 for (i = 0; i < s->picture_count; i++) {
817 avcodec_get_frame_defaults((AVFrame *) &s->picture[i]);
820 if (s->width && s->height) {
821 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
822 mb_array_size * sizeof(uint8_t), fail);
824 if (s->codec_id == CODEC_ID_MPEG4 ||
825 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
826 /* interlaced direct mode decoding tables */
827 for (i = 0; i < 2; i++) {
829 for (j = 0; j < 2; j++) {
830 for (k = 0; k < 2; k++) {
831 FF_ALLOCZ_OR_GOTO(s->avctx,
832 s->b_field_mv_table_base[i][j][k],
833 mv_table_size * 2 * sizeof(int16_t),
835 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
838 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
839 mb_array_size * 2 * sizeof(uint8_t),
841 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
842 mv_table_size * 2 * sizeof(int16_t),
844 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
847 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
848 mb_array_size * 2 * sizeof(uint8_t),
852 if (s->out_format == FMT_H263) {
854 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
855 s->coded_block = s->coded_block_base + s->b8_stride + 1;
857 /* cbp, ac_pred, pred_dir */
858 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
859 mb_array_size * sizeof(uint8_t), fail);
860 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
861 mb_array_size * sizeof(uint8_t), fail);
864 if (s->h263_pred || s->h263_plus || !s->encoding) {
866 // MN: we need these for error resilience of intra-frames
/* DC prediction values, laid out as luma plane then two chroma planes,
 * initialized to the neutral predictor 1024. */
867 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
868 yc_size * sizeof(int16_t), fail);
869 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
870 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
871 s->dc_val[2] = s->dc_val[1] + c_size;
872 for (i = 0; i < yc_size; i++)
873 s->dc_val_base[i] = 1024;
876 /* which mb is a intra block */
877 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
878 memset(s->mbintra_table, 1, mb_array_size);
880 /* init macroblock skip table */
881 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
882 // Note the + 1 is for a quicker mpeg4 slice_end detection
883 FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types,
884 PREV_PICT_TYPES_BUFFER_SIZE, fail);
886 s->parse_context.state = -1;
887 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
888 s->avctx->debug_mv) {
889 s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
890 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
891 s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
892 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
893 s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
894 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
898 s->context_initialized = 1;
899 s->thread_context[0] = s;
901 if (s->width && s->height) {
/* Slice threads: clone the context for each extra thread and give every
 * thread its contiguous band of macroblock rows. */
902 if (s->encoding || (HAVE_THREADS &&
903 s->avctx->active_thread_type&FF_THREAD_SLICE)) {
904 for (i = 1; i < threads; i++) {
905 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
906 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
909 for (i = 0; i < threads; i++) {
910 if (init_duplicate_context(s->thread_context[i], s) < 0)
912 s->thread_context[i]->start_mb_y =
913 (s->mb_height * (i) + s->avctx->thread_count / 2) /
914 s->avctx->thread_count;
915 s->thread_context[i]->end_mb_y =
916 (s->mb_height * (i + 1) + s->avctx->thread_count / 2) /
917 s->avctx->thread_count;
920 if (init_duplicate_context(s, s) < 0)
923 s->end_mb_y = s->mb_height;
933 /* init common structure for both encoder and decoder */
/* Tears down everything MPV_common_init() set up: thread contexts, all MV
 * and per-mb tables, the picture pool, and debug buffers; resets the
 * context so it can be re-initialized. */
934 void MPV_common_end(MpegEncContext *s)
/* thread_context[0] is s itself, so only indices >= 1 are freed. */
938 if (s->encoding || (HAVE_THREADS &&
939 s->avctx->active_thread_type & FF_THREAD_SLICE)) {
940 for (i = 0; i < s->avctx->thread_count; i++) {
941 free_duplicate_context(s->thread_context[i]);
943 for (i = 1; i < s->avctx->thread_count; i++) {
944 av_freep(&s->thread_context[i]);
946 } else free_duplicate_context(s);
948 av_freep(&s->parse_context.buffer);
949 s->parse_context.buffer_size = 0;
951 av_freep(&s->mb_type);
952 av_freep(&s->p_mv_table_base);
953 av_freep(&s->b_forw_mv_table_base);
954 av_freep(&s->b_back_mv_table_base);
955 av_freep(&s->b_bidir_forw_mv_table_base);
956 av_freep(&s->b_bidir_back_mv_table_base);
957 av_freep(&s->b_direct_mv_table_base);
958 s->p_mv_table = NULL;
959 s->b_forw_mv_table = NULL;
960 s->b_back_mv_table = NULL;
961 s->b_bidir_forw_mv_table = NULL;
962 s->b_bidir_back_mv_table = NULL;
963 s->b_direct_mv_table = NULL;
964 for (i = 0; i < 2; i++) {
965 for (j = 0; j < 2; j++) {
966 for (k = 0; k < 2; k++) {
967 av_freep(&s->b_field_mv_table_base[i][j][k]);
968 s->b_field_mv_table[i][j][k] = NULL;
970 av_freep(&s->b_field_select_table[i][j]);
971 av_freep(&s->p_field_mv_table_base[i][j]);
972 s->p_field_mv_table[i][j] = NULL;
974 av_freep(&s->p_field_select_table[i]);
977 av_freep(&s->dc_val_base);
978 av_freep(&s->coded_block_base);
979 av_freep(&s->mbintra_table);
980 av_freep(&s->cbp_table);
981 av_freep(&s->pred_dir_table);
983 av_freep(&s->mbskip_table);
984 av_freep(&s->prev_pict_types);
985 av_freep(&s->bitstream_buffer);
986 s->allocated_bitstream_buffer_size = 0;
988 av_freep(&s->avctx->stats_out);
989 av_freep(&s->ac_stats);
990 av_freep(&s->error_status_table);
991 av_freep(&s->mb_index2xy);
992 av_freep(&s->lambda_table);
993 av_freep(&s->q_intra_matrix);
994 av_freep(&s->q_inter_matrix);
995 av_freep(&s->q_intra_matrix16);
996 av_freep(&s->q_inter_matrix16);
997 av_freep(&s->input_picture);
998 av_freep(&s->reordered_input_picture);
999 av_freep(&s->dct_offset);
/* A frame-thread copy does not own the pictures — skip freeing them. */
1001 if (s->picture && !s->avctx->internal->is_copy) {
1002 for (i = 0; i < s->picture_count; i++) {
1003 free_picture(s, &s->picture[i]);
1006 av_freep(&s->picture);
1007 s->context_initialized = 0;
1008 s->last_picture_ptr =
1009 s->next_picture_ptr =
1010 s->current_picture_ptr = NULL;
1011 s->linesize = s->uvlinesize = 0;
1013 for (i = 0; i < 3; i++)
1014 av_freep(&s->visualization_buffer[i]);
1016 if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
1017 avcodec_default_free_buffers(s->avctx);
/* Builds an RLTable's derived lookup arrays (max_level per run, max_run
 * per level, index_run) for both the "not last" and "last" coefficient
 * sets. Storage comes from static_store when provided (tables built only
 * once), otherwise from av_malloc. */
1020 void init_rl(RLTable *rl,
1021 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1023 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1024 uint8_t index_run[MAX_RUN + 1];
1025 int last, run, level, start, end, i;
1027 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1028 if (static_store && rl->max_level[0])
1031 /* compute max_level[], max_run[] and index_run[] */
1032 for (last = 0; last < 2; last++) {
/* index_run entries default to rl->n, meaning "no code for this run". */
1041 memset(max_level, 0, MAX_RUN + 1);
1042 memset(max_run, 0, MAX_LEVEL + 1);
1043 memset(index_run, rl->n, MAX_RUN + 1);
1044 for (i = start; i < end; i++) {
1045 run = rl->table_run[i];
1046 level = rl->table_level[i];
1047 if (index_run[run] == rl->n)
1049 if (level > max_level[run])
1050 max_level[run] = level;
1051 if (run > max_run[level])
1052 max_run[level] = run;
/* Commit the scratch arrays into the table, carving the static store into
 * three consecutive regions when available. */
1055 rl->max_level[last] = static_store[last];
1057 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1058 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1060 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1062 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1063 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1065 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1067 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1068 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Precomputes, for every qscale 0..31, a flattened RL-VLC table so the
 * decoder can read (run, level, len) in a single lookup with the
 * dequantization (level * qmul + qadd) already folded in. */
1072 void init_vlc_rl(RLTable *rl)
1076 for (q = 0; q < 32; q++) {
/* Odd rounding offset used by the H.263-style dequantizer. */
1078 int qadd = (q - 1) | 1;
1084 for (i = 0; i < rl->vlc.table_size; i++) {
1085 int code = rl->vlc.table[i][0];
1086 int len = rl->vlc.table[i][1];
1089 if (len == 0) { // illegal code
1092 } else if (len < 0) { // more bits needed
1096 if (code == rl->n) { // esc
/* Codes past rl->last belong to the "last coefficient" set; the +192
 * run offset encodes that flag in the run field. */
1100 run = rl->table_run[code] + 1;
1101 level = rl->table_level[code] * qmul + qadd;
1102 if (code >= rl->last) run += 192;
1105 rl->rl_vlc[q][i].len = len;
1106 rl->rl_vlc[q][i].level = level;
1107 rl->rl_vlc[q][i].run = run;
/* Frees the buffers of all pictures that are allocated but no longer
 * referenced (and owned by this context); the current picture is spared
 * unless remove_current is set. */
1112 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1116 /* release non reference frames */
1117 for (i = 0; i < s->picture_count; i++) {
1118 if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1119 (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1120 (remove_current || &s->picture[i] != s->current_picture_ptr)
1121 /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1122 free_frame_buffer(s, &s->picture[i]);
/* Returns the index of a free slot in this context's picture range.
 * Preference order: for shared pictures an entirely untouched slot
 * (data==NULL, type==0); otherwise a previously-used slot of matching kind
 * before an untouched one. Aborts if the pool is exhausted — see the
 * rationale comment below. */
1127 int ff_find_unused_picture(MpegEncContext *s, int shared)
1132 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1133 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1137 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1138 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
1141 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1142 if (s->picture[i].f.data[0] == NULL)
1147 av_log(s->avctx, AV_LOG_FATAL,
1148 "Internal error, picture buffer overflow\n");
1149 /* We could return -1, but the codec would crash trying to draw into a
1150 * non-existing frame anyway. This is safer than waiting for a random crash.
1151 * Also the return of this is never useful, an encoder must only allocate
1152 * as much as allowed in the specification. This has no relationship to how
1153 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1154 * enough for such valid streams).
1155 * Plus, a decoder has to check stream validity and remove frames if too
1156 * many reference frames are around. Waiting for "OOM" is not correct at
1157 * all. Similarly, missing reference frames have to be replaced by
1158 * interpolated/MC frames, anything else is a bug in the codec ...
/* Refresh the per-coefficient noise-reduction offsets from the accumulated
 * DCT error statistics, separately for intra and inter blocks. */
1164 static void update_noise_reduction(MpegEncContext *s){
1167 for(intra=0; intra<2; intra++){
/* Halve both the error sums and the sample count once the count exceeds
 * 2^16, so the running averages track recent frames. */
1168 if(s->dct_count[intra] > (1<<16)){
1169 for(i=0; i<64; i++){
1170 s->dct_error_sum[intra][i] >>=1;
1172 s->dct_count[intra] >>= 1;
1175 for(i=0; i<64; i++){
/* Rounded division: error_sum/2 rounds, +1 in the divisor avoids a
 * division by zero when no error has been accumulated yet. */
1176 s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
1182 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
/* Per-frame setup shared by encoder and decoder: recycles old pictures,
 * picks/allocates the current picture, rotates last/next pointers,
 * allocates dummy references where the stream is missing them, adjusts
 * pointers for field pictures, and selects the dequantizer.
 * NOTE(review): interior lines are missing from this extract; annotations
 * below describe only the visible code. */
1184 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1190 assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
1192 /* mark&release old frames */
1193 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->f.data[0]) {
1194 if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
1195 if (s->last_picture_ptr->owner2 == s)
1196 free_frame_buffer(s, s->last_picture_ptr);
1198 /* release forgotten pictures */
1199 /* if(mpeg124/h263) */
1201 for(i=0; i<s->picture_count; i++){
/* A referenced picture that is neither last nor next at this point is a
 * leak ("zombie") — expected only with frame threading. */
1202 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].f.reference) {
1203 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1204 av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
1205 free_frame_buffer(s, &s->picture[i]);
1213 ff_release_unused_pictures(s, 1);
1215 if (s->current_picture_ptr && s->current_picture_ptr->f.data[0] == NULL)
1216 pic= s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
1218 i= ff_find_unused_picture(s, 0);
1219 pic= &s->picture[i];
1222 pic->f.reference = 0;
/* reference: H.264 stores the picture structure; other codecs mark
 * non-B frames with 3 (both fields referenced). */
1224 if (s->codec_id == CODEC_ID_H264)
1225 pic->f.reference = s->picture_structure;
1226 else if (s->pict_type != AV_PICTURE_TYPE_B)
1227 pic->f.reference = 3;
1230 pic->f.coded_picture_number = s->coded_picture_number++;
1232 if(ff_alloc_picture(s, pic, 0) < 0)
1235 s->current_picture_ptr= pic;
1236 //FIXME use only the vars from current_pic
1237 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1238 if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
/* For MPEG-1/2 field pictures, derive top_field_first from the structure
 * of the first field. */
1239 if(s->picture_structure != PICT_FRAME)
1240 s->current_picture_ptr->f.top_field_first = (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1242 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame && !s->progressive_sequence;
1243 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1246 s->current_picture_ptr->f.pict_type = s->pict_type;
1247 // if(s->flags && CODEC_FLAG_QSCALE)
1248 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
1249 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1251 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
/* Rotate references: for non-B frames, current becomes the new "next"
 * and the old next becomes "last". */
1253 if (s->pict_type != AV_PICTURE_TYPE_B) {
1254 s->last_picture_ptr= s->next_picture_ptr;
1256 s->next_picture_ptr= s->current_picture_ptr;
1258 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1259 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1260 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1261 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1262 s->pict_type, s->dropable);*/
/* Non-H.264: synthesize dummy reference frames when the stream starts
 * without a usable keyframe, so motion compensation has something to
 * read from instead of crashing. */
1264 if(s->codec_id != CODEC_ID_H264){
1265 if ((s->last_picture_ptr == NULL || s->last_picture_ptr->f.data[0] == NULL) &&
1266 (s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){
1267 if (s->pict_type != AV_PICTURE_TYPE_I)
1268 av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
1269 else if (s->picture_structure != PICT_FRAME)
1270 av_log(avctx, AV_LOG_INFO, "allocate dummy last picture for field based first keyframe\n");
1272 /* Allocate a dummy frame */
1273 i= ff_find_unused_picture(s, 0);
1274 s->last_picture_ptr= &s->picture[i];
1275 if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
/* Mark both fields of the dummy as fully decoded for frame threading. */
1277 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
1278 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
1280 if ((s->next_picture_ptr == NULL || s->next_picture_ptr->f.data[0] == NULL) && s->pict_type == AV_PICTURE_TYPE_B) {
1281 /* Allocate a dummy frame */
1282 i= ff_find_unused_picture(s, 0);
1283 s->next_picture_ptr= &s->picture[i];
1284 if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
1286 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 0);
1287 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 1);
1291 if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1292 if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1294 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->f.data[0]));
/* Field pictures (non-H.264): point data at the right field and double
 * the linesizes so each field is addressed as a half-height frame. */
1296 if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
1299 if(s->picture_structure == PICT_BOTTOM_FIELD){
1300 s->current_picture.f.data[i] += s->current_picture.f.linesize[i];
1302 s->current_picture.f.linesize[i] *= 2;
1303 s->last_picture.f.linesize[i] *= 2;
1304 s->next_picture.f.linesize[i] *= 2;
1308 s->err_recognition = avctx->err_recognition;
1310 /* set dequantizer, we can't do it during init as it might change for mpeg4
1311 and we can't do it in the header decode as init is not called for mpeg4 there yet */
1312 if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
1313 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1314 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1315 }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
1316 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1317 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1319 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1320 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1323 if(s->dct_error_sum){
1324 assert(s->avctx->noise_reduction && s->encoding);
1326 update_noise_reduction(s);
1329 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1330 return ff_xvmc_field_start(s, avctx);
1335 /* generic function for encode/decode called after a frame has been coded/decoded */
/* Per-frame teardown: draws edge padding for reference frames, saves
 * per-type state, syncs the current picture back into s->picture[],
 * releases non-reference buffers and reports decode completion.
 * NOTE(review): interior lines are missing from this extract. */
1336 void MPV_frame_end(MpegEncContext *s)
1339 /* redraw edges for the frame if decoding didn't complete */
1340 //just to make sure that all data is rendered.
1341 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1342 ff_xvmc_field_end(s);
/* Edge extension is only needed for software-decoded reference frames
 * with unrestricted MVs, and skipped when the user handles edges. */
1343 }else if((s->error_count || s->encoding)
1344 && !s->avctx->hwaccel
1345 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
1346 && s->unrestricted_mv
1347 && s->current_picture.f.reference
1349 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
1350 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
1351 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
1352 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1353 s->h_edge_pos , s->v_edge_pos,
1354 EDGE_WIDTH , EDGE_WIDTH , EDGE_TOP | EDGE_BOTTOM);
1355 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1356 s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
1357 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
1358 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1359 s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
1360 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
1365 s->last_pict_type = s->pict_type;
1366 s->last_lambda_for[s->pict_type] = s->current_picture_ptr->f.quality;
1367 if(s->pict_type!=AV_PICTURE_TYPE_B){
1368 s->last_non_b_pict_type= s->pict_type;
1371 /* copy back current_picture variables */
1372 for(i=0; i<MAX_PICTURE_COUNT; i++){
1373 if(s->picture[i].f.data[0] == s->current_picture.f.data[0]){
1374 s->picture[i]= s->current_picture;
1378 assert(i<MAX_PICTURE_COUNT);
1382 /* release non-reference frames */
1383 for(i=0; i<s->picture_count; i++){
1384 if (s->picture[i].f.data[0] && !s->picture[i].f.reference /*&& s->picture[i].type != FF_BUFFER_TYPE_SHARED*/) {
1385 free_frame_buffer(s, &s->picture[i]);
1389 // clear copies, to avoid confusion
1391 memset(&s->last_picture, 0, sizeof(Picture));
1392 memset(&s->next_picture, 0, sizeof(Picture));
1393 memset(&s->current_picture, 0, sizeof(Picture));
1395 s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
/* Unblock any frame-threaded consumers waiting on this picture. */
1397 if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
1398 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0);
1403 * Draw a line from (ex, ey) -> (sx, sy).
1404 * @param w width of the image
1405 * @param h height of the image
1406 * @param stride stride/linesize of the image
1407 * @param color color of the line
/* DDA line drawer in 16.16 fixed point: endpoints are clipped to the
 * image, then intensity is split between the two nearest rows (or
 * columns) for crude anti-aliasing.  The color is *added* to the buffer.
 * NOTE(review): interior lines are missing from this extract. */
1409 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1412 sx= av_clip(sx, 0, w-1);
1413 sy= av_clip(sy, 0, h-1);
1414 ex= av_clip(ex, 0, w-1);
1415 ey= av_clip(ey, 0, h-1);
1417 buf[sy*stride + sx]+= color;
/* Mostly-horizontal case: step in x, interpolate y. */
1419 if(FFABS(ex - sx) > FFABS(ey - sy)){
1421 FFSWAP(int, sx, ex);
1422 FFSWAP(int, sy, ey);
1424 buf+= sx + sy*stride;
1426 f= ((ey-sy)<<16)/ex;
1427 for(x= 0; x <= ex; x++){
/* fr is the 16-bit fractional part; weight the two adjacent rows. */
1430 buf[ y *stride + x]+= (color*(0x10000-fr))>>16;
1431 buf[(y+1)*stride + x]+= (color* fr )>>16;
/* Mostly-vertical case: step in y, interpolate x. */
1435 FFSWAP(int, sx, ex);
1436 FFSWAP(int, sy, ey);
1438 buf+= sx + sy*stride;
1440 if(ey) f= ((ex-sx)<<16)/ey;
1442 for(y= 0; y <= ey; y++){
1445 buf[y*stride + x ]+= (color*(0x10000-fr))>>16;
1446 buf[y*stride + x+1]+= (color* fr )>>16;
1452 * Draw an arrow from (ex, ey) -> (sx, sy).
1453 * @param w width of the image
1454 * @param h height of the image
1455 * @param stride stride/linesize of the image
1456 * @param color color of the arrow
/* Draw a motion-vector arrow: the shaft plus, for vectors longer than 3
 * pixels, two short head strokes scaled to a fixed length. */
1458 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* Clip generously (±100) so the head geometry of off-screen vectors is
 * still computed sensibly; draw_line() does the exact clipping. */
1461 sx= av_clip(sx, -100, w+100);
1462 sy= av_clip(sy, -100, h+100);
1463 ex= av_clip(ex, -100, w+100);
1464 ey= av_clip(ey, -100, h+100);
1469 if(dx*dx + dy*dy > 3*3){
1472 int length= ff_sqrt((rx*rx + ry*ry)<<8);
1474 //FIXME subpixel accuracy
/* Normalize (rx, ry) to a fixed head size (factor 3, 4-bit fixed point). */
1475 rx= ROUNDED_DIV(rx*3<<4, length);
1476 ry= ROUNDED_DIV(ry*3<<4, length);
/* Two head strokes: one along the shaft, one perpendicular to it. */
1478 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1479 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1481 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1485 * Print debugging info for the given picture.
/* Emit textual MB-level debug info (skip counts, QP, MB type) to the log,
 * and/or render visual overlays (motion vectors, QP, MB-type colors) into
 * a copy of the picture, depending on avctx->debug / debug_mv flags.
 * NOTE(review): interior lines are missing from this extract. */
1487 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
1489 if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
1491 if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1494 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1495 switch (pict->pict_type) {
1496 case AV_PICTURE_TYPE_I: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
1497 case AV_PICTURE_TYPE_P: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
1498 case AV_PICTURE_TYPE_B: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
1499 case AV_PICTURE_TYPE_S: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
1500 case AV_PICTURE_TYPE_SI: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
1501 case AV_PICTURE_TYPE_SP: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
/* One character (or digit) per macroblock, one text row per MB row. */
1503 for(y=0; y<s->mb_height; y++){
1504 for(x=0; x<s->mb_width; x++){
1505 if(s->avctx->debug&FF_DEBUG_SKIP){
1506 int count= s->mbskip_table[x + y*s->mb_stride];
1507 if(count>9) count=9;
1508 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1510 if(s->avctx->debug&FF_DEBUG_QP){
1511 av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
1513 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1514 int mb_type= pict->mb_type[x + y*s->mb_stride];
1515 //Type & MV direction
1517 av_log(s->avctx, AV_LOG_DEBUG, "P");
1518 else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1519 av_log(s->avctx, AV_LOG_DEBUG, "A");
1520 else if(IS_INTRA4x4(mb_type))
1521 av_log(s->avctx, AV_LOG_DEBUG, "i");
1522 else if(IS_INTRA16x16(mb_type))
1523 av_log(s->avctx, AV_LOG_DEBUG, "I");
1524 else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1525 av_log(s->avctx, AV_LOG_DEBUG, "d");
1526 else if(IS_DIRECT(mb_type))
1527 av_log(s->avctx, AV_LOG_DEBUG, "D");
1528 else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1529 av_log(s->avctx, AV_LOG_DEBUG, "g");
1530 else if(IS_GMC(mb_type))
1531 av_log(s->avctx, AV_LOG_DEBUG, "G");
1532 else if(IS_SKIP(mb_type))
1533 av_log(s->avctx, AV_LOG_DEBUG, "S");
1534 else if(!USES_LIST(mb_type, 1))
1535 av_log(s->avctx, AV_LOG_DEBUG, ">");
1536 else if(!USES_LIST(mb_type, 0))
1537 av_log(s->avctx, AV_LOG_DEBUG, "<");
1539 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1540 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* Second character: block segmentation of the MB. */
1545 av_log(s->avctx, AV_LOG_DEBUG, "+");
1546 else if(IS_16X8(mb_type))
1547 av_log(s->avctx, AV_LOG_DEBUG, "-");
1548 else if(IS_8X16(mb_type))
1549 av_log(s->avctx, AV_LOG_DEBUG, "|");
1550 else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1551 av_log(s->avctx, AV_LOG_DEBUG, " ");
1553 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* Third character: interlacing marker. */
1556 if(IS_INTERLACED(mb_type))
1557 av_log(s->avctx, AV_LOG_DEBUG, "=");
1559 av_log(s->avctx, AV_LOG_DEBUG, " ");
1561 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1563 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* Visual overlays: work on a private copy so the real output is intact. */
1567 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1568 s->avctx->debug_mv) {
1569 const int shift= 1 + s->quarter_sample;
1573 int h_chroma_shift, v_chroma_shift, block_height;
1574 const int width = s->avctx->width;
1575 const int height= s->avctx->height;
1576 const int mv_sample_log2= 4 - pict->motion_subsample_log2;
1577 const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1578 s->low_delay=0; //needed to see the vectors without trashing the buffers
1580 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1582 memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
1583 pict->data[i]= s->visualization_buffer[i];
1585 pict->type= FF_BUFFER_TYPE_COPY;
1587 block_height = 16>>v_chroma_shift;
1589 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1591 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1592 const int mb_index= mb_x + mb_y*s->mb_stride;
1593 if (s->avctx->debug_mv && pict->motion_val) {
/* type 0 = P forward, 1 = B forward, 2 = B backward; each is drawn only
 * if the matching FF_DEBUG_VIS_MV_* flag and picture type apply. */
1595 for(type=0; type<3; type++){
1598 case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_P))
1602 case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1606 case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1611 if(!USES_LIST(pict->mb_type[mb_index], direction))
/* 8x8 partitions: one arrow per 8x8 block. */
1614 if(IS_8X8(pict->mb_type[mb_index])){
1617 int sx= mb_x*16 + 4 + 8*(i&1);
1618 int sy= mb_y*16 + 4 + 8*(i>>1);
1619 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1620 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1621 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1622 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1624 }else if(IS_16X8(pict->mb_type[mb_index])){
1628 int sy=mb_y*16 + 4 + 8*i;
1629 int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
1630 int mx=(pict->motion_val[direction][xy][0]>>shift);
1631 int my=(pict->motion_val[direction][xy][1]>>shift);
1633 if(IS_INTERLACED(pict->mb_type[mb_index]))
1636 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1638 }else if(IS_8X16(pict->mb_type[mb_index])){
1641 int sx=mb_x*16 + 4 + 8*i;
1643 int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
1644 int mx=(pict->motion_val[direction][xy][0]>>shift);
1645 int my=(pict->motion_val[direction][xy][1]>>shift);
1647 if(IS_INTERLACED(pict->mb_type[mb_index]))
1650 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
/* Default: one arrow from the MB center. */
1653 int sx= mb_x*16 + 8;
1654 int sy= mb_y*16 + 8;
1655 int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
1656 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1657 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1658 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
/* QP overlay: paint both chroma planes with a gray level scaled from QP. */
1662 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
1663 uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
1665 for(y=0; y<block_height; y++){
1666 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
1667 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
/* MB-type overlay: one chroma (u,v) color per MB classification. */
1670 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
1671 int mb_type= pict->mb_type[mb_index];
1674 #define COLOR(theta, r)\
1675 u= (int)(128 + r*cos(theta*3.141592/180));\
1676 v= (int)(128 + r*sin(theta*3.141592/180));
1680 if(IS_PCM(mb_type)){
1682 }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
1684 }else if(IS_INTRA4x4(mb_type)){
1686 }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
1688 }else if(IS_DIRECT(mb_type)){
1690 }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
1692 }else if(IS_GMC(mb_type)){
1694 }else if(IS_SKIP(mb_type)){
1696 }else if(!USES_LIST(mb_type, 1)){
1698 }else if(!USES_LIST(mb_type, 0)){
1701 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1705 u*= 0x0101010101010101ULL;
1706 v*= 0x0101010101010101ULL;
1707 for(y=0; y<block_height; y++){
1708 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
1709 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
/* Segmentation lines: XOR with 0x80 so they are visible on any content. */
1713 if(IS_8X8(mb_type) || IS_16X8(mb_type)){
1714 *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1715 *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1717 if(IS_8X8(mb_type) || IS_8X16(mb_type)){
1719 pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
1721 if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
1722 int dm= 1 << (mv_sample_log2-2);
1724 int sx= mb_x*16 + 8*(i&1);
1725 int sy= mb_y*16 + 8*(i>>1);
1726 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
/* Mark sub-8x8 splits when the finer-grained MVs actually differ. */
1728 int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
1729 if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
1731 pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
1732 if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
1733 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
1737 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
1741 s->mbskip_table[mb_index]=0;
/* Half-pel motion compensation for one block in lowres decoding mode:
 * splits the MV into an integer part (at lowres resolution) and a
 * sub-pel fraction, edge-emulates out-of-frame reads, then applies the
 * chroma-style MC function.  Returns the emulation flag — TODO confirm,
 * the return statement is on a line missing from this extract. */
1747 static inline int hpel_motion_lowres(MpegEncContext *s,
1748 uint8_t *dest, uint8_t *src,
1749 int field_based, int field_select,
1750 int src_x, int src_y,
1751 int width, int height, int stride,
1752 int h_edge_pos, int v_edge_pos,
1753 int w, int h, h264_chroma_mc_func *pix_op,
1754 int motion_x, int motion_y)
1756 const int lowres= s->avctx->lowres;
1757 const int op_index= FFMIN(lowres, 2);
/* s_mask selects the sub-pel bits that survive the lowres downshift. */
1758 const int s_mask= (2<<lowres)-1;
1762 if(s->quarter_sample){
1767 sx= motion_x & s_mask;
1768 sy= motion_y & s_mask;
1769 src_x += motion_x >> (lowres+1);
1770 src_y += motion_y >> (lowres+1);
1772 src += src_y * stride + src_x;
/* Unsigned compare catches negative coordinates as well as overruns. */
1774 if( (unsigned)src_x > h_edge_pos - (!!sx) - w
1775 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1776 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
1777 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1778 src= s->edge_emu_buffer;
/* Rescale the sub-pel fraction into the 1/8-pel units pix_op expects. */
1782 sx= (sx << 2) >> lowres;
1783 sy= (sy << 2) >> lowres;
1786 pix_op[op_index](dest, src, stride, h, sx, sy);
1790 /* apply one mpeg motion vector to the three components */
/* Apply one MPEG motion vector to all three planes in lowres mode:
 * computes luma and chroma source positions (per output format), handles
 * edge emulation and field addressing, then runs the MC functions.
 * NOTE(review): interior lines are missing from this extract. */
1791 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
1792 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1793 int field_based, int bottom_field, int field_select,
1794 uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
1795 int motion_x, int motion_y, int h, int mb_y)
1797 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1798 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
1799 const int lowres= s->avctx->lowres;
1800 const int op_index= FFMIN(lowres, 2);
1801 const int block_s= 8>>lowres;
1802 const int s_mask= (2<<lowres)-1;
1803 const int h_edge_pos = s->h_edge_pos >> lowres;
1804 const int v_edge_pos = s->v_edge_pos >> lowres;
1805 linesize = s->current_picture.f.linesize[0] << field_based;
1806 uvlinesize = s->current_picture.f.linesize[1] << field_based;
1808 if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
1814 motion_y += (bottom_field - field_select)*((1<<lowres)-1);
1817 sx= motion_x & s_mask;
1818 sy= motion_y & s_mask;
1819 src_x = s->mb_x*2*block_s + (motion_x >> (lowres+1));
1820 src_y =( mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
/* Chroma MV derivation depends on the container format's chroma rules. */
1822 if (s->out_format == FMT_H263) {
1823 uvsx = ((motion_x>>1) & s_mask) | (sx&1);
1824 uvsy = ((motion_y>>1) & s_mask) | (sy&1);
1827 }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
1830 uvsx = (2*mx) & s_mask;
1831 uvsy = (2*my) & s_mask;
1832 uvsrc_x = s->mb_x*block_s + (mx >> lowres);
1833 uvsrc_y = mb_y*block_s + (my >> lowres);
1839 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1840 uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
1843 ptr_y = ref_picture[0] + src_y * linesize + src_x;
1844 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1845 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* Edge emulation when the 16x16 luma read area leaves the picture. */
1847 if( (unsigned)src_x > h_edge_pos - (!!sx) - 2*block_s
1848 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1849 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
1850 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1851 ptr_y = s->edge_emu_buffer;
1852 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1853 uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
1854 s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9+field_based,
1855 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1856 s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
1857 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1863 if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
1864 dest_y += s->linesize;
1865 dest_cb+= s->uvlinesize;
1866 dest_cr+= s->uvlinesize;
1870 ptr_y += s->linesize;
1871 ptr_cb+= s->uvlinesize;
1872 ptr_cr+= s->uvlinesize;
/* Rescale sub-pel fractions into the units the MC functions expect. */
1875 sx= (sx << 2) >> lowres;
1876 sy= (sy << 2) >> lowres;
1877 pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);
1879 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1880 uvsx= (uvsx << 2) >> lowres;
1881 uvsy= (uvsy << 2) >> lowres;
1882 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1883 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1885 //FIXME h261 lowres loop filter
/* Chroma MC for 4MV macroblocks in lowres mode: builds one averaged
 * chroma MV from the four luma MVs (H.263 rounding), then MCs both
 * chroma planes, with edge emulation if needed.
 * NOTE(review): interior lines are missing from this extract. */
1888 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
1889 uint8_t *dest_cb, uint8_t *dest_cr,
1890 uint8_t **ref_picture,
1891 h264_chroma_mc_func *pix_op,
1893 const int lowres= s->avctx->lowres;
1894 const int op_index= FFMIN(lowres, 2);
1895 const int block_s= 8>>lowres;
1896 const int s_mask= (2<<lowres)-1;
1897 const int h_edge_pos = s->h_edge_pos >> (lowres+1);
1898 const int v_edge_pos = s->v_edge_pos >> (lowres+1);
1899 int emu=0, src_x, src_y, offset, sx, sy;
1902 if(s->quarter_sample){
1907 /* In case of 8X8, we construct a single chroma motion vector
1908 with a special rounding */
1909 mx= ff_h263_round_chroma(mx);
1910 my= ff_h263_round_chroma(my);
1914 src_x = s->mb_x*block_s + (mx >> (lowres+1));
1915 src_y = s->mb_y*block_s + (my >> (lowres+1));
1917 offset = src_y * s->uvlinesize + src_x;
1918 ptr = ref_picture[1] + offset;
1919 if(s->flags&CODEC_FLAG_EMU_EDGE){
1920 if( (unsigned)src_x > h_edge_pos - (!!sx) - block_s
1921 || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
1922 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1923 ptr= s->edge_emu_buffer;
1927 sx= (sx << 2) >> lowres;
1928 sy= (sy << 2) >> lowres;
1929 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr plane: same offset and emulation decision (emu flag) as Cb. */
1931 ptr = ref_picture[2] + offset;
1933 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1934 ptr= s->edge_emu_buffer;
1936 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
1940 * motion compensation of a single macroblock
1942 * @param dest_y luma destination pointer
1943 * @param dest_cb chroma cb/u destination pointer
1944 * @param dest_cr chroma cr/v destination pointer
1945 * @param dir direction (0->forward, 1->backward)
1946 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1947 * @param pix_op halfpel motion compensation function (average or put normally)
1948 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/* Dispatch lowres motion compensation for one macroblock according to
 * s->mv_type (16x16, 8x8, field, 16x8, dual-prime), using s->mv and
 * s->field_select.  NOTE(review): interior lines (case labels, loop
 * headers, braces) are missing from this extract. */
1950 static inline void MPV_motion_lowres(MpegEncContext *s,
1951 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1952 int dir, uint8_t **ref_picture,
1953 h264_chroma_mc_func *pix_op)
1957 const int lowres= s->avctx->lowres;
1958 const int block_s= 8>>lowres;
1963 switch(s->mv_type) {
/* MV_TYPE_16X16: one vector for the whole macroblock. */
1965 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1967 ref_picture, pix_op,
1968 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
/* MV_TYPE_8X8: four luma vectors; chroma uses their rounded average. */
1974 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
1975 ref_picture[0], 0, 0,
1976 (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
1977 s->width, s->height, s->linesize,
1978 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
1979 block_s, block_s, pix_op,
1980 s->mv[dir][i][0], s->mv[dir][i][1]);
1982 mx += s->mv[dir][i][0];
1983 my += s->mv[dir][i][1];
1986 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
1987 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
/* MV_TYPE_FIELD: frame pictures MC each field separately ... */
1990 if (s->picture_structure == PICT_FRAME) {
1992 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1993 1, 0, s->field_select[dir][0],
1994 ref_picture, pix_op,
1995 s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
1997 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1998 1, 1, s->field_select[dir][1],
1999 ref_picture, pix_op,
2000 s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
/* ... field pictures may reference the opposite parity inside the
 * current frame rather than the reference picture. */
2002 if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
2003 ref_picture = s->current_picture_ptr->f.data;
2006 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2007 0, 0, s->field_select[dir][0],
2008 ref_picture, pix_op,
2009 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
/* MV_TYPE_16X8: two vectors, one per 16x8 half. */
2014 uint8_t ** ref2picture;
2016 if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
2017 ref2picture= ref_picture;
2019 ref2picture = s->current_picture_ptr->f.data;
2022 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2023 0, 0, s->field_select[dir][i],
2024 ref2picture, pix_op,
2025 s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);
2027 dest_y += 2*block_s*s->linesize;
2028 dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
2029 dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
/* MV_TYPE_DMV (dual prime): put then avg the opposite-parity prediction. */
2033 if(s->picture_structure == PICT_FRAME){
2037 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2039 ref_picture, pix_op,
2040 s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
2042 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
2046 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2047 0, 0, s->picture_structure != i+1,
2048 ref_picture, pix_op,
2049 s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
2051 // after put we make avg of the same block
2052 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
2054 //opposite parity is always in the same frame if this is second field
2055 if(!s->first_field){
2056 ref_picture = s->current_picture_ptr->f.data;
2066 * find the lowest MB row referenced in the MVs
/* Return the lowest macroblock row of the reference picture touched by
 * the current MB's motion vectors in direction 'dir' (used to bound
 * frame-thread waits).  Falls back to the last row when unhandled. */
2068 int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
/* qpel_shift normalizes half-pel MVs to quarter-pel units. */
2070 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2071 int my, off, i, mvs;
2073 if (s->picture_structure != PICT_FRAME) goto unhandled;
/* mvs is set per mv_type in case labels missing from this extract. */
2075 switch (s->mv_type) {
2089 for (i = 0; i < mvs; i++) {
2090 my = s->mv[dir][i][1]<<qpel_shift;
2091 my_max = FFMAX(my_max, my);
2092 my_min = FFMIN(my_min, my);
/* +63 >> 6: quarter-pel units to MB rows (16 px * 4), rounded up. */
2095 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2097 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2099 return s->mb_height-1;
2102 /* put block[] to dest[] */
/* Dequantize block i with the intra dequantizer, then IDCT and store
 * (overwrite) the result at dest. */
2103 static inline void put_dct(MpegEncContext *s,
2104 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2106 s->dct_unquantize_intra(s, block, i, qscale);
2107 s->dsp.idct_put (dest, line_size, block);
2110 /* add block[] to dest[] */
/* IDCT block i and add the residual onto dest; skipped when the block
 * has no nonzero coefficients (block_last_index < 0). */
2111 static inline void add_dct(MpegEncContext *s,
2112 DCTELEM *block, int i, uint8_t *dest, int line_size)
2114 if (s->block_last_index[i] >= 0) {
2115 s->dsp.idct_add (dest, line_size, block);
/* Dequantize block i with the inter dequantizer, IDCT, and add the
 * residual onto dest; skipped for empty blocks. */
2119 static inline void add_dequant_dct(MpegEncContext *s,
2120 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2122 if (s->block_last_index[i] >= 0) {
2123 s->dct_unquantize_inter(s, block, i, qscale);
2125 s->dsp.idct_add (dest, line_size, block);
2130 * Clean dc, ac, coded_block for the current non-intra MB.
/* Reset intra prediction state (DC predictors, AC values, coded-block
 * flags) for the current non-intra macroblock, so later intra MBs do not
 * predict from it.  NOTE(review): some interior lines are missing. */
2132 void ff_clean_intra_table_entries(MpegEncContext *s)
2134 int wrap = s->b8_stride;
2135 int xy = s->block_index[0];
/* Luma DC predictors back to the reset value 1024 (= 128 << 3). */
2138 s->dc_val[0][xy + 1 ] =
2139 s->dc_val[0][xy + wrap] =
2140 s->dc_val[0][xy + 1 + wrap] = 1024;
/* ac pred: clear two 8x8 blocks' worth of AC coefficients per row. */
2142 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2143 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2144 if (s->msmpeg4_version>=3) {
2145 s->coded_block[xy ] =
2146 s->coded_block[xy + 1 ] =
2147 s->coded_block[xy + wrap] =
2148 s->coded_block[xy + 1 + wrap] = 0;
/* Chroma predictors are indexed at macroblock resolution. */
2151 wrap = s->mb_stride;
2152 xy = s->mb_x + s->mb_y * wrap;
2154 s->dc_val[2][xy] = 1024;
2156 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2157 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2159 s->mbintra_table[xy]= 0;
2162 /* generic function called after a macroblock has been parsed by the
2163 decoder or after it has been encoded by the encoder.
2165 Important variables used:
2166 s->mb_intra : true if intra macroblock
2167 s->mv_dir : motion vector direction
2168 s->mv_type : motion vector type
2169 s->mv : motion vector
2170 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* Reconstruct one macroblock: motion compensation followed by
 * dequantization/IDCT of the residual. Templated (via av_always_inline
 * constant propagation) on lowres_flag and is_mpeg12 so the compiler can
 * drop dead branches for each specialization. */
2172 static av_always_inline
2173 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
2174 int lowres_flag, int is_mpeg12)
2176 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* XvMC acceleration: hand the MB to the hardware path */
2177 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2178 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2182 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2183 /* save DCT coefficients */
/* copy the 6 blocks of this MB (in IDCT permutation order) into
 * dct_coeff and print them for debugging */
2185 DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
2186 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2188 for(j=0; j<64; j++){
2189 *dct++ = block[i][s->dsp.idct_permutation[j]];
2190 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2192 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2196 s->current_picture.f.qscale_table[mb_xy] = s->qscale;
2198 /* update DC predictors for P macroblocks */
/* for H.263-style prediction, flush stale intra state of this MB */
2200 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2201 if(s->mbintra_table[mb_xy])
2202 ff_clean_intra_table_entries(s);
/* reset chroma DC predictor to mid value (precision dependent) */
2206 s->last_dc[2] = 128 << s->intra_dc_precision;
2209 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2210 s->mbintra_table[mb_xy]=1;
/* reconstruct pixels unless we are encoding and nobody will read them */
2212 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2213 uint8_t *dest_y, *dest_cb, *dest_cr;
2214 int dct_linesize, dct_offset;
2215 op_pixels_func (*op_pix)[4];
2216 qpel_mc_func (*op_qpix)[16];
2217 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2218 const int uvlinesize = s->current_picture.f.linesize[1];
2219 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2220 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2222 /* avoid copy if macroblock skipped in last frame too */
2223 /* skip only during decoding as we might trash the buffers during encoding a bit */
2225 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2226 const int age = s->current_picture.f.age;
/* mbskip_table counts how many frames in a row this MB was skipped */
2230 if (s->mb_skipped) {
2232 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2234 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
2235 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2237 /* if previous was skipped too, then nothing to do ! */
2238 if (*mbskip_ptr >= age && s->current_picture.f.reference){
2241 } else if(!s->current_picture.f.reference) {
2242 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
2243 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2245 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT: the two 8x8 luma rows interleave field lines */
2249 dct_linesize = linesize << s->interlaced_dct;
2250 dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
/* write either directly into the frame or, when the frame must not be
 * touched yet (!readable), into the scratchpad */
2254 dest_cb= s->dest[1];
2255 dest_cr= s->dest[2];
2257 dest_y = s->b_scratchpad;
2258 dest_cb= s->b_scratchpad+16*linesize;
2259 dest_cr= s->b_scratchpad+32*linesize;
2263 /* motion handling */
2264 /* decoding or more than one mb_type (MC was already done otherwise) */
/* frame threading: wait until reference rows we need are decoded */
2267 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2268 if (s->mv_dir & MV_DIR_FORWARD) {
2269 ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
2271 if (s->mv_dir & MV_DIR_BACKWARD) {
2272 ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
/* lowres MC path: uses the h264 chroma MC functions as scalers */
2277 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
2279 if (s->mv_dir & MV_DIR_FORWARD) {
2280 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2281 op_pix = s->dsp.avg_h264_chroma_pixels_tab;
2283 if (s->mv_dir & MV_DIR_BACKWARD) {
2284 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
/* full-resolution MC path; second direction averages over the first */
2287 op_qpix= s->me.qpel_put;
2288 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2289 op_pix = s->dsp.put_pixels_tab;
2291 op_pix = s->dsp.put_no_rnd_pixels_tab;
2293 if (s->mv_dir & MV_DIR_FORWARD) {
2294 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2295 op_pix = s->dsp.avg_pixels_tab;
2296 op_qpix= s->me.qpel_avg;
2298 if (s->mv_dir & MV_DIR_BACKWARD) {
2299 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2304 /* skip dequant / idct if we are really late ;) */
2305 if(s->avctx->skip_idct){
2306 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2307 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2308 || s->avctx->skip_idct >= AVDISCARD_ALL)
2312 /* add dct residue */
/* codecs whose dequantization was not already done during parsing:
 * dequantize + IDCT + add in one step */
2313 if(s->encoding || !( s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
2314 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2315 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2316 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2317 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2318 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2320 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2321 if (s->chroma_y_shift){
2322 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2323 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
/* 4:2:2-style chroma: two blocks per chroma plane */
2327 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2328 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2329 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2330 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* blocks already dequantized by the parser: IDCT + add only */
2333 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
2334 add_dct(s, block[0], 0, dest_y , dct_linesize);
2335 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2336 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2337 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2339 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2340 if(s->chroma_y_shift){//Chroma420
2341 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2342 add_dct(s, block[5], 5, dest_cr, uvlinesize);
/* recompute chroma geometry for interlaced 4:2:2/4:4:4 blocks */
2345 dct_linesize = uvlinesize << s->interlaced_dct;
2346 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
2348 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2349 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2350 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2351 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2352 if(!s->chroma_x_shift){//Chroma444
2353 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2354 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2355 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2356 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
/* WMV2 has its own MB reconstruction */
2361 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2362 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2365 /* dct only in intra block */
/* intra MB: IDCT + put (dequantize first unless MPEG-1/2 already did) */
2366 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2367 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2368 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2369 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2370 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2372 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2373 if(s->chroma_y_shift){
2374 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2375 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2379 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2380 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2381 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2382 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* MPEG-1/2 intra blocks arrive already dequantized: plain idct_put */
2386 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2387 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2388 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2389 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2391 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2392 if(s->chroma_y_shift){
2393 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2394 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2397 dct_linesize = uvlinesize << s->interlaced_dct;
2398 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
2400 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2401 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2402 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2403 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2404 if(!s->chroma_x_shift){//Chroma444
2405 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2406 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2407 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2408 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
/* copy the scratchpad reconstruction into the real frame
 * (NOTE(review): presumably guarded by !readable — condition elided) */
2416 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2417 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2418 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public MB reconstruction entry point: dispatch to the specialized
 * MPV_decode_mb_internal() variant (MPEG-1/2 vs other codecs, lowres vs
 * full resolution) so each instantiation is optimized separately. */
2423 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2425 if(s->out_format == FMT_MPEG1) {
2426 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2427 else MPV_decode_mb_internal(s, block, 0, 1);
2430 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2431 else MPV_decode_mb_internal(s, block, 0, 0);
2435 * @param h is the normal height, this will be reduced automatically if needed for the last row
/* Notify the user of a finished horizontal band and pad its edges.
 * y is the top row of the band, h its nominal height (clipped below). */
2437 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2438 const int field_pic= s->picture_structure != PICT_FRAME;
/* replicate the band's border pixels into the frame's edge area, needed
 * for unrestricted motion vectors; skipped for hwaccel frames and when
 * the caller asked for edge emulation */
2444 if (!s->avctx->hwaccel
2445 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2446 && s->unrestricted_mv
2447 && s->current_picture.f.reference
2449 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2450 int sides = 0, edge_h;
2451 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
2452 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
/* only pad top/bottom edges when the band touches them */
2453 if (y==0) sides |= EDGE_TOP;
2454 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
/* clip the band at the bottom of the coded picture */
2456 edge_h= FFMIN(h, s->v_edge_pos - y);
2458 s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize,
2459 s->linesize, s->h_edge_pos, edge_h,
2460 EDGE_WIDTH, EDGE_WIDTH, sides);
/* chroma planes: same call with chroma-subsampled geometry */
2461 s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
2462 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2463 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2464 s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
2465 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2466 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
/* clip against the display height */
2469 h= FFMIN(h, s->avctx->height - y);
/* individual fields are only reported if the user opted in */
2471 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2473 if (s->avctx->draw_horiz_band) {
2475 int offset[AV_NUM_DATA_POINTERS];
/* choose the frame in display order: B frames and low-delay streams
 * show the current picture, otherwise the previous reference */
2478 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2479 src= (AVFrame*)s->current_picture_ptr;
2480 else if(s->last_picture_ptr)
2481 src= (AVFrame*)s->last_picture_ptr;
2485 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2486 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
/* byte offset of the band inside each plane */
2489 offset[0]= y * s->linesize;
2491 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2492 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2498 s->avctx->draw_horiz_band(s->avctx, src, offset,
2499 y, s->picture_structure, h);
/* Initialize s->block_index[] (positions of this MB's blocks in the
 * per-block prediction arrays) and s->dest[] (plane write pointers) for
 * the macroblock at (s->mb_x, s->mb_y). */
2503 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2504 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2505 const int uvlinesize = s->current_picture.f.linesize[1];
/* log2 of the MB size in pixels; lowres halves it per level */
2506 const int mb_size= 4 - s->avctx->lowres;
/* four luma 8x8 blocks, addressed on the b8 grid */
2508 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2509 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2510 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2511 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* chroma blocks are stored after the whole luma area of the arrays */
2512 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2513 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2514 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* dest points one MB left of the current column; advanced per MB */
2516 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2517 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2518 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
/* NOTE(review): guards the vertical adjustment below — body braces are
 * elided in this excerpt */
2520 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2522 if(s->picture_structure==PICT_FRAME){
2523 s->dest[0] += s->mb_y * linesize << mb_size;
2524 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2525 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: two mb_y values map to the same frame row */
2527 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2528 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2529 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2530 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/* Flush decoder state (e.g. on seek): release all owned frame buffers,
 * drop the reference picture pointers and reset the parser/bitstream
 * buffer state. */
2535 void ff_mpeg_flush(AVCodecContext *avctx){
2537 MpegEncContext *s = avctx->priv_data;
/* nothing to do on an uninitialized context */
2539 if(s==NULL || s->picture==NULL)
2542 for(i=0; i<s->picture_count; i++){
/* only free buffers this context actually owns */
2543 if (s->picture[i].f.data[0] &&
2544 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2545 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2546 free_frame_buffer(s, &s->picture[i]);
2548 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2550 s->mb_x= s->mb_y= 0;
/* reset the startcode parser to its initial state */
2552 s->parse_context.state= -1;
2553 s->parse_context.frame_start_found= 0;
2554 s->parse_context.overread= 0;
2555 s->parse_context.overread_index= 0;
2556 s->parse_context.index= 0;
2557 s->parse_context.last_index= 0;
2558 s->bitstream_buffer_size=0;
/* MPEG-1 intra dequantization: DC is multiplied by the DC scaler, each
 * AC coefficient by qscale * intra matrix (>>3), then forced odd via
 * (level-1)|1 (MPEG-1 "oddification"). */
2562 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2563 DCTELEM *block, int n, int qscale)
2565 int i, level, nCoeffs;
2566 const uint16_t *quant_matrix;
2568 nCoeffs= s->block_last_index[n];
/* DC: luma vs chroma scaler (the n<4 selection line is elided here) */
2571 block[0] = block[0] * s->y_dc_scale;
2573 block[0] = block[0] * s->c_dc_scale;
2574 /* XXX: only mpeg1 */
2575 quant_matrix = s->intra_matrix;
2576 for(i=1;i<=nCoeffs;i++) {
2577 int j= s->intra_scantable.permutated[i];
/* NOTE(review): negative-level branch (sign handling lines elided) */
2582 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2583 level = (level - 1) | 1;
/* positive-level branch */
2586 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2587 level = (level - 1) | 1;
/* MPEG-1 inter dequantization: level' = ((2*level+1) * qscale * matrix)
 * >> 4, then forced odd via (level-1)|1 (MPEG-1 "oddification"). */
2594 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2595 DCTELEM *block, int n, int qscale)
2597 int i, level, nCoeffs;
2598 const uint16_t *quant_matrix;
2600 nCoeffs= s->block_last_index[n];
2602 quant_matrix = s->inter_matrix;
/* inter blocks have no predicted DC, so the loop starts at 0 */
2603 for(i=0; i<=nCoeffs; i++) {
2604 int j= s->intra_scantable.permutated[i];
/* NOTE(review): negative-level branch (sign handling lines elided) */
2609 level = (((level << 1) + 1) * qscale *
2610 ((int) (quant_matrix[j]))) >> 4;
2611 level = (level - 1) | 1;
/* positive-level branch */
2614 level = (((level << 1) + 1) * qscale *
2615 ((int) (quant_matrix[j]))) >> 4;
2616 level = (level - 1) | 1;
/* MPEG-2 intra dequantization: like the MPEG-1 variant but without the
 * (level-1)|1 oddification; with alternate_scan all 64 coefficients are
 * processed regardless of block_last_index. */
2623 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2624 DCTELEM *block, int n, int qscale)
2626 int i, level, nCoeffs;
2627 const uint16_t *quant_matrix;
2629 if(s->alternate_scan) nCoeffs= 63;
2630 else nCoeffs= s->block_last_index[n];
/* DC: luma vs chroma scaler (the n<4 selection line is elided here) */
2633 block[0] = block[0] * s->y_dc_scale;
2635 block[0] = block[0] * s->c_dc_scale;
2636 quant_matrix = s->intra_matrix;
2637 for(i=1;i<=nCoeffs;i++) {
2638 int j= s->intra_scantable.permutated[i];
/* NOTE(review): negative-level branch (sign handling lines elided) */
2643 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* positive-level branch */
2646 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact MPEG-2 intra dequantization; same scaling as the _c variant
 * (NOTE(review): the spec's mismatch-control/sum handling that makes it
 * bit-exact is in lines elided from this excerpt). */
2653 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2654 DCTELEM *block, int n, int qscale)
2656 int i, level, nCoeffs;
2657 const uint16_t *quant_matrix;
2660 if(s->alternate_scan) nCoeffs= 63;
2661 else nCoeffs= s->block_last_index[n];
/* DC: luma vs chroma scaler (the n<4 selection line is elided here) */
2664 block[0] = block[0] * s->y_dc_scale;
2666 block[0] = block[0] * s->c_dc_scale;
2667 quant_matrix = s->intra_matrix;
2668 for(i=1;i<=nCoeffs;i++) {
2669 int j= s->intra_scantable.permutated[i];
/* NOTE(review): negative-level branch (sign handling lines elided) */
2674 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* positive-level branch */
2677 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* MPEG-2 inter dequantization: ((2*level+1) * qscale * matrix) >> 4,
 * without the MPEG-1 oddification; alternate_scan forces all 64
 * coefficients to be processed. */
2686 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2687 DCTELEM *block, int n, int qscale)
2689 int i, level, nCoeffs;
2690 const uint16_t *quant_matrix;
2693 if(s->alternate_scan) nCoeffs= 63;
2694 else nCoeffs= s->block_last_index[n];
2696 quant_matrix = s->inter_matrix;
/* inter blocks have no predicted DC, so the loop starts at 0 */
2697 for(i=0; i<=nCoeffs; i++) {
2698 int j= s->intra_scantable.permutated[i];
/* NOTE(review): negative-level branch (sign handling lines elided) */
2703 level = (((level << 1) + 1) * qscale *
2704 ((int) (quant_matrix[j]))) >> 4;
/* positive-level branch */
2707 level = (((level << 1) + 1) * qscale *
2708 ((int) (quant_matrix[j]))) >> 4;
/* H.263-style intra dequantization: DC multiplied by the DC scaler, AC
 * coefficients reconstructed as level*qmul +/- qadd (qmul assignment is
 * in lines elided from this excerpt). */
2717 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2718 DCTELEM *block, int n, int qscale)
2720 int i, level, qmul, qadd;
2723 assert(s->block_last_index[n]>=0);
/* DC: luma vs chroma scaler (the n<4 selection line is elided here) */
2729 block[0] = block[0] * s->y_dc_scale;
2731 block[0] = block[0] * s->c_dc_scale;
2732 qadd = (qscale - 1) | 1; /* qadd is always odd */
/* raster_end maps the last scan position to the last raster row needed */
2739 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2741 for(i=1; i<=nCoeffs; i++) {
/* NOTE(review): presumably the negative-level branch — condition elided */
2745 level = level * qmul - qadd;
/* positive-level branch */
2747 level = level * qmul + qadd;
/* H.263-style inter dequantization: level*qmul +/- qadd for every
 * coefficient, starting at index 0 since inter blocks have no predicted
 * DC (qmul assignment is in lines elided from this excerpt). */
2754 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2755 DCTELEM *block, int n, int qscale)
2757 int i, level, qmul, qadd;
2760 assert(s->block_last_index[n]>=0);
2762 qadd = (qscale - 1) | 1; /* qadd is always odd */
/* raster_end maps the last scan position to the last raster row needed */
2765 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2767 for(i=0; i<=nCoeffs; i++) {
/* NOTE(review): presumably the negative-level branch — condition elided */
2771 level = level * qmul - qadd;
/* positive-level branch */
2773 level = level * qmul + qadd;
2781 * set qscale and update qscale dependent variables.
/* Set the quantizer scale, clamping it to the valid range, and refresh
 * the qscale-dependent values: chroma qscale and the luma/chroma DC
 * scalers (the lower-bound clamp line is elided in this excerpt). */
2783 void ff_set_qscale(MpegEncContext * s, int qscale)
2787 else if (qscale > 31)
2791 s->chroma_qscale= s->chroma_qscale_table[qscale];
2793 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2794 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2797 void MPV_report_decode_progress(MpegEncContext *s)
2799 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
2800 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);