2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
38 #include "xvmc_internal.h"
45 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
46 DCTELEM *block, int n, int qscale);
47 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
56 DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
58 DCTELEM *block, int n, int qscale);
61 /* enable all paranoid tests for rounding, overflows, etc... */
67 static const uint8_t ff_default_chroma_qscale_table[32] = {
68 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
69 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
70 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
73 const uint8_t ff_mpeg1_dc_scale_table[128] = {
74 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
82 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
85 static const uint8_t mpeg2_dc_scale_table1[128] = {
86 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
88 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
89 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
94 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
97 static const uint8_t mpeg2_dc_scale_table2[128] = {
98 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
100 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
101 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
106 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
109 static const uint8_t mpeg2_dc_scale_table3[128] = {
110 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
111 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
112 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
113 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
117 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
118 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
121 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
122 ff_mpeg1_dc_scale_table,
123 mpeg2_dc_scale_table1,
124 mpeg2_dc_scale_table2,
125 mpeg2_dc_scale_table3,
128 const enum AVPixelFormat ff_pixfmt_list_420[] = {
133 const enum AVPixelFormat ff_hwaccel_pixfmt_list_420[] = {
134 AV_PIX_FMT_DXVA2_VLD,
135 AV_PIX_FMT_VAAPI_VLD,
141 const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
143 uint32_t * restrict state)
151 for (i = 0; i < 3; i++) {
152 uint32_t tmp = *state << 8;
153 *state = tmp + *(p++);
154 if (tmp == 0x100 || p == end)
159 if (p[-1] > 1 ) p += 3;
160 else if (p[-2] ) p += 2;
161 else if (p[-3]|(p[-1]-1)) p++;
168 p = FFMIN(p, end) - 4;
174 /* init common dct for both encoder and decoder */
175 av_cold int ff_dct_common_init(MpegEncContext *s)
177 ff_dsputil_init(&s->dsp, s->avctx);
179 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
180 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
181 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
182 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
183 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
184 if (s->flags & CODEC_FLAG_BITEXACT)
185 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
186 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
189 ff_MPV_common_init_x86(s);
191 ff_MPV_common_init_axp(s);
193 ff_MPV_common_init_arm(s);
195 ff_MPV_common_init_altivec(s);
197 ff_MPV_common_init_bfin(s);
200 /* load & permutate scantables
201 * note: only wmv uses different ones
203 if (s->alternate_scan) {
204 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
205 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
207 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
208 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
210 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
211 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
216 void ff_copy_picture(Picture *dst, Picture *src)
219 dst->f.type = FF_BUFFER_TYPE_COPY;
223 * Release a frame buffer
225 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
227 /* WM Image / Screen codecs allocate internal buffers with different
228 * dimensions / colorspaces; ignore user-defined callbacks for these. */
229 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
230 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
231 s->codec_id != AV_CODEC_ID_MSS2)
232 ff_thread_release_buffer(s->avctx, &pic->f);
234 avcodec_default_release_buffer(s->avctx, &pic->f);
235 av_freep(&pic->f.hwaccel_picture_private);
239 * Allocate a frame buffer
241 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
245 if (s->avctx->hwaccel) {
246 assert(!pic->f.hwaccel_picture_private);
247 if (s->avctx->hwaccel->priv_data_size) {
248 pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
249 if (!pic->f.hwaccel_picture_private) {
250 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
256 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
257 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
258 s->codec_id != AV_CODEC_ID_MSS2)
259 r = ff_thread_get_buffer(s->avctx, &pic->f);
261 r = avcodec_default_get_buffer(s->avctx, &pic->f);
263 if (r < 0 || !pic->f.type || !pic->f.data[0]) {
264 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
265 r, pic->f.type, pic->f.data[0]);
266 av_freep(&pic->f.hwaccel_picture_private);
270 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
271 s->uvlinesize != pic->f.linesize[1])) {
272 av_log(s->avctx, AV_LOG_ERROR,
273 "get_buffer() failed (stride changed)\n");
274 free_frame_buffer(s, pic);
278 if (pic->f.linesize[1] != pic->f.linesize[2]) {
279 av_log(s->avctx, AV_LOG_ERROR,
280 "get_buffer() failed (uv stride mismatch)\n");
281 free_frame_buffer(s, pic);
289 * Allocate a Picture.
290 * The pixels are allocated/set by calling get_buffer() if shared = 0
292 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
294 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
296 // the + 1 is needed so memset(,,stride*height) does not sig11
298 const int mb_array_size = s->mb_stride * s->mb_height;
299 const int b8_array_size = s->b8_stride * s->mb_height * 2;
300 const int b4_array_size = s->b4_stride * s->mb_height * 4;
305 assert(pic->f.data[0]);
306 assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
307 pic->f.type = FF_BUFFER_TYPE_SHARED;
309 assert(!pic->f.data[0]);
311 if (alloc_frame_buffer(s, pic) < 0)
314 s->linesize = pic->f.linesize[0];
315 s->uvlinesize = pic->f.linesize[1];
318 if (pic->f.qscale_table == NULL) {
320 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
321 mb_array_size * sizeof(int16_t), fail)
322 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
323 mb_array_size * sizeof(int16_t), fail)
324 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
325 mb_array_size * sizeof(int8_t ), fail)
328 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
329 mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
330 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
331 (big_mb_num + s->mb_stride) * sizeof(uint8_t),
333 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
334 (big_mb_num + s->mb_stride) * sizeof(uint32_t),
336 pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
337 pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
338 if (s->out_format == FMT_H264) {
339 for (i = 0; i < 2; i++) {
340 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
341 2 * (b4_array_size + 4) * sizeof(int16_t),
343 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
344 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
345 4 * mb_array_size * sizeof(uint8_t), fail)
347 pic->f.motion_subsample_log2 = 2;
348 } else if (s->out_format == FMT_H263 || s->encoding ||
349 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
350 for (i = 0; i < 2; i++) {
351 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
352 2 * (b8_array_size + 4) * sizeof(int16_t),
354 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
355 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
356 4 * mb_array_size * sizeof(uint8_t), fail)
358 pic->f.motion_subsample_log2 = 3;
360 if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
361 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
362 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
364 pic->f.qstride = s->mb_stride;
365 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
366 1 * sizeof(AVPanScan), fail)
372 fail: // for the FF_ALLOCZ_OR_GOTO macro
374 free_frame_buffer(s, pic);
379 * Deallocate a picture.
381 static void free_picture(MpegEncContext *s, Picture *pic)
385 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
386 free_frame_buffer(s, pic);
389 av_freep(&pic->mb_var);
390 av_freep(&pic->mc_mb_var);
391 av_freep(&pic->mb_mean);
392 av_freep(&pic->f.mbskip_table);
393 av_freep(&pic->qscale_table_base);
394 pic->f.qscale_table = NULL;
395 av_freep(&pic->mb_type_base);
396 pic->f.mb_type = NULL;
397 av_freep(&pic->f.dct_coeff);
398 av_freep(&pic->f.pan_scan);
399 pic->f.mb_type = NULL;
400 for (i = 0; i < 2; i++) {
401 av_freep(&pic->motion_val_base[i]);
402 av_freep(&pic->f.ref_index[i]);
403 pic->f.motion_val[i] = NULL;
406 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
407 for (i = 0; i < 4; i++) {
409 pic->f.data[i] = NULL;
415 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
417 int y_size = s->b8_stride * (2 * s->mb_height + 1);
418 int c_size = s->mb_stride * (s->mb_height + 1);
419 int yc_size = y_size + 2 * c_size;
422 // edge emu needs blocksize + filter length - 1
423 // (= 17x17 for halfpel / 21x21 for h264)
424 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
425 (s->width + 64) * 2 * 21 * 2, fail); // (width + edge + align)*interlaced*MBsize*tolerance
427 // FIXME should be linesize instead of s->width * 2
428 // but that is not known before get_buffer()
429 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
430 (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
431 s->me.temp = s->me.scratchpad;
432 s->rd_scratchpad = s->me.scratchpad;
433 s->b_scratchpad = s->me.scratchpad;
434 s->obmc_scratchpad = s->me.scratchpad + 16;
436 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
437 ME_MAP_SIZE * sizeof(uint32_t), fail)
438 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
439 ME_MAP_SIZE * sizeof(uint32_t), fail)
440 if (s->avctx->noise_reduction) {
441 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
442 2 * 64 * sizeof(int), fail)
445 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
446 s->block = s->blocks[0];
448 for (i = 0; i < 12; i++) {
449 s->pblocks[i] = &s->block[i];
452 if (s->out_format == FMT_H263) {
454 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
455 yc_size * sizeof(int16_t) * 16, fail);
456 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
457 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
458 s->ac_val[2] = s->ac_val[1] + c_size;
463 return -1; // free() through ff_MPV_common_end()
466 static void free_duplicate_context(MpegEncContext *s)
471 av_freep(&s->edge_emu_buffer);
472 av_freep(&s->me.scratchpad);
476 s->obmc_scratchpad = NULL;
478 av_freep(&s->dct_error_sum);
479 av_freep(&s->me.map);
480 av_freep(&s->me.score_map);
481 av_freep(&s->blocks);
482 av_freep(&s->ac_val_base);
486 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
488 #define COPY(a) bak->a = src->a
489 COPY(edge_emu_buffer);
494 COPY(obmc_scratchpad);
501 COPY(me.map_generation);
513 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
517 // FIXME copy only needed parts
519 backup_duplicate_context(&bak, dst);
520 memcpy(dst, src, sizeof(MpegEncContext));
521 backup_duplicate_context(dst, &bak);
522 for (i = 0; i < 12; i++) {
523 dst->pblocks[i] = &dst->block[i];
525 // STOP_TIMER("update_duplicate_context")
526 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
529 int ff_mpeg_update_thread_context(AVCodecContext *dst,
530 const AVCodecContext *src)
533 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
535 if (dst == src || !s1->context_initialized)
538 // FIXME can parameters change on I-frames?
539 // in that case dst may need a reinit
540 if (!s->context_initialized) {
541 memcpy(s, s1, sizeof(MpegEncContext));
544 s->picture_range_start += MAX_PICTURE_COUNT;
545 s->picture_range_end += MAX_PICTURE_COUNT;
546 s->bitstream_buffer = NULL;
547 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
549 ff_MPV_common_init(s);
552 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
554 s->context_reinit = 0;
555 s->height = s1->height;
556 s->width = s1->width;
557 if ((err = ff_MPV_common_frame_size_change(s)) < 0)
561 s->avctx->coded_height = s1->avctx->coded_height;
562 s->avctx->coded_width = s1->avctx->coded_width;
563 s->avctx->width = s1->avctx->width;
564 s->avctx->height = s1->avctx->height;
566 s->coded_picture_number = s1->coded_picture_number;
567 s->picture_number = s1->picture_number;
568 s->input_picture_number = s1->input_picture_number;
570 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
571 memcpy(&s->last_picture, &s1->last_picture,
572 (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
574 // reset s->picture[].f.extended_data to s->picture[].f.data
575 for (i = 0; i < s->picture_count; i++)
576 s->picture[i].f.extended_data = s->picture[i].f.data;
578 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
579 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
580 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
582 // Error/bug resilience
583 s->next_p_frame_damaged = s1->next_p_frame_damaged;
584 s->workaround_bugs = s1->workaround_bugs;
587 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
588 (char *) &s1->shape - (char *) &s1->time_increment_bits);
591 s->max_b_frames = s1->max_b_frames;
592 s->low_delay = s1->low_delay;
593 s->dropable = s1->dropable;
595 // DivX handling (doesn't work)
596 s->divx_packed = s1->divx_packed;
598 if (s1->bitstream_buffer) {
599 if (s1->bitstream_buffer_size +
600 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
601 av_fast_malloc(&s->bitstream_buffer,
602 &s->allocated_bitstream_buffer_size,
603 s1->allocated_bitstream_buffer_size);
604 s->bitstream_buffer_size = s1->bitstream_buffer_size;
605 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
606 s1->bitstream_buffer_size);
607 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
608 FF_INPUT_BUFFER_PADDING_SIZE);
611 // MPEG2/interlacing info
612 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
613 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
615 if (!s1->first_field) {
616 s->last_pict_type = s1->pict_type;
617 if (s1->current_picture_ptr)
618 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
620 if (s1->pict_type != AV_PICTURE_TYPE_B) {
621 s->last_non_b_pict_type = s1->pict_type;
629 * Set the given MpegEncContext to common defaults
630 * (same for encoding and decoding).
631 * The changed fields will not depend upon the
632 * prior state of the MpegEncContext.
634 void ff_MPV_common_defaults(MpegEncContext *s)
636 s->y_dc_scale_table =
637 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
638 s->chroma_qscale_table = ff_default_chroma_qscale_table;
639 s->progressive_frame = 1;
640 s->progressive_sequence = 1;
641 s->picture_structure = PICT_FRAME;
643 s->coded_picture_number = 0;
644 s->picture_number = 0;
645 s->input_picture_number = 0;
647 s->picture_in_gop_number = 0;
652 s->picture_range_start = 0;
653 s->picture_range_end = MAX_PICTURE_COUNT;
655 s->slice_context_count = 1;
659 * Set the given MpegEncContext to defaults for decoding.
660 * the changed fields will not depend upon
661 * the prior state of the MpegEncContext.
663 void ff_MPV_decode_defaults(MpegEncContext *s)
665 ff_MPV_common_defaults(s);
669 * Initialize and allocates MpegEncContext fields dependent on the resolution.
671 static int init_context_frame(MpegEncContext *s)
673 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
675 s->mb_width = (s->width + 15) / 16;
676 s->mb_stride = s->mb_width + 1;
677 s->b8_stride = s->mb_width * 2 + 1;
678 s->b4_stride = s->mb_width * 4 + 1;
679 mb_array_size = s->mb_height * s->mb_stride;
680 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
682 /* set default edge pos, will be overriden
683 * in decode_header if needed */
684 s->h_edge_pos = s->mb_width * 16;
685 s->v_edge_pos = s->mb_height * 16;
687 s->mb_num = s->mb_width * s->mb_height;
692 s->block_wrap[3] = s->b8_stride;
694 s->block_wrap[5] = s->mb_stride;
696 y_size = s->b8_stride * (2 * s->mb_height + 1);
697 c_size = s->mb_stride * (s->mb_height + 1);
698 yc_size = y_size + 2 * c_size;
700 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
701 fail); // error ressilience code looks cleaner with this
702 for (y = 0; y < s->mb_height; y++)
703 for (x = 0; x < s->mb_width; x++)
704 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
706 s->mb_index2xy[s->mb_height * s->mb_width] =
707 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
710 /* Allocate MV tables */
711 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
712 mv_table_size * 2 * sizeof(int16_t), fail);
713 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
714 mv_table_size * 2 * sizeof(int16_t), fail);
715 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
716 mv_table_size * 2 * sizeof(int16_t), fail);
717 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
718 mv_table_size * 2 * sizeof(int16_t), fail);
719 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
720 mv_table_size * 2 * sizeof(int16_t), fail);
721 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
722 mv_table_size * 2 * sizeof(int16_t), fail);
723 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
724 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
725 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
726 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
728 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
730 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
732 /* Allocate MB type table */
733 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
734 sizeof(uint16_t), fail); // needed for encoding
736 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
739 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
740 mb_array_size * sizeof(float), fail);
741 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
742 mb_array_size * sizeof(float), fail);
746 FF_ALLOC_OR_GOTO(s->avctx, s->er_temp_buffer,
747 mb_array_size * sizeof(uint8_t), fail);
748 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
749 mb_array_size * sizeof(uint8_t), fail);
751 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
752 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
753 /* interlaced direct mode decoding tables */
754 for (i = 0; i < 2; i++) {
756 for (j = 0; j < 2; j++) {
757 for (k = 0; k < 2; k++) {
758 FF_ALLOCZ_OR_GOTO(s->avctx,
759 s->b_field_mv_table_base[i][j][k],
760 mv_table_size * 2 * sizeof(int16_t),
762 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
765 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
766 mb_array_size * 2 * sizeof(uint8_t), fail);
767 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
768 mv_table_size * 2 * sizeof(int16_t), fail);
769 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
772 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
773 mb_array_size * 2 * sizeof(uint8_t), fail);
776 if (s->out_format == FMT_H263) {
778 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
779 s->coded_block = s->coded_block_base + s->b8_stride + 1;
781 /* cbp, ac_pred, pred_dir */
782 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
783 mb_array_size * sizeof(uint8_t), fail);
784 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
785 mb_array_size * sizeof(uint8_t), fail);
788 if (s->h263_pred || s->h263_plus || !s->encoding) {
790 // MN: we need these for error resilience of intra-frames
791 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
792 yc_size * sizeof(int16_t), fail);
793 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
794 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
795 s->dc_val[2] = s->dc_val[1] + c_size;
796 for (i = 0; i < yc_size; i++)
797 s->dc_val_base[i] = 1024;
800 /* which mb is a intra block */
801 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
802 memset(s->mbintra_table, 1, mb_array_size);
804 /* init macroblock skip table */
805 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
806 // Note the + 1 is for a quicker mpeg4 slice_end detection
808 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
809 s->avctx->debug_mv) {
810 s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
811 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
812 s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
813 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
814 s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
815 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
820 return AVERROR(ENOMEM);
824 * init common structure for both encoder and decoder.
825 * this assumes that some variables like width/height are already set
827 av_cold int ff_MPV_common_init(MpegEncContext *s)
830 int nb_slices = (HAVE_THREADS &&
831 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
832 s->avctx->thread_count : 1;
834 if (s->encoding && s->avctx->slices)
835 nb_slices = s->avctx->slices;
837 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
838 s->mb_height = (s->height + 31) / 32 * 2;
839 else if (s->codec_id != AV_CODEC_ID_H264)
840 s->mb_height = (s->height + 15) / 16;
842 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
843 av_log(s->avctx, AV_LOG_ERROR,
844 "decoding to AV_PIX_FMT_NONE is not supported.\n");
848 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
851 max_slices = FFMIN(MAX_THREADS, s->mb_height);
853 max_slices = MAX_THREADS;
854 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
855 " reducing to %d\n", nb_slices, max_slices);
856 nb_slices = max_slices;
859 if ((s->width || s->height) &&
860 av_image_check_size(s->width, s->height, 0, s->avctx))
863 ff_dct_common_init(s);
865 s->flags = s->avctx->flags;
866 s->flags2 = s->avctx->flags2;
868 if (s->width && s->height) {
869 /* set chroma shifts */
870 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift,
873 /* convert fourcc to upper case */
874 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
876 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
878 s->avctx->coded_frame = &s->current_picture.f;
881 if (s->msmpeg4_version) {
882 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
883 2 * 2 * (MAX_LEVEL + 1) *
884 (MAX_RUN + 1) * 2 * sizeof(int), fail);
886 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
888 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
889 64 * 32 * sizeof(int), fail);
890 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
891 64 * 32 * sizeof(int), fail);
892 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
893 64 * 32 * 2 * sizeof(uint16_t), fail);
894 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
895 64 * 32 * 2 * sizeof(uint16_t), fail);
896 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
897 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
898 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
899 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
901 if (s->avctx->noise_reduction) {
902 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
903 2 * 64 * sizeof(uint16_t), fail);
908 s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
909 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
910 s->picture_count * sizeof(Picture), fail);
911 for (i = 0; i < s->picture_count; i++) {
912 avcodec_get_frame_defaults(&s->picture[i].f);
915 if (s->width && s->height) {
916 if (init_context_frame(s))
919 s->parse_context.state = -1;
922 s->context_initialized = 1;
923 s->thread_context[0] = s;
925 if (s->width && s->height) {
927 for (i = 1; i < nb_slices; i++) {
928 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
929 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
932 for (i = 0; i < nb_slices; i++) {
933 if (init_duplicate_context(s->thread_context[i], s) < 0)
935 s->thread_context[i]->start_mb_y =
936 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
937 s->thread_context[i]->end_mb_y =
938 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
941 if (init_duplicate_context(s, s) < 0)
944 s->end_mb_y = s->mb_height;
946 s->slice_context_count = nb_slices;
951 ff_MPV_common_end(s);
956 * Frees and resets MpegEncContext fields depending on the resolution.
957 * Is used during resolution changes to avoid a full reinitialization of the
960 static int free_context_frame(MpegEncContext *s)
964 av_freep(&s->mb_type);
965 av_freep(&s->p_mv_table_base);
966 av_freep(&s->b_forw_mv_table_base);
967 av_freep(&s->b_back_mv_table_base);
968 av_freep(&s->b_bidir_forw_mv_table_base);
969 av_freep(&s->b_bidir_back_mv_table_base);
970 av_freep(&s->b_direct_mv_table_base);
971 s->p_mv_table = NULL;
972 s->b_forw_mv_table = NULL;
973 s->b_back_mv_table = NULL;
974 s->b_bidir_forw_mv_table = NULL;
975 s->b_bidir_back_mv_table = NULL;
976 s->b_direct_mv_table = NULL;
977 for (i = 0; i < 2; i++) {
978 for (j = 0; j < 2; j++) {
979 for (k = 0; k < 2; k++) {
980 av_freep(&s->b_field_mv_table_base[i][j][k]);
981 s->b_field_mv_table[i][j][k] = NULL;
983 av_freep(&s->b_field_select_table[i][j]);
984 av_freep(&s->p_field_mv_table_base[i][j]);
985 s->p_field_mv_table[i][j] = NULL;
987 av_freep(&s->p_field_select_table[i]);
990 av_freep(&s->dc_val_base);
991 av_freep(&s->coded_block_base);
992 av_freep(&s->mbintra_table);
993 av_freep(&s->cbp_table);
994 av_freep(&s->pred_dir_table);
996 av_freep(&s->mbskip_table);
998 av_freep(&s->error_status_table);
999 av_freep(&s->er_temp_buffer);
1000 av_freep(&s->mb_index2xy);
1001 av_freep(&s->lambda_table);
1002 av_freep(&s->cplx_tab);
1003 av_freep(&s->bits_tab);
1005 s->linesize = s->uvlinesize = 0;
1007 for (i = 0; i < 3; i++)
1008 av_freep(&s->visualization_buffer[i]);
1010 if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
1011 avcodec_default_free_buffers(s->avctx);
1016 int ff_MPV_common_frame_size_change(MpegEncContext *s)
1020 if (s->slice_context_count > 1) {
1021 for (i = 0; i < s->slice_context_count; i++) {
1022 free_duplicate_context(s->thread_context[i]);
1024 for (i = 1; i < s->slice_context_count; i++) {
1025 av_freep(&s->thread_context[i]);
1028 free_duplicate_context(s);
1030 free_context_frame(s);
1033 for (i = 0; i < s->picture_count; i++) {
1034 s->picture[i].needs_realloc = 1;
1037 s->last_picture_ptr =
1038 s->next_picture_ptr =
1039 s->current_picture_ptr = NULL;
1042 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1043 s->mb_height = (s->height + 31) / 32 * 2;
1044 else if (s->codec_id != AV_CODEC_ID_H264)
1045 s->mb_height = (s->height + 15) / 16;
1047 if ((s->width || s->height) &&
1048 av_image_check_size(s->width, s->height, 0, s->avctx))
1049 return AVERROR_INVALIDDATA;
1051 if ((err = init_context_frame(s)))
1054 s->thread_context[0] = s;
1056 if (s->width && s->height) {
1057 int nb_slices = s->slice_context_count;
1058 if (nb_slices > 1) {
1059 for (i = 1; i < nb_slices; i++) {
1060 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1061 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1064 for (i = 0; i < nb_slices; i++) {
1065 if (init_duplicate_context(s->thread_context[i], s) < 0)
1067 s->thread_context[i]->start_mb_y =
1068 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1069 s->thread_context[i]->end_mb_y =
1070 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1073 if (init_duplicate_context(s, s) < 0)
1076 s->end_mb_y = s->mb_height;
1078 s->slice_context_count = nb_slices;
1083 ff_MPV_common_end(s);
1087 /* init common structure for both encoder and decoder */
1088 void ff_MPV_common_end(MpegEncContext *s)
1092 if (s->slice_context_count > 1) {
1093 for (i = 0; i < s->slice_context_count; i++) {
1094 free_duplicate_context(s->thread_context[i]);
1096 for (i = 1; i < s->slice_context_count; i++) {
1097 av_freep(&s->thread_context[i]);
1099 s->slice_context_count = 1;
1100 } else free_duplicate_context(s);
1102 av_freep(&s->parse_context.buffer);
1103 s->parse_context.buffer_size = 0;
1105 av_freep(&s->bitstream_buffer);
1106 s->allocated_bitstream_buffer_size = 0;
1108 av_freep(&s->avctx->stats_out);
1109 av_freep(&s->ac_stats);
1111 av_freep(&s->q_intra_matrix);
1112 av_freep(&s->q_inter_matrix);
1113 av_freep(&s->q_intra_matrix16);
1114 av_freep(&s->q_inter_matrix16);
1115 av_freep(&s->input_picture);
1116 av_freep(&s->reordered_input_picture);
1117 av_freep(&s->dct_offset);
1119 if (s->picture && !s->avctx->internal->is_copy) {
1120 for (i = 0; i < s->picture_count; i++) {
1121 free_picture(s, &s->picture[i]);
1124 av_freep(&s->picture);
1126 free_context_frame(s);
1128 s->context_initialized = 0;
1129 s->last_picture_ptr =
1130 s->next_picture_ptr =
1131 s->current_picture_ptr = NULL;
1132 s->linesize = s->uvlinesize = 0;
/* Build the max_level[], max_run[] and index_run[] lookup tables for an
 * RL (run/level) table, either into caller-provided static storage or into
 * freshly av_malloc'd buffers (the branch is selected by static_store;
 * the selecting if/else lines are elided in this excerpt). */
1135 void ff_init_rl(RLTable *rl,
1136 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1138 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1139 uint8_t index_run[MAX_RUN + 1];
1140 int last, run, level, start, end, i;
1142 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1143 if (static_store && rl->max_level[0])
1146 /* compute max_level[], max_run[] and index_run[] */
/* One pass for non-last (last == 0) and one for last (last == 1) codes;
 * the start/end assignments for each pass are elided here. */
1147 for (last = 0; last < 2; last++) {
/* rl->n acts as the "unset" sentinel for index_run[]. */
1156 memset(max_level, 0, MAX_RUN + 1);
1157 memset(max_run, 0, MAX_LEVEL + 1);
1158 memset(index_run, rl->n, MAX_RUN + 1);
1159 for (i = start; i < end; i++) {
1160 run = rl->table_run[i];
1161 level = rl->table_level[i];
/* Record the first table index seen for each run value. */
1162 if (index_run[run] == rl->n)
1164 if (level > max_level[run])
1165 max_level[run] = level;
1166 if (run > max_run[level])
1167 max_run[level] = run;
/* Static path: carve the three tables out of static_store[last];
 * dynamic path: allocate and copy each table separately. */
1170 rl->max_level[last] = static_store[last];
1172 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1173 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1175 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1177 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1178 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1180 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1182 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1183 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Pre-expand the RL VLC table into per-qscale rl_vlc[q][] entries so the
 * decoder can read run/level/len with a single lookup. */
1187 void ff_init_vlc_rl(RLTable *rl)
1191 for (q = 0; q < 32; q++) {
/* qadd = (q - 1) | 1: odd rounding offset used by the H.263-style
 * dequantizer (qmul is assigned on an elided line). */
1193 int qadd = (q - 1) | 1;
1199 for (i = 0; i < rl->vlc.table_size; i++) {
1200 int code = rl->vlc.table[i][0];
1201 int len = rl->vlc.table[i][1];
1204 if (len == 0) { // illegal code
1207 } else if (len < 0) { // more bits needed
1211 if (code == rl->n) { // esc
/* Regular code: dequantize the level and bias the run; runs for
 * "last" coefficients are offset by 192 to encode the last flag. */
1215 run = rl->table_run[code] + 1;
1216 level = rl->table_level[code] * qmul + qadd;
1217 if (code >= rl->last) run += 192;
1220 rl->rl_vlc[q][i].len = len;
1221 rl->rl_vlc[q][i].level = level;
1222 rl->rl_vlc[q][i].run = run;
/* Free the frame buffers of all pictures that are not references and are
 * owned by this context; the current picture is kept unless remove_current
 * is set. */
1227 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1231 /* release non reference frames */
1232 for (i = 0; i < s->picture_count; i++) {
1233 if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1234 (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1235 (remove_current || &s->picture[i] != s->current_picture_ptr)
1236 /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1237 free_frame_buffer(s, &s->picture[i]);
/* Return whether this Picture slot can be reused: it has no allocated data,
 * or it is flagged for reallocation and owned by (or unowned relative to)
 * this context. The return statements are on lines elided from this
 * excerpt. */
1242 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1244 if (pic->f.data[0] == NULL)
1246 if (pic->needs_realloc)
1247 if (!pic->owner2 || pic->owner2 == s)
/* Pick a free Picture slot within [picture_range_start, picture_range_end).
 * Preference order (each loop's return is elided here): completely empty
 * internally-typed slots first, then reusable non-internal slots, then any
 * reusable slot. The shared path is handled on elided lines above the
 * visible loops. */
1252 static int find_unused_picture(MpegEncContext *s, int shared)
1257 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1258 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1262 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1263 if (pic_is_unused(s, &s->picture[i]) && s->picture[i].f.type != 0)
1266 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1267 if (pic_is_unused(s, &s->picture[i]))
/* Nothing available. */
1272 return AVERROR_INVALIDDATA;
/* Public wrapper around find_unused_picture(): if the chosen slot is marked
 * needs_realloc, free its contents and reset the frame to defaults before
 * handing the index back (the final return of ret is elided here). */
1275 int ff_find_unused_picture(MpegEncContext *s, int shared)
1277 int ret = find_unused_picture(s, shared);
1279 if (ret >= 0 && ret < s->picture_range_end) {
1280 if (s->picture[ret].needs_realloc) {
1281 s->picture[ret].needs_realloc = 0;
1282 free_picture(s, &s->picture[ret]);
1283 avcodec_get_frame_defaults(&s->picture[ret].f);
/* Refresh the per-coefficient DCT noise-reduction offsets from the running
 * error statistics, separately for intra and inter blocks. */
1289 static void update_noise_reduction(MpegEncContext *s)
1293 for (intra = 0; intra < 2; intra++) {
/* Halve the accumulators once the sample count exceeds 2^16 so the
 * statistics keep adapting instead of saturating. */
1294 if (s->dct_count[intra] > (1 << 16)) {
1295 for (i = 0; i < 64; i++) {
1296 s->dct_error_sum[intra][i] >>= 1;
1298 s->dct_count[intra] >>= 1;
/* offset[i] = noise_reduction * count / error_sum[i], computed with
 * rounding; the +1 in the divisor avoids division by zero. */
1301 for (i = 0; i < 64; i++) {
1302 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1303 s->dct_count[intra] +
1304 s->dct_error_sum[intra][i] / 2) /
1305 (s->dct_error_sum[intra][i] + 1);
1311 * generic function for encode/decode called after coding/decoding
1312 * the header and before a frame is coded/decoded.
/* NOTE(review): this excerpt has extraction gaps — error returns, closing
 * braces and some else branches are missing between visible lines. */
1314 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1320 /* mark & release old frames */
1321 if (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3) {
/* Drop the previous reference once it is no longer needed (it is kept
 * while decoding B frames, and never freed if next == last). */
1322 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1323 s->last_picture_ptr != s->next_picture_ptr &&
1324 s->last_picture_ptr->f.data[0]) {
1325 if (s->last_picture_ptr->owner2 == s)
1326 free_frame_buffer(s, s->last_picture_ptr);
1329 /* release forgotten pictures */
1330 /* if (mpeg124/h263) */
1332 for (i = 0; i < s->picture_count; i++) {
1333 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
1334 &s->picture[i] != s->last_picture_ptr &&
1335 &s->picture[i] != s->next_picture_ptr &&
1336 s->picture[i].f.reference && !s->picture[i].needs_realloc) {
1337 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1338 av_log(avctx, AV_LOG_ERROR,
1339 "releasing zombie picture\n");
1340 free_frame_buffer(s, &s->picture[i]);
1347 ff_release_unused_pictures(s, 1);
/* Pick the Picture that will receive the frame being decoded/encoded. */
1349 if (s->current_picture_ptr &&
1350 s->current_picture_ptr->f.data[0] == NULL) {
1351 // we already have a unused image
1352 // (maybe it was set before reading the header)
1353 pic = s->current_picture_ptr;
1355 i = ff_find_unused_picture(s, 0);
1357 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1360 pic = &s->picture[i];
/* Reference flags: H.264 uses the picture structure as a per-field
 * mask; otherwise any non-B picture is a full reference (3 = both
 * fields). */
1363 pic->f.reference = 0;
1365 if (s->codec_id == AV_CODEC_ID_H264)
1366 pic->f.reference = s->picture_structure;
1367 else if (s->pict_type != AV_PICTURE_TYPE_B)
1368 pic->f.reference = 3;
1371 pic->f.coded_picture_number = s->coded_picture_number++;
1373 if (ff_alloc_picture(s, pic, 0) < 0)
1376 s->current_picture_ptr = pic;
1377 // FIXME use only the vars from current_pic
1378 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1379 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1380 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1381 if (s->picture_structure != PICT_FRAME)
1382 s->current_picture_ptr->f.top_field_first =
1383 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1385 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1386 !s->progressive_sequence;
1387 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1390 s->current_picture_ptr->f.pict_type = s->pict_type;
1391 // if (s->flags && CODEC_FLAG_QSCALE)
1392 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1393 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1395 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
/* Advance the reference chain; B frames do not become references. */
1397 if (s->pict_type != AV_PICTURE_TYPE_B) {
1398 s->last_picture_ptr = s->next_picture_ptr;
1400 s->next_picture_ptr = s->current_picture_ptr;
1402 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1403 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1404 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1405 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1406 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1407 s->pict_type, s->dropable);
/* Non-H.264 codecs need valid last/next references even when the stream
 * starts on a non-keyframe or a field picture — allocate dummies. */
1409 if (s->codec_id != AV_CODEC_ID_H264) {
1410 if ((s->last_picture_ptr == NULL ||
1411 s->last_picture_ptr->f.data[0] == NULL) &&
1412 (s->pict_type != AV_PICTURE_TYPE_I ||
1413 s->picture_structure != PICT_FRAME)) {
1414 if (s->pict_type != AV_PICTURE_TYPE_I)
1415 av_log(avctx, AV_LOG_ERROR,
1416 "warning: first frame is no keyframe\n");
1417 else if (s->picture_structure != PICT_FRAME)
1418 av_log(avctx, AV_LOG_INFO,
1419 "allocate dummy last picture for field based first keyframe\n");
1421 /* Allocate a dummy frame */
1422 i = ff_find_unused_picture(s, 0);
1424 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1427 s->last_picture_ptr = &s->picture[i];
1428 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1429 s->last_picture_ptr = NULL;
/* Mark both fields complete so frame threads never wait on the
 * dummy. */
1432 ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
1433 ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
1434 s->last_picture_ptr->f.reference = 3;
1436 if ((s->next_picture_ptr == NULL ||
1437 s->next_picture_ptr->f.data[0] == NULL) &&
1438 s->pict_type == AV_PICTURE_TYPE_B) {
1439 /* Allocate a dummy frame */
1440 i = ff_find_unused_picture(s, 0);
1442 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1445 s->next_picture_ptr = &s->picture[i];
1446 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1447 s->next_picture_ptr = NULL;
1450 ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
1451 ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
1452 s->next_picture_ptr->f.reference = 3;
1456 if (s->last_picture_ptr)
1457 ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1458 if (s->next_picture_ptr)
1459 ff_copy_picture(&s->next_picture, s->next_picture_ptr);
/* Under frame threading, take ownership of the references this thread
 * will release later. */
1461 if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME) &&
1462 (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3)) {
1463 if (s->next_picture_ptr)
1464 s->next_picture_ptr->owner2 = s;
1465 if (s->last_picture_ptr)
1466 s->last_picture_ptr->owner2 = s;
1469 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1470 s->last_picture_ptr->f.data[0]));
/* Field pictures: point data at the right field and double the strides
 * so the rest of the code can address one field as if it were a frame. */
1472 if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
1474 for (i = 0; i < 4; i++) {
1475 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1476 s->current_picture.f.data[i] +=
1477 s->current_picture.f.linesize[i];
1479 s->current_picture.f.linesize[i] *= 2;
1480 s->last_picture.f.linesize[i] *= 2;
1481 s->next_picture.f.linesize[i] *= 2;
1485 s->err_recognition = avctx->err_recognition;
1487 /* set dequantizer, we can't do it during init as
1488 * it might change for mpeg4 and we can't do it in the header
1489 * decode as init is not called for mpeg4 there yet */
1490 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1491 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1492 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1493 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1494 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1495 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1497 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1498 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1501 if (s->dct_error_sum) {
1502 assert(s->avctx->noise_reduction && s->encoding);
1503 update_noise_reduction(s);
1506 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1507 return ff_xvmc_field_start(s, avctx);
1512 /* generic function for encode/decode called after a
1513 * frame has been coded/decoded. */
1514 void ff_MPV_frame_end(MpegEncContext *s)
1517 /* redraw edges for the frame if decoding didn't complete */
1518 // just to make sure that all data is rendered.
1519 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1520 ff_xvmc_field_end(s);
/* Pad the picture edges (needed for unrestricted MVs) when the frame is
 * a reference and software decoding/encoding actually wrote into it. */
1521 } else if ((s->error_count || s->encoding) &&
1522 !s->avctx->hwaccel &&
1523 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1524 s->unrestricted_mv &&
1525 s->current_picture.f.reference &&
1527 !(s->flags & CODEC_FLAG_EMU_EDGE)) {
1528 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1529 int hshift = desc->log2_chroma_w;
1530 int vshift = desc->log2_chroma_h;
/* Luma plane, then both chroma planes with subsampled geometry. */
1531 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1532 s->h_edge_pos, s->v_edge_pos,
1533 EDGE_WIDTH, EDGE_WIDTH,
1534 EDGE_TOP | EDGE_BOTTOM);
1535 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1536 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1537 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1538 EDGE_TOP | EDGE_BOTTOM);
1539 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1540 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1541 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1542 EDGE_TOP | EDGE_BOTTOM);
/* Remember stats used by rate control for the next frame. */
1547 s->last_pict_type = s->pict_type;
1548 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1549 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1550 s->last_non_b_pict_type = s->pict_type;
1553 /* copy back current_picture variables */
1554 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1555 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1556 s->picture[i] = s->current_picture;
1560 assert(i < MAX_PICTURE_COUNT);
1564 /* release non-reference frames */
1565 for (i = 0; i < s->picture_count; i++) {
1566 if (s->picture[i].f.data[0] && !s->picture[i].f.reference
1567 /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
1568 free_frame_buffer(s, &s->picture[i]);
1572 // clear copies, to avoid confusion
1574 memset(&s->last_picture, 0, sizeof(Picture));
1575 memset(&s->next_picture, 0, sizeof(Picture));
1576 memset(&s->current_picture, 0, sizeof(Picture));
1578 s->avctx->coded_frame = &s->current_picture_ptr->f;
/* Tell waiting frame threads this reference is fully decoded. */
1580 if (s->codec_id != AV_CODEC_ID_H264 && s->current_picture.f.reference) {
1581 ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
1586 * Draw a line from (ex, ey) -> (sx, sy).
1587 * @param w width of the image
1588 * @param h height of the image
1589 * @param stride stride/linesize of the image
1590 * @param color color of the arrow
1592 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1593 int w, int h, int stride, int color)
1597 sx = av_clip(sx, 0, w - 1);
1598 sy = av_clip(sy, 0, h - 1);
1599 ex = av_clip(ex, 0, w - 1);
1600 ey = av_clip(ey, 0, h - 1);
1602 buf[sy * stride + sx] += color;
1604 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1606 FFSWAP(int, sx, ex);
1607 FFSWAP(int, sy, ey);
1609 buf += sx + sy * stride;
1611 f = ((ey - sy) << 16) / ex;
1612 for (x = 0; x = ex; x++) {
1614 fr = (x * f) & 0xFFFF;
1615 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1616 buf[(y + 1) * stride + x] += (color * fr ) >> 16;
1620 FFSWAP(int, sx, ex);
1621 FFSWAP(int, sy, ey);
1623 buf += sx + sy * stride;
1626 f = ((ex - sx) << 16) / ey;
1629 for (y = 0; y = ey; y++) {
1631 fr = (y * f) & 0xFFFF;
1632 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1633 buf[y * stride + x + 1] += (color * fr ) >> 16;
1639 * Draw an arrow from (ex, ey) -> (sx, sy).
1640 * @param w width of the image
1641 * @param h height of the image
1642 * @param stride stride/linesize of the image
1643 * @param color color of the arrow
1645 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1646 int ey, int w, int h, int stride, int color)
/* Loose clipping (+/-100 beyond the image); draw_line() does the strict
 * in-bounds clipping. The dx/dy declarations are elided in this excerpt. */
1650 sx = av_clip(sx, -100, w + 100);
1651 sy = av_clip(sy, -100, h + 100);
1652 ex = av_clip(ex, -100, w + 100);
1653 ey = av_clip(ey, -100, h + 100);
/* Only draw a head if the arrow is longer than 3 pixels; rx/ry are the
 * vector rotated by +/-45 degrees, scaled to a fixed head length. */
1658 if (dx * dx + dy * dy > 3 * 3) {
1661 int length = ff_sqrt((rx * rx + ry * ry) << 8);
1663 // FIXME subpixel accuracy
1664 rx = ROUNDED_DIV(rx * 3 << 4, length);
1665 ry = ROUNDED_DIV(ry * 3 << 4, length);
1667 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1668 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
/* Arrow shaft. */
1670 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1674 * Print debugging info for the given picture.
/* Emits per-macroblock skip/QP/type tables to the log and, when the
 * visualization debug flags are set, paints QP, MB-type colors and motion
 * vectors directly into a copy of the picture.
 * NOTE(review): this excerpt has extraction gaps — break statements,
 * braces and several branches are missing between visible lines. */
1676 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
1678 if (s->avctx->hwaccel || !pict || !pict->mb_type)
/* --- textual per-MB dump --- */
1681 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1684 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1685 switch (pict->pict_type) {
1686 case AV_PICTURE_TYPE_I:
1687 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1689 case AV_PICTURE_TYPE_P:
1690 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1692 case AV_PICTURE_TYPE_B:
1693 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1695 case AV_PICTURE_TYPE_S:
1696 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1698 case AV_PICTURE_TYPE_SI:
1699 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1701 case AV_PICTURE_TYPE_SP:
1702 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
1705 for (y = 0; y < s->mb_height; y++) {
1706 for (x = 0; x < s->mb_width; x++) {
1707 if (s->avctx->debug & FF_DEBUG_SKIP) {
1708 int count = s->mbskip_table[x + y * s->mb_stride];
1711 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1713 if (s->avctx->debug & FF_DEBUG_QP) {
1714 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1715 pict->qscale_table[x + y * s->mb_stride]);
1717 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1718 int mb_type = pict->mb_type[x + y * s->mb_stride];
1719 // Type & MV direction
1720 if (IS_PCM(mb_type))
1721 av_log(s->avctx, AV_LOG_DEBUG, "P");
1722 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1723 av_log(s->avctx, AV_LOG_DEBUG, "A");
1724 else if (IS_INTRA4x4(mb_type))
1725 av_log(s->avctx, AV_LOG_DEBUG, "i");
1726 else if (IS_INTRA16x16(mb_type))
1727 av_log(s->avctx, AV_LOG_DEBUG, "I");
1728 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1729 av_log(s->avctx, AV_LOG_DEBUG, "d");
1730 else if (IS_DIRECT(mb_type))
1731 av_log(s->avctx, AV_LOG_DEBUG, "D");
1732 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1733 av_log(s->avctx, AV_LOG_DEBUG, "g");
1734 else if (IS_GMC(mb_type))
1735 av_log(s->avctx, AV_LOG_DEBUG, "G");
1736 else if (IS_SKIP(mb_type))
1737 av_log(s->avctx, AV_LOG_DEBUG, "S");
1738 else if (!USES_LIST(mb_type, 1))
1739 av_log(s->avctx, AV_LOG_DEBUG, ">");
1740 else if (!USES_LIST(mb_type, 0))
1741 av_log(s->avctx, AV_LOG_DEBUG, "<");
1743 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1744 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* Segmentation character. */
1748 if (IS_8X8(mb_type))
1749 av_log(s->avctx, AV_LOG_DEBUG, "+");
1750 else if (IS_16X8(mb_type))
1751 av_log(s->avctx, AV_LOG_DEBUG, "-");
1752 else if (IS_8X16(mb_type))
1753 av_log(s->avctx, AV_LOG_DEBUG, "|");
1754 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1755 av_log(s->avctx, AV_LOG_DEBUG, " ");
1757 av_log(s->avctx, AV_LOG_DEBUG, "?");
1760 if (IS_INTERLACED(mb_type))
1761 av_log(s->avctx, AV_LOG_DEBUG, "=");
1763 av_log(s->avctx, AV_LOG_DEBUG, " ");
1766 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* --- in-picture visualization --- */
1770 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1771 (s->avctx->debug_mv)) {
1772 const int shift = 1 + s->quarter_sample;
1776 int h_chroma_shift, v_chroma_shift, block_height;
1777 const int width = s->avctx->width;
1778 const int height = s->avctx->height;
1779 const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
1780 const int mv_stride = (s->mb_width << mv_sample_log2) +
1781 (s->codec_id == AV_CODEC_ID_H264 ? 0 : 1);
1782 s->low_delay = 0; // needed to see the vectors without trashing the buffers
/* Paint into a private copy so the real reference planes stay intact. */
1784 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1785 &h_chroma_shift, &v_chroma_shift);
1786 for (i = 0; i < 3; i++) {
1787 memcpy(s->visualization_buffer[i], pict->data[i],
1788 (i == 0) ? pict->linesize[i] * height:
1789 pict->linesize[i] * height >> v_chroma_shift);
1790 pict->data[i] = s->visualization_buffer[i];
1792 pict->type = FF_BUFFER_TYPE_COPY;
1793 ptr = pict->data[0];
1794 block_height = 16 >> v_chroma_shift;
1796 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1798 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1799 const int mb_index = mb_x + mb_y * s->mb_stride;
/* Motion-vector arrows: type selects P-forward / B-forward /
 * B-backward sets (the direction assignments are elided). */
1800 if ((s->avctx->debug_mv) && pict->motion_val) {
1802 for (type = 0; type < 3; type++) {
1806 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1807 (pict->pict_type!= AV_PICTURE_TYPE_P))
1812 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1813 (pict->pict_type!= AV_PICTURE_TYPE_B))
1818 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1819 (pict->pict_type!= AV_PICTURE_TYPE_B))
1824 if (!USES_LIST(pict->mb_type[mb_index], direction))
1827 if (IS_8X8(pict->mb_type[mb_index])) {
1829 for (i = 0; i < 4; i++) {
1830 int sx = mb_x * 16 + 4 + 8 * (i & 1);
1831 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
1832 int xy = (mb_x * 2 + (i & 1) +
1833 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1834 int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
1835 int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
1836 draw_arrow(ptr, sx, sy, mx, my, width,
1837 height, s->linesize, 100);
1839 } else if (IS_16X8(pict->mb_type[mb_index])) {
1841 for (i = 0; i < 2; i++) {
1842 int sx = mb_x * 16 + 8;
1843 int sy = mb_y * 16 + 4 + 8 * i;
1844 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
1845 int mx = (pict->motion_val[direction][xy][0] >> shift);
1846 int my = (pict->motion_val[direction][xy][1] >> shift);
1848 if (IS_INTERLACED(pict->mb_type[mb_index]))
1851 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1852 height, s->linesize, 100);
1854 } else if (IS_8X16(pict->mb_type[mb_index])) {
1856 for (i = 0; i < 2; i++) {
1857 int sx = mb_x * 16 + 4 + 8 * i;
1858 int sy = mb_y * 16 + 8;
1859 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
1860 int mx = pict->motion_val[direction][xy][0] >> shift;
1861 int my = pict->motion_val[direction][xy][1] >> shift;
1863 if (IS_INTERLACED(pict->mb_type[mb_index]))
1866 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1867 height, s->linesize, 100);
1870 int sx = mb_x * 16 + 8;
1871 int sy = mb_y * 16 + 8;
1872 int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
/* NOTE(review): ">> shift + sx" parses as ">> (shift + sx)"
 * because >> binds looser than + in C; the parallel 8x8 case
 * above writes "(... >> shift) + sx" — these two lines look
 * like they are missing parentheses. Confirm and fix. */
1873 int mx = pict->motion_val[direction][xy][0] >> shift + sx;
1874 int my = pict->motion_val[direction][xy][1] >> shift + sy;
1875 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
/* QP visualization: fill the MB's chroma block with a gray level
 * proportional to its qscale. */
1879 if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
1880 uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
1881 0x0101010101010101ULL;
1883 for (y = 0; y < block_height; y++) {
1884 *(uint64_t *)(pict->data[1] + 8 * mb_x +
1885 (block_height * mb_y + y) *
1886 pict->linesize[1]) = c;
1887 *(uint64_t *)(pict->data[2] + 8 * mb_x +
1888 (block_height * mb_y + y) *
1889 pict->linesize[2]) = c;
/* MB-type visualization: pick a chroma (u,v) color per MB type. */
1892 if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
1894 int mb_type = pict->mb_type[mb_index];
1897 #define COLOR(theta, r) \
1898 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
1899 v = (int)(128 + r * sin(theta * 3.141592 / 180));
1903 if (IS_PCM(mb_type)) {
1905 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
1906 IS_INTRA16x16(mb_type)) {
1908 } else if (IS_INTRA4x4(mb_type)) {
1910 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
1912 } else if (IS_DIRECT(mb_type)) {
1914 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
1916 } else if (IS_GMC(mb_type)) {
1918 } else if (IS_SKIP(mb_type)) {
1920 } else if (!USES_LIST(mb_type, 1)) {
1922 } else if (!USES_LIST(mb_type, 0)) {
1925 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1929 u *= 0x0101010101010101ULL;
1930 v *= 0x0101010101010101ULL;
1931 for (y = 0; y < block_height; y++) {
1932 *(uint64_t *)(pict->data[1] + 8 * mb_x +
1933 (block_height * mb_y + y) * pict->linesize[1]) = u;
1934 *(uint64_t *)(pict->data[2] + 8 * mb_x +
1935 (block_height * mb_y + y) * pict->linesize[2]) = v;
/* Segmentation overlays: XOR 0x80 lines to mark 8x8/16x8/8x16 splits. */
1939 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
1940 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
1941 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1942 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
1943 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1945 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
1946 for (y = 0; y < 16; y++)
1947 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
1948 pict->linesize[0]] ^= 0x80;
1950 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
1951 int dm = 1 << (mv_sample_log2 - 2);
1952 for (i = 0; i < 4; i++) {
1953 int sx = mb_x * 16 + 8 * (i & 1);
1954 int sy = mb_y * 16 + 8 * (i >> 1);
1955 int xy = (mb_x * 2 + (i & 1) +
1956 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
/* Mark sub-8x8 splits where neighbouring MVs differ. */
1958 int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
1959 if (mv[0] != mv[dm] ||
1960 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
1961 for (y = 0; y < 8; y++)
1962 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
1963 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
1964 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
1965 pict->linesize[0]) ^= 0x8080808080808080ULL;
1969 if (IS_INTERLACED(mb_type) &&
1970 s->codec_id == AV_CODEC_ID_H264) {
1974 s->mbskip_table[mb_index] = 0;
1981 * find the lowest MB row referenced in the MVs
/* Used by frame threading to know how far the reference picture must be
 * decoded before this MB can be motion-compensated. The switch cases that
 * set mvs (per mv_type) are elided in this excerpt. */
1983 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
1985 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1986 int my, off, i, mvs;
/* Field pictures are not handled by this fast path. */
1988 if (s->picture_structure != PICT_FRAME) goto unhandled;
1990 switch (s->mv_type) {
1992 for (i = 0; i < mvs; i++) {
/* Normalize half-pel MVs to quarter-pel units before comparing. */
2005 my = s->mv[dir][i][1]<<qpel_shift;
2006 my_max = FFMAX(my_max, my);
2007 my_min = FFMIN(my_min, my);
/* Convert the largest vertical displacement (quarter-pel) to a row offset
 * in macroblocks, rounding up. */
2010 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2012 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
/* unhandled: conservatively require the whole reference frame. */
2014 return s->mb_height-1;
2017 /* put block[] to dest[] */
/* Dequantize an intra block in place, then IDCT it and *store* (overwrite)
 * the result into dest. i is the block index within the macroblock. */
2018 static inline void put_dct(MpegEncContext *s,
2019 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2021 s->dct_unquantize_intra(s, block, i, qscale);
2022 s->dsp.idct_put (dest, line_size, block);
2025 /* add block[] to dest[] */
/* IDCT an already-dequantized block and *add* the residual onto dest;
 * skipped entirely when the block has no coded coefficients
 * (block_last_index < 0). */
2026 static inline void add_dct(MpegEncContext *s,
2027 DCTELEM *block, int i, uint8_t *dest, int line_size)
2029 if (s->block_last_index[i] >= 0) {
2030 s->dsp.idct_add (dest, line_size, block);
/* Like add_dct(), but dequantizes the inter block first. Skipped when the
 * block has no coded coefficients. */
2034 static inline void add_dequant_dct(MpegEncContext *s,
2035 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2037 if (s->block_last_index[i] >= 0) {
2038 s->dct_unquantize_inter(s, block, i, qscale);
2040 s->dsp.idct_add (dest, line_size, block);
2045 * Clean dc, ac, coded_block for the current non-intra MB.
/* Resets intra-prediction state so a following intra MB does not predict
 * from this inter MB: luma DC set to the 1024 reset value, AC coefficients
 * zeroed, and (for msmpeg4 v3+) coded_block flags cleared. */
2047 void ff_clean_intra_table_entries(MpegEncContext *s)
2049 int wrap = s->b8_stride;
2050 int xy = s->block_index[0];
/* Luma: the 4 8x8 blocks of this MB (xy, xy+1, xy+wrap, xy+1+wrap). */
2053 s->dc_val[0][xy + 1 ] =
2054 s->dc_val[0][xy + wrap] =
2055 s->dc_val[0][xy + 1 + wrap] = 1024;
/* ac pred: 32 int16 per row pair covers two 8x8 blocks. */
2057 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2058 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2059 if (s->msmpeg4_version>=3) {
2060 s->coded_block[xy ] =
2061 s->coded_block[xy + 1 ] =
2062 s->coded_block[xy + wrap] =
2063 s->coded_block[xy + 1 + wrap] = 0;
/* Chroma: one block per plane, addressed on the MB grid. The dc_val[1]
 * assignment sits on an elided line before the visible dc_val[2] one. */
2066 wrap = s->mb_stride;
2067 xy = s->mb_x + s->mb_y * wrap;
2069 s->dc_val[2][xy] = 1024;
2071 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2072 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
/* Mark this MB as non-intra for future prediction decisions. */
2074 s->mbintra_table[xy]= 0;
2077 /* generic function called after a macroblock has been parsed by the
2078 decoder or after it has been encoded by the encoder.
2080 Important variables used:
2081 s->mb_intra : true if intra macroblock
2082 s->mv_dir : motion vector direction
2083 s->mv_type : motion vector type
2084 s->mv : motion vector
2085 s->interlaced_dct : true if interlaced dct used (mpeg2)
2087 static av_always_inline
2088 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
2091 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2092 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2093 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2097 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2098 /* save DCT coefficients */
2100 DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
2101 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2103 for(j=0; j<64; j++){
2104 *dct++ = block[i][s->dsp.idct_permutation[j]];
2105 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2107 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2111 s->current_picture.f.qscale_table[mb_xy] = s->qscale;
2113 /* update DC predictors for P macroblocks */
2115 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2116 if(s->mbintra_table[mb_xy])
2117 ff_clean_intra_table_entries(s);
2121 s->last_dc[2] = 128 << s->intra_dc_precision;
2124 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2125 s->mbintra_table[mb_xy]=1;
2127 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2128 uint8_t *dest_y, *dest_cb, *dest_cr;
2129 int dct_linesize, dct_offset;
2130 op_pixels_func (*op_pix)[4];
2131 qpel_mc_func (*op_qpix)[16];
2132 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2133 const int uvlinesize = s->current_picture.f.linesize[1];
2134 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
2135 const int block_size = 8;
2137 /* avoid copy if macroblock skipped in last frame too */
2138 /* skip only during decoding as we might trash the buffers during encoding a bit */
2140 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2142 if (s->mb_skipped) {
2144 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2146 } else if(!s->current_picture.f.reference) {
2149 *mbskip_ptr = 0; /* not skipped */
2153 dct_linesize = linesize << s->interlaced_dct;
2154 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2158 dest_cb= s->dest[1];
2159 dest_cr= s->dest[2];
2161 dest_y = s->b_scratchpad;
2162 dest_cb= s->b_scratchpad+16*linesize;
2163 dest_cr= s->b_scratchpad+32*linesize;
2167 /* motion handling */
2168 /* decoding or more than one mb_type (MC was already done otherwise) */
2171 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2172 if (s->mv_dir & MV_DIR_FORWARD) {
2173 ff_thread_await_progress(&s->last_picture_ptr->f,
2174 ff_MPV_lowest_referenced_row(s, 0),
2177 if (s->mv_dir & MV_DIR_BACKWARD) {
2178 ff_thread_await_progress(&s->next_picture_ptr->f,
2179 ff_MPV_lowest_referenced_row(s, 1),
2184 op_qpix= s->me.qpel_put;
2185 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2186 op_pix = s->dsp.put_pixels_tab;
2188 op_pix = s->dsp.put_no_rnd_pixels_tab;
2190 if (s->mv_dir & MV_DIR_FORWARD) {
2191 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2192 op_pix = s->dsp.avg_pixels_tab;
2193 op_qpix= s->me.qpel_avg;
2195 if (s->mv_dir & MV_DIR_BACKWARD) {
2196 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2200 /* skip dequant / idct if we are really late ;) */
2201 if(s->avctx->skip_idct){
2202 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2203 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2204 || s->avctx->skip_idct >= AVDISCARD_ALL)
2208 /* add dct residue */
2209 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2210 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2211 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2212 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2213 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2214 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2216 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2217 if (s->chroma_y_shift){
2218 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2219 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2223 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2224 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2225 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2226 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2229 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2230 add_dct(s, block[0], 0, dest_y , dct_linesize);
2231 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2232 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2233 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2235 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2236 if(s->chroma_y_shift){//Chroma420
2237 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2238 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2241 dct_linesize = uvlinesize << s->interlaced_dct;
2242 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2244 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2245 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2246 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2247 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2248 if(!s->chroma_x_shift){//Chroma444
2249 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2250 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2251 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2252 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2257 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2258 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2261 /* dct only in intra block */
2262 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2263 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2264 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2265 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2266 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2268 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2269 if(s->chroma_y_shift){
2270 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2271 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2275 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2276 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2277 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2278 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2282 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2283 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2284 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2285 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2287 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2288 if(s->chroma_y_shift){
2289 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2290 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2293 dct_linesize = uvlinesize << s->interlaced_dct;
2294 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2296 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2297 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2298 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2299 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2300 if(!s->chroma_x_shift){//Chroma444
2301 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2302 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2303 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2304 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
2312 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2313 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2314 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/**
 * Decode one macroblock: reconstruct it from the (already parsed) DCT
 * coefficient blocks into the current picture.
 * Dispatches to MPV_decode_mb_internal() with a constant is_mpeg12 flag
 * (1 for FMT_MPEG1 output format, 0 otherwise) so each variant can be
 * specialized by the compiler.
 * NOTE(review): the joining "} else" (and the usual !CONFIG_SMALL
 * preprocessor guard around the specialized path) is not visible in this
 * view of the file — confirm against the full source.
 */
2319 void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2321     if(s->out_format == FMT_MPEG1) {
/* MPEG-1/2: take the is_mpeg12 == 1 specialized path */
2322         MPV_decode_mb_internal(s, block, 1);
/* all other formats (H.263, MPEG-4, ...): generic path */
2325         MPV_decode_mb_internal(s, block, 0);
2329  * @param h is the normal height, this will be reduced automatically if needed for the last row
/**
 * Notify the user application that a horizontal band of the picture has
 * been decoded, after first padding the picture edges for unrestricted
 * motion vectors.  Called with the top y coordinate and nominal height h
 * of the finished band.
 */
2331 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2332 const int field_pic= s->picture_structure != PICT_FRAME;
/* Edge emulation: pad the frame borders of reference pictures so that
 * motion vectors may point outside the picture.  Skipped for hardware
 * acceleration, non-reference frames, and when CODEC_FLAG_EMU_EDGE asks
 * for software edge emulation instead. */
2338 if (!s->avctx->hwaccel
2339 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2340 && s->unrestricted_mv
2341 && s->current_picture.f.reference
2343 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2344 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
2345 int sides = 0, edge_h;
/* chroma subsampling shifts for the Cb/Cr planes */
2346 int hshift = desc->log2_chroma_w;
2347 int vshift = desc->log2_chroma_h;
/* only the first band pads the top edge, only the last pads the bottom */
2348 if (y==0) sides |= EDGE_TOP;
2349 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
/* clip band height so the last band does not run past the picture */
2351 edge_h= FFMIN(h, s->v_edge_pos - y);
/* pad luma, then both chroma planes (chroma offsets/sizes scaled by the
 * subsampling shifts) */
2353 s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize,
2354 s->linesize, s->h_edge_pos, edge_h,
2355 EDGE_WIDTH, EDGE_WIDTH, sides);
2356 s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
2357 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2358 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2359 s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
2360 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2361 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
/* reduce h for the last row so the callback never exceeds the display height */
2364 h= FFMIN(h, s->avctx->height - y);
/* for field pictures, only report once both fields are available unless
 * the application explicitly allows per-field bands */
2366 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2368 if (s->avctx->draw_horiz_band) {
2370 int offset[AV_NUM_DATA_POINTERS];
/* pick the frame to hand out: the just-decoded frame when output is in
 * coded order (B-frame, low delay, or SLICE_FLAG_CODED_ORDER), otherwise
 * the previous frame which is the one in display order */
2373 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2374 src = &s->current_picture_ptr->f;
2375 else if(s->last_picture_ptr)
2376 src = &s->last_picture_ptr->f;
/* NOTE(review): the "else return;" fallback for a missing last picture is
 * not visible in this view — confirm against the full source. */
2380 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2381 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
/* non-B frames: point the callback at the start of the band in each plane */
2384 offset[0]= y * s->linesize;
2386 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2387 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2393 s->avctx->draw_horiz_band(s->avctx, src, offset,
2394 y, s->picture_structure, h);
/**
 * Initialize the per-macroblock block indices and destination plane
 * pointers (s->block_index[], s->dest[]) for the macroblock at
 * (s->mb_x, s->mb_y).  Must be called before decoding/encoding each MB
 * row position.
 */
2398 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2399 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2400 const int uvlinesize = s->current_picture.f.linesize[1];
/* log2 of the macroblock size (16 = 1<<4) used to scale pixel offsets */
2401 const int mb_size= 4;
/* luma 8x8 block indices: two per row in the b8 grid, top row then bottom;
 * the "-2"/"-1" bias selects the blocks left of the current MB start */
2403 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2404 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2405 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2406 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* chroma block indices live after the luma area; one per plane per MB */
2407 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2408 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2409 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* destination pointers: start one MB to the left of mb_x (caller advances
 * them per MB); chroma offsets are reduced by the horizontal subsampling */
2411 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2412 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2413 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
/* skip the vertical adjustment when a draw_horiz_band B-frame hack keeps
 * dest[] pointing at a temporary band buffer */
2415 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2417 if(s->picture_structure==PICT_FRAME){
/* frame picture: advance by whole MB rows */
2418 s->dest[0] += s->mb_y * linesize << mb_size;
2419 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2420 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: mb_y counts field rows, so halve it
 * NOTE(review): the "}else{" between the two branches is not visible in
 * this view — confirm against the full source. */
2422 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2423 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2424 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2425 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/**
 * Flush the decoder state: release all internally/user allocated picture
 * buffers, drop the current/last/next picture references, and reset the
 * parser and bitstream buffer so decoding can restart cleanly (e.g. after
 * a seek).
 */
2430 void ff_mpeg_flush(AVCodecContext *avctx){
2432 MpegEncContext *s = avctx->priv_data;
/* nothing to do if the context was never (fully) initialized */
2434 if(s==NULL || s->picture==NULL)
/* free every allocated picture we own (internal or user-supplied type) */
2437 for(i=0; i<s->picture_count; i++){
2438 if (s->picture[i].f.data[0] &&
2439 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2440 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2441 free_frame_buffer(s, &s->picture[i]);
/* drop all reference-picture pointers; they refer to freed buffers now */
2443 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2445 s->mb_x= s->mb_y= 0;
/* fully reset the start-code parser state */
2447 s->parse_context.state= -1;
2448 s->parse_context.frame_start_found= 0;
2449 s->parse_context.overread= 0;
2450 s->parse_context.overread_index= 0;
2451 s->parse_context.index= 0;
2452 s->parse_context.last_index= 0;
/* discard any buffered bitstream remainder */
2453 s->bitstream_buffer_size=0;
/**
 * MPEG-1 intra-block inverse quantization (C reference implementation).
 * DC (block[0]) is scaled by the luma/chroma DC scale depending on the
 * block index n; AC coefficients are dequantized in zigzag order as
 * (|level| * qscale * intra_matrix[j]) >> 3, then forced odd via
 * (level - 1) | 1 as MPEG-1 mismatch control requires.
 * @param block DCT coefficient block, modified in place
 * @param n     block index (used to pick luma vs chroma DC scale)
 * @param qscale quantizer scale for this block
 */
2457 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2458 DCTELEM *block, int n, int qscale)
2460 int i, level, nCoeffs;
2461 const uint16_t *quant_matrix;
/* only coefficients up to the last nonzero index need processing */
2463 nCoeffs= s->block_last_index[n];
/* DC: luma blocks use y_dc_scale, chroma blocks c_dc_scale
 * NOTE(review): the "if (n < 4)" / "else" selecting between these two
 * lines is not visible in this view — confirm against the full source. */
2466 block[0] = block[0] * s->y_dc_scale;
2468 block[0] = block[0] * s->c_dc_scale;
2469 /* XXX: only mpeg1 */
2470 quant_matrix = s->intra_matrix;
2471 for(i=1;i<=nCoeffs;i++) {
2472 int j= s->intra_scantable.permutated[i];
/* the two identical computations below are the negative- and
 * positive-level branches; the sign handling around them is elided here */
2477 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2478 level = (level - 1) | 1;
2481 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2482 level = (level - 1) | 1;
/**
 * MPEG-1 inter-block inverse quantization (C reference implementation).
 * All coefficients (including index 0) are dequantized as
 * ((2*|level| + 1) * qscale * inter_matrix[j]) >> 4, then forced odd via
 * (level - 1) | 1 for MPEG-1 mismatch control.
 * @param block DCTELEM coefficient block, modified in place
 * @param n     block index (selects block_last_index entry)
 * @param qscale quantizer scale for this block
 */
2489 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2490 DCTELEM *block, int n, int qscale)
2492 int i, level, nCoeffs;
2493 const uint16_t *quant_matrix;
2495 nCoeffs= s->block_last_index[n];
2497 quant_matrix = s->inter_matrix;
2498 for(i=0; i<=nCoeffs; i++) {
/* intra_scantable holds the active (permuted) scan for both intra and
 * inter blocks here */
2499 int j= s->intra_scantable.permutated[i];
/* negative- and positive-level branches; the surrounding sign handling
 * is elided in this view of the file */
2504 level = (((level << 1) + 1) * qscale *
2505 ((int) (quant_matrix[j]))) >> 4;
2506 level = (level - 1) | 1;
2509 level = (((level << 1) + 1) * qscale *
2510 ((int) (quant_matrix[j]))) >> 4;
2511 level = (level - 1) | 1;
/**
 * MPEG-2 intra-block inverse quantization (C reference implementation).
 * Like the MPEG-1 intra variant but without the (level-1)|1 oddification:
 * MPEG-2 uses sum-based mismatch control instead (handled elsewhere).
 * With alternate_scan all 64 coefficients are processed regardless of
 * block_last_index.
 * @param block DCT coefficient block, modified in place
 * @param n     block index (used to pick luma vs chroma DC scale)
 * @param qscale quantizer scale for this block
 */
2518 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2519 DCTELEM *block, int n, int qscale)
2521 int i, level, nCoeffs;
2522 const uint16_t *quant_matrix;
/* alternate scan order forces a full 64-coefficient pass */
2524 if(s->alternate_scan) nCoeffs= 63;
2525 else nCoeffs= s->block_last_index[n];
/* DC: luma vs chroma scale — the selecting "if (n < 4)" / "else" is not
 * visible in this view; confirm against the full source */
2528 block[0] = block[0] * s->y_dc_scale;
2530 block[0] = block[0] * s->c_dc_scale;
2531 quant_matrix = s->intra_matrix;
2532 for(i=1;i<=nCoeffs;i++) {
2533 int j= s->intra_scantable.permutated[i];
/* negative- and positive-level branches (sign handling elided here) */
2538 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2541 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * Bit-exact MPEG-2 intra-block inverse quantization.  Same arithmetic as
 * dct_unquantize_mpeg2_intra_c; the bit-exact variant additionally
 * performs the spec's sum-parity mismatch control on the last coefficient
 * (that accumulation is not visible in this view — confirm against the
 * full source).
 * @param block DCT coefficient block, modified in place
 * @param n     block index (used to pick luma vs chroma DC scale)
 * @param qscale quantizer scale for this block
 */
2548 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2549 DCTELEM *block, int n, int qscale)
2551 int i, level, nCoeffs;
2552 const uint16_t *quant_matrix;
/* alternate scan order forces a full 64-coefficient pass */
2555 if(s->alternate_scan) nCoeffs= 63;
2556 else nCoeffs= s->block_last_index[n];
/* DC: luma vs chroma scale — the selecting "if (n < 4)" / "else" is not
 * visible in this view */
2559 block[0] = block[0] * s->y_dc_scale;
2561 block[0] = block[0] * s->c_dc_scale;
2562 quant_matrix = s->intra_matrix;
2563 for(i=1;i<=nCoeffs;i++) {
2564 int j= s->intra_scantable.permutated[i];
/* negative- and positive-level branches (sign handling elided here) */
2569 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2572 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * MPEG-2 inter-block inverse quantization (C reference implementation).
 * Coefficients are dequantized as ((2*|level| + 1) * qscale *
 * inter_matrix[j]) >> 4 without per-coefficient oddification; MPEG-2
 * mismatch control is sum-parity based (the accumulation is not visible
 * in this view — confirm against the full source).
 * @param block DCTELEM coefficient block, modified in place
 * @param n     block index (selects block_last_index entry)
 * @param qscale quantizer scale for this block
 */
2581 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2582 DCTELEM *block, int n, int qscale)
2584 int i, level, nCoeffs;
2585 const uint16_t *quant_matrix;
/* alternate scan order forces a full 64-coefficient pass */
2588 if(s->alternate_scan) nCoeffs= 63;
2589 else nCoeffs= s->block_last_index[n];
2591 quant_matrix = s->inter_matrix;
2592 for(i=0; i<=nCoeffs; i++) {
/* intra_scantable holds the active (permuted) scan for inter blocks too */
2593 int j= s->intra_scantable.permutated[i];
/* negative- and positive-level branches (sign handling elided here) */
2598 level = (((level << 1) + 1) * qscale *
2599 ((int) (quant_matrix[j]))) >> 4;
2602 level = (((level << 1) + 1) * qscale *
2603 ((int) (quant_matrix[j]))) >> 4;
/**
 * H.263-style intra-block inverse quantization (also used by MPEG-4 etc.).
 * DC is scaled by the luma/chroma DC scale; AC coefficients use the
 * uniform-quantizer reconstruction level*qmul +/- qadd, where
 * qadd = (qscale - 1) | 1 (and qmul is derived from qscale — its
 * assignment is not visible in this view).
 * @param block DCTELEM coefficient block, modified in place
 * @param n     block index (used to pick luma vs chroma DC scale)
 * @param qscale quantizer scale for this block
 */
2612 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2613 DCTELEM *block, int n, int qscale)
2615 int i, level, qmul, qadd;
2618 assert(s->block_last_index[n]>=0);
/* DC: luma vs chroma scale — the selecting "if (n < 4)" / "else" is not
 * visible in this view */
2624 block[0] = block[0] * s->y_dc_scale;
2626 block[0] = block[0] * s->c_dc_scale;
/* force qadd odd so reconstruction levels stay odd multiples */
2627 qadd = (qscale - 1) | 1;
/* raster_end[] maps the last zigzag index to the last raster-scan row end,
 * bounding the raster-order loop below */
2634 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2636 for(i=1; i<=nCoeffs; i++) {
/* negative branch subtracts qadd, positive branch adds it
 * (the sign test between the two lines is elided here) */
2640 level = level * qmul - qadd;
2642 level = level * qmul + qadd;
/**
 * H.263-style inter-block inverse quantization.  Identical to the intra
 * variant except the DC coefficient is treated like any AC coefficient
 * (no y/c DC scaling) and the loop starts at index 0.
 * @param block DCTELEM coefficient block, modified in place
 * @param n     block index (selects block_last_index entry)
 * @param qscale quantizer scale for this block
 */
2649 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2650 DCTELEM *block, int n, int qscale)
2652 int i, level, qmul, qadd;
2655 assert(s->block_last_index[n]>=0);
/* force qadd odd so reconstruction levels stay odd multiples;
 * qmul's assignment is not visible in this view */
2657 qadd = (qscale - 1) | 1;
/* bound the raster-order loop by the last nonzero coefficient's row end */
2660 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2662 for(i=0; i<=nCoeffs; i++) {
/* negative branch subtracts qadd, positive branch adds it
 * (the sign test between the two lines is elided here) */
2666 level = level * qmul - qadd;
2668 level = level * qmul + qadd;
2676 * set qscale and update qscale dependent variables.
/**
 * Clamp the quantizer scale to the legal [1, 31] range (the lower-bound
 * clamp and the stores of the clamped value are not fully visible in this
 * view — confirm against the full source), then refresh the derived
 * chroma qscale and the luma/chroma DC scale factors from their tables.
 */
2678 void ff_set_qscale(MpegEncContext * s, int qscale)
2682 else if (qscale > 31)
/* chroma may use a different (possibly nonlinear) qscale mapping */
2686 s->chroma_qscale= s->chroma_qscale_table[qscale];
2688 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
/* chroma DC scale is indexed by the already-mapped chroma qscale */
2689 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2692 void ff_MPV_report_decode_progress(MpegEncContext *s)
2694 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
2695 ff_thread_report_progress(&s->current_picture_ptr->f, s->mb_y, 0);