2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
38 #include "xvmc_internal.h"
/* C reference implementations of coefficient dequantization; they are
 * installed as function pointers in ff_dct_common_init() and may then be
 * replaced by architecture-specific versions. */
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                                DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                        DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                        DCTELEM *block, int n, int qscale);
61 /* enable all paranoid tests for rounding, overflows, etc... */
/* Default chroma qscale mapping: identity (chroma quantizer == luma
 * quantizer). */
static const uint8_t ff_default_chroma_qscale_table[32] = {
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* NOTE(review): closing "};" not visible in this excerpt. */

/* MPEG-1 DC scale table: constant 8 for every quantizer index. */
const uint8_t ff_mpeg1_dc_scale_table[128] = {
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale tables: constant 4, 2 and 1 respectively.  Combined with
 * the MPEG-1 table (constant 8) in ff_mpeg2_dc_scale_table below; presumably
 * indexed by the bitstream's intra_dc_precision field — confirm against
 * callers. */
static const uint8_t mpeg2_dc_scale_table1[128] = {
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,

static const uint8_t mpeg2_dc_scale_table2[128] = {
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,

static const uint8_t mpeg2_dc_scale_table3[128] = {
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,

/* Lookup: scale table per DC precision index (0 -> MPEG-1 table of 8s). */
const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
/* Software 4:2:0 pixel-format list; initializer entries and the terminating
 * AV_PIX_FMT_NONE are missing from this excerpt. */
const enum AVPixelFormat ff_pixfmt_list_420[] = {

/* Hardware-accelerated 4:2:0 pixel-format list (DXVA2 / VA-API); the list
 * terminator is not visible in this excerpt. */
const enum AVPixelFormat ff_hwaccel_pixfmt_list_420[] = {
    AV_PIX_FMT_DXVA2_VLD,
    AV_PIX_FMT_VAAPI_VLD,
/**
 * Scan a buffer for an MPEG start code (00 00 01 xx).
 * @param p      current read position (the buffer-end parameter is on a line
 *               not visible in this excerpt)
 * @param state  rolling byte register carried across calls, so a start code
 *               split between two buffers is still detected
 * @return pointer positioned relative to the match, clamped to the buffer end
 * (several body lines are missing from this excerpt)
 */
const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
                                          uint32_t * restrict state)
    /* feed up to 3 bytes into the shift register, stopping early when it
     * already holds a 00 00 01 prefix or the input is exhausted */
    for (i = 0; i < 3; i++) {
        uint32_t tmp = *state << 8;
        *state = tmp + *(p++);
        if (tmp == 0x100 || p == end)
    /* skip ahead over trailing bytes that cannot begin a 00 00 01 prefix */
    if (p[-1] > 1 ) p += 3;
    else if (p[-2] ) p += 2;
    else if (p[-3]|(p[-1]-1)) p++;
    p = FFMIN(p, end) - 4;
/* init common dct for both encoder and decoder */
av_cold int ff_dct_common_init(MpegEncContext *s)
    ff_dsputil_init(&s->dsp, s->avctx);

    /* install the C dequantization fallbacks */
    s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    /* bit-exact variant when CODEC_FLAG_BITEXACT is requested */
    if (s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

    /* architecture-specific overrides (the guarding #if ARCH_* lines are
     * missing from this excerpt) */
    ff_MPV_common_init_x86(s);
    ff_MPV_common_init_axp(s);
    ff_MPV_common_init_arm(s);
    ff_MPV_common_init_altivec(s);
    ff_MPV_common_init_bfin(s);

    /* load & permutate scantables
     * note: only wmv uses different ones
     */
    if (s->alternate_scan) {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/**
 * Presumably copies src into dst and tags the destination as a copy; the
 * actual copy statement is on lines missing from this excerpt.
 */
void ff_copy_picture(Picture *dst, Picture *src)
    dst->f.type = FF_BUFFER_TYPE_COPY;
/**
 * Release a frame buffer
 */
static void free_frame_buffer(MpegEncContext *s, Picture *pic)
    /* WM Image / Screen codecs allocate internal buffers with different
     * dimensions / colorspaces; ignore user-defined callbacks for these. */
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE &&
        s->codec_id != AV_CODEC_ID_MSS2)
        ff_thread_release_buffer(s->avctx, &pic->f);
    /* NOTE(review): an "else" line appears to be missing from this excerpt
     * between the two release calls. */
    avcodec_default_release_buffer(s->avctx, &pic->f);
    /* per-picture hwaccel state goes with the buffer */
    av_freep(&pic->f.hwaccel_picture_private);
/**
 * Allocate a frame buffer
 */
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
    /* allocate per-picture private data for hardware acceleration */
    if (s->avctx->hwaccel) {
        assert(!pic->f.hwaccel_picture_private);
        if (s->avctx->hwaccel->priv_data_size) {
            pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
            if (!pic->f.hwaccel_picture_private) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");

    /* WM Image / Screen codecs bypass user get_buffer callbacks (matches
     * free_frame_buffer above) */
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE &&
        s->codec_id != AV_CODEC_ID_MSS2)
        r = ff_thread_get_buffer(s->avctx, &pic->f);
        r = avcodec_default_get_buffer(s->avctx, &pic->f);

    if (r < 0 || !pic->f.type || !pic->f.data[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
               r, pic->f.type, pic->f.data[0]);
        av_freep(&pic->f.hwaccel_picture_private);

    /* strides must stay constant once the context picked them up */
    if (s->linesize && (s->linesize != pic->f.linesize[0] ||
                        s->uvlinesize != pic->f.linesize[1])) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (stride changed)\n");
        free_frame_buffer(s, pic);

    /* the code assumes both chroma planes share one stride */
    if (pic->f.linesize[1] != pic->f.linesize[2]) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (uv stride mismatch)\n");
        free_frame_buffer(s, pic);
/**
 * Allocate a Picture.
 * The pixels are allocated/set by calling get_buffer() if shared = 0
 */
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
    const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
    // the + 1 is needed so memset(,,stride*height) does not sig11
    const int mb_array_size = s->mb_stride * s->mb_height;
    const int b8_array_size = s->b8_stride * s->mb_height * 2;
    const int b4_array_size = s->b4_stride * s->mb_height * 4;

    /* shared path: caller supplies the pixels, just tag the picture
     * (the if (shared)/else lines are missing from this excerpt) */
    assert(pic->f.data[0]);
    assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
    pic->f.type = FF_BUFFER_TYPE_SHARED;
    /* non-shared path: obtain a fresh buffer */
    assert(!pic->f.data[0]);
    if (alloc_frame_buffer(s, pic) < 0)

    /* latch the strides picked by the buffer allocator */
    s->linesize = pic->f.linesize[0];
    s->uvlinesize = pic->f.linesize[1];

    if (pic->f.qscale_table == NULL) {
        /* per-MB encoder statistics (guarding condition not visible here) */
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
                          mb_array_size * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
                          mb_array_size * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
                          mb_array_size * sizeof(int8_t ), fail)

        FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
                          mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
                          (big_mb_num + s->mb_stride) * sizeof(uint8_t),
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
                          (big_mb_num + s->mb_stride) * sizeof(uint32_t),
        /* the public pointers skip the top border row of the base arrays */
        pic->f.mb_type      = pic->mb_type_base      + 2 * s->mb_stride + 1;
        pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
        if (s->out_format == FMT_H264) {
            /* H.264: motion values at 4x4-block granularity */
            for (i = 0; i < 2; i++) {
                FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
                                  2 * (b4_array_size + 4) * sizeof(int16_t),
                pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
                FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
                                  4 * mb_array_size * sizeof(uint8_t), fail)
            pic->f.motion_subsample_log2 = 2;
        } else if (s->out_format == FMT_H263 || s->encoding ||
                   (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
            /* H.263 / encoding / MV-debug: 8x8-block granularity */
            for (i = 0; i < 2; i++) {
                FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
                                  2 * (b8_array_size + 4) * sizeof(int16_t),
                pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
                FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
                                  4 * mb_array_size * sizeof(uint8_t), fail)
            pic->f.motion_subsample_log2 = 3;
        if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
            FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
                              64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
        pic->f.qstride = s->mb_stride;
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
                          1 * sizeof(AVPanScan), fail)

fail: // for the FF_ALLOCZ_OR_GOTO macro
    free_frame_buffer(s, pic);
/**
 * Deallocate a picture.
 */
static void free_picture(MpegEncContext *s, Picture *pic)
    /* return the frame buffer first, unless the pixels are caller-owned */
    if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
        free_frame_buffer(s, pic);

    av_freep(&pic->mb_var);
    av_freep(&pic->mc_mb_var);
    av_freep(&pic->mb_mean);
    av_freep(&pic->f.mbskip_table);
    av_freep(&pic->qscale_table_base);
    pic->f.qscale_table = NULL;
    av_freep(&pic->mb_type_base);
    pic->f.mb_type = NULL;
    av_freep(&pic->f.dct_coeff);
    av_freep(&pic->f.pan_scan);
    /* NOTE(review): duplicate of the mb_type = NULL assignment above;
     * redundant but harmless. */
    pic->f.mb_type = NULL;
    for (i = 0; i < 2; i++) {
        av_freep(&pic->motion_val_base[i]);
        av_freep(&pic->f.ref_index[i]);
        pic->f.motion_val[i] = NULL;

    /* shared pictures: only clear the borrowed data pointers */
    if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
        for (i = 0; i < 4; i++) {
            pic->f.data[i] = NULL;
/* Allocate the per-thread (slice context) scratch buffers; freed through
 * ff_MPV_common_end() on failure. */
static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
    int y_size = s->b8_stride * (2 * s->mb_height + 1);
    int c_size = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;

    // edge emu needs blocksize + filter length - 1
    // (= 17x17 for halfpel / 21x21 for h264)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
                      (s->width + 64) * 2 * 21 * 2, fail); // (width + edge + align)*interlaced*MBsize*tolerance

    // FIXME should be linesize instead of s->width * 2
    // but that is not known before get_buffer()
    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
                      (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
    /* all four scratch areas alias the same allocation */
    s->me.temp = s->me.scratchpad;
    s->rd_scratchpad = s->me.scratchpad;
    s->b_scratchpad = s->me.scratchpad;
    s->obmc_scratchpad = s->me.scratchpad + 16;

    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
                      ME_MAP_SIZE * sizeof(uint32_t), fail)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
                      ME_MAP_SIZE * sizeof(uint32_t), fail)
    if (s->avctx->noise_reduction) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
                          2 * 64 * sizeof(int), fail)

    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
    s->block = s->blocks[0];

    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];

    if (s->out_format == FMT_H263) {
        /* AC prediction values: [luma | chroma-b | chroma-r] segments */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
                          yc_size * sizeof(int16_t) * 16, fail);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;

    return -1; // free() through ff_MPV_common_end()
/* Free the per-thread scratch buffers allocated by init_duplicate_context().
 * me.temp / rd_scratchpad / b_scratchpad alias me.scratchpad, so only the
 * allocation itself is freed (some pointer-clearing lines are missing from
 * this excerpt). */
static void free_duplicate_context(MpegEncContext *s)
    av_freep(&s->edge_emu_buffer);
    av_freep(&s->me.scratchpad);
    s->obmc_scratchpad = NULL;

    av_freep(&s->dct_error_sum);
    av_freep(&s->me.map);
    av_freep(&s->me.score_map);
    av_freep(&s->blocks);
    av_freep(&s->ac_val_base);
/* Save src's per-thread pointers/state into bak so they survive the
 * whole-struct memcpy in ff_update_duplicate_context(); additional COPY()
 * lines are missing from this excerpt. */
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
#define COPY(a) bak->a = src->a
    COPY(edge_emu_buffer);
    COPY(obmc_scratchpad);
    COPY(me.map_generation);
/* Make dst a working copy of src while keeping dst's own scratch buffers:
 * back up dst's pointers, memcpy the whole context, then restore them. */
void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
    // FIXME copy only needed parts
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    /* re-point pblocks at dst's own block array, not src's */
    for (i = 0; i < 12; i++) {
        dst->pblocks[i] = &dst->block[i];
    // STOP_TIMER("update_duplicate_context")
    // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/**
 * Update the decoder context dst from src for frame threading: picture
 * state, bitstream buffer and per-frame parameters.  Several guard and
 * return lines are missing from this excerpt.
 */
int ff_mpeg_update_thread_context(AVCodecContext *dst,
                                  const AVCodecContext *src)
    MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;

    if (dst == src || !s1->context_initialized)

    // FIXME can parameters change on I-frames?
    // in that case dst may need a reinit
    if (!s->context_initialized) {
        memcpy(s, s1, sizeof(MpegEncContext));
        /* give the copy its own picture range and bitstream buffer */
        s->picture_range_start += MAX_PICTURE_COUNT;
        s->picture_range_end += MAX_PICTURE_COUNT;
        s->bitstream_buffer = NULL;
        s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;

        ff_MPV_common_init(s);

    /* resolution change or pending reinit: rebuild size-dependent state */
    if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
        s->context_reinit = 0;
        s->height = s1->height;
        s->width = s1->width;
        if ((err = ff_MPV_common_frame_size_change(s)) < 0)

    s->avctx->coded_height = s1->avctx->coded_height;
    s->avctx->coded_width = s1->avctx->coded_width;
    s->avctx->width = s1->avctx->width;
    s->avctx->height = s1->avctx->height;

    s->coded_picture_number = s1->coded_picture_number;
    s->picture_number = s1->picture_number;
    s->input_picture_number = s1->input_picture_number;

    /* copy the picture array plus the field range between last_picture and
     * last_picture_ptr in one block */
    memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
    memcpy(&s->last_picture, &s1->last_picture,
           (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);

    // reset s->picture[].f.extended_data to s->picture[].f.data
    for (i = 0; i < s->picture_count; i++)
        s->picture[i].f.extended_data = s->picture[i].f.data;

    /* translate the source's picture pointers into this context's array */
    s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
    s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
    s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);

    // Error/bug resilience
    s->next_p_frame_damaged = s1->next_p_frame_damaged;
    s->workaround_bugs = s1->workaround_bugs;

    /* bulk-copy the field range [time_increment_bits, shape) */
    memcpy(&s->time_increment_bits, &s1->time_increment_bits,
           (char *) &s1->shape - (char *) &s1->time_increment_bits);

    s->max_b_frames = s1->max_b_frames;
    s->low_delay = s1->low_delay;
    s->droppable = s1->droppable;

    // DivX handling (doesn't work)
    s->divx_packed = s1->divx_packed;

    if (s1->bitstream_buffer) {
        if (s1->bitstream_buffer_size +
            FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
            /* NOTE(review): grows to s1->allocated_bitstream_buffer_size
             * while the memcpy+memset below need bitstream_buffer_size +
             * padding; this relies on s1's allocation already covering
             * that — verify. */
            av_fast_malloc(&s->bitstream_buffer,
                           &s->allocated_bitstream_buffer_size,
                           s1->allocated_bitstream_buffer_size);
        s->bitstream_buffer_size = s1->bitstream_buffer_size;
        memcpy(s->bitstream_buffer, s1->bitstream_buffer,
               s1->bitstream_buffer_size);
        memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
               FF_INPUT_BUFFER_PADDING_SIZE);

    // MPEG2/interlacing info
    memcpy(&s->progressive_sequence, &s1->progressive_sequence,
           (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);

    /* remember quality/type of the just-finished picture */
    if (!s1->first_field) {
        s->last_pict_type = s1->pict_type;
        if (s1->current_picture_ptr)
            s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;

        if (s1->pict_type != AV_PICTURE_TYPE_B) {
            s->last_non_b_pict_type = s1->pict_type;
/**
 * Set the given MpegEncContext to common defaults
 * (same for encoding and decoding).
 * The changed fields will not depend upon the
 * prior state of the MpegEncContext.
 */
void ff_MPV_common_defaults(MpegEncContext *s)
    /* default to MPEG-1 DC scaling and the identity chroma qscale table */
    s->y_dc_scale_table =
    s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
    s->chroma_qscale_table = ff_default_chroma_qscale_table;
    s->progressive_frame = 1;
    s->progressive_sequence = 1;
    s->picture_structure = PICT_FRAME;

    s->coded_picture_number = 0;
    s->picture_number = 0;
    s->input_picture_number = 0;

    s->picture_in_gop_number = 0;

    s->picture_range_start = 0;
    s->picture_range_end = MAX_PICTURE_COUNT;

    s->slice_context_count = 1;
/**
 * Set the given MpegEncContext to defaults for decoding.
 * the changed fields will not depend upon
 * the prior state of the MpegEncContext.
 */
void ff_MPV_decode_defaults(MpegEncContext *s)
    ff_MPV_common_defaults(s);
/**
 * Initialize and allocates MpegEncContext fields dependent on the resolution.
 */
static int init_context_frame(MpegEncContext *s)
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;

    /* derived geometry: macroblock and 8x8/4x4 block strides */
    s->mb_width = (s->width + 15) / 16;
    s->mb_stride = s->mb_width + 1;
    s->b8_stride = s->mb_width * 2 + 1;
    s->b4_stride = s->mb_width * 4 + 1;
    mb_array_size = s->mb_height * s->mb_stride;
    mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;

    /* set default edge pos, will be overridden
     * in decode_header if needed */
    s->h_edge_pos = s->mb_width * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num = s->mb_width * s->mb_height;

    /* (block_wrap entries 0..2 and 4 are set on lines missing from this
     * excerpt) */
    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[5] = s->mb_stride;

    y_size = s->b8_stride * (2 * s->mb_height + 1);
    c_size = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
                      fail); // error resilience code looks cleaner with this
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    s->mb_index2xy[s->mb_height * s->mb_width] =
        (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?

    /* Allocate MV tables */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
                      mv_table_size * 2 * sizeof(int16_t), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
                      mv_table_size * 2 * sizeof(int16_t), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
                      mv_table_size * 2 * sizeof(int16_t), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
                      mv_table_size * 2 * sizeof(int16_t), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
                      mv_table_size * 2 * sizeof(int16_t), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
                      mv_table_size * 2 * sizeof(int16_t), fail);
    /* working pointers skip the top border row of each base table */
    s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
    s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
    s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
    s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
    s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
    s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;

    /* Allocate MB type table */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
                      sizeof(uint16_t), fail); // needed for encoding

    FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
    /* (element size / fail arguments are on lines missing from this
     * excerpt) */

    FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
                     mb_array_size * sizeof(float), fail);
    FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
                     mb_array_size * sizeof(float), fail);

    /* error concealment scratch and per-MB error status */
    FF_ALLOC_OR_GOTO(s->avctx, s->er_temp_buffer,
                     mb_array_size * sizeof(uint8_t), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
                      mb_array_size * sizeof(uint8_t), fail);

    if (s->codec_id == AV_CODEC_ID_MPEG4 ||
        (s->flags & CODEC_FLAG_INTERLACED_ME)) {
        /* interlaced direct mode decoding tables */
        for (i = 0; i < 2; i++) {
            for (j = 0; j < 2; j++) {
                for (k = 0; k < 2; k++) {
                    FF_ALLOCZ_OR_GOTO(s->avctx,
                                      s->b_field_mv_table_base[i][j][k],
                                      mv_table_size * 2 * sizeof(int16_t),
                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
                                  mb_array_size * 2 * sizeof(uint8_t), fail);
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
                                  mv_table_size * 2 * sizeof(int16_t), fail);
                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
                              mb_array_size * 2 * sizeof(uint8_t), fail);

    if (s->out_format == FMT_H263) {
        /* coded-block flags at 8x8 granularity */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;

        /* cbp, ac_pred, pred_dir */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
                          mb_array_size * sizeof(uint8_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
                          mb_array_size * sizeof(uint8_t), fail);

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        // MN: we need these for error resilience of intra-frames
        /* DC prediction values, one plane each for Y/Cb/Cr; preset to 1024 */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
                          yc_size * sizeof(int16_t), fail);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        for (i = 0; i < yc_size; i++)
            s->dc_val_base[i] = 1024;

    /* which mb is a intra block */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
    memset(s->mbintra_table, 1, mb_array_size);

    /* init macroblock skip table */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
    // Note the + 2 is for a quicker mpeg4 slice_end detection

    if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
        s->avctx->debug_mv) {
        /* oversized planes (with edges) for drawing debug visualizations */
        s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
                2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
        s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
                2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
        s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
                2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);

    return AVERROR(ENOMEM);
/**
 * init common structure for both encoder and decoder.
 * this assumes that some variables like width/height are already set
 */
av_cold int ff_MPV_common_init(MpegEncContext *s)
    int nb_slices = (HAVE_THREADS &&
                     s->avctx->active_thread_type & FF_THREAD_SLICE) ?
                    s->avctx->thread_count : 1;

    if (s->encoding && s->avctx->slices)
        nb_slices = s->avctx->slices;

    /* interlaced MPEG-2 needs mb_height rounded to an even field pair */
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else if (s->codec_id != AV_CODEC_ID_H264)
        s->mb_height = (s->height + 15) / 16;

    if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
        av_log(s->avctx, AV_LOG_ERROR,
               "decoding to AV_PIX_FMT_NONE is not supported.\n");

    /* clamp the slice count to the thread and MB-row limits */
    if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
        max_slices = FFMIN(MAX_THREADS, s->mb_height);
        max_slices = MAX_THREADS;
        av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
               " reducing to %d\n", nb_slices, max_slices);
        nb_slices = max_slices;

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))

    ff_dct_common_init(s);

    s->flags = s->avctx->flags;
    s->flags2 = s->avctx->flags2;

    if (s->width && s->height) {
        /* set chroma shifts */
        av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,

        /* convert fourcc to upper case */
        s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
        s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);

        s->avctx->coded_frame = &s->current_picture.f;

        /* encoder-side tables (enclosing encoding check is on lines missing
         * from this excerpt) */
        if (s->msmpeg4_version) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
                              2 * 2 * (MAX_LEVEL + 1) *
                              (MAX_RUN + 1) * 2 * sizeof(int), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);

        /* quantization matrices, one set per 32 qscale values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
                          64 * 32 * sizeof(int), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
                          64 * 32 * sizeof(int), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
                          64 * 32 * 2 * sizeof(uint16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
                          64 * 32 * 2 * sizeof(uint16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
                          MAX_PICTURE_COUNT * sizeof(Picture *), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
                          MAX_PICTURE_COUNT * sizeof(Picture *), fail);

        if (s->avctx->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
                              2 * 64 * sizeof(uint16_t), fail);

    /* picture pool sized for all frame threads */
    s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
                      s->picture_count * sizeof(Picture), fail);
    for (i = 0; i < s->picture_count; i++) {
        avcodec_get_frame_defaults(&s->picture[i].f);

    if (s->width && s->height) {
        if (init_context_frame(s))

        s->parse_context.state = -1;

    s->context_initialized = 1;
    s->thread_context[0] = s;

    if (s->width && s->height) {
        /* one duplicate context per slice thread; start/end MB rows are
         * distributed evenly across slices */
        for (i = 1; i < nb_slices; i++) {
            s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
            memcpy(s->thread_context[i], s, sizeof(MpegEncContext));

        for (i = 0; i < nb_slices; i++) {
            if (init_duplicate_context(s->thread_context[i], s) < 0)
            s->thread_context[i]->start_mb_y =
                (s->mb_height * (i) + nb_slices / 2) / nb_slices;
            s->thread_context[i]->end_mb_y =
                (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
        if (init_duplicate_context(s, s) < 0)
        s->end_mb_y = s->mb_height;

    s->slice_context_count = nb_slices;

    ff_MPV_common_end(s);
/**
 * Frees and resets MpegEncContext fields depending on the resolution.
 * Is used during resolution changes to avoid a full reinitialization of the
 * context.
 */
static int free_context_frame(MpegEncContext *s)
    /* MV tables and their border-adjusted working pointers */
    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    s->p_mv_table = NULL;
    s->b_forw_mv_table = NULL;
    s->b_back_mv_table = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table = NULL;
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k] = NULL;
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j] = NULL;
        av_freep(&s->p_field_select_table[i]);

    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);

    av_freep(&s->error_status_table);
    av_freep(&s->er_temp_buffer);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    av_freep(&s->cplx_tab);
    av_freep(&s->bits_tab);

    s->linesize = s->uvlinesize = 0;

    for (i = 0; i < 3; i++)
        av_freep(&s->visualization_buffer[i]);

    /* frame threading keeps its buffers; only free them otherwise */
    if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
        avcodec_default_free_buffers(s->avctx);
/**
 * Free and reallocate all resolution-dependent state; used when the coded
 * frame size changes mid-stream.
 */
int ff_MPV_common_frame_size_change(MpegEncContext *s)
    /* tear down the per-slice duplicate contexts first */
    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
    free_duplicate_context(s);

    free_context_frame(s);

    /* force reallocation of every picture buffer at the new size */
    for (i = 0; i < s->picture_count; i++) {
        s->picture[i].needs_realloc = 1;

    s->last_picture_ptr =
    s->next_picture_ptr =
    s->current_picture_ptr = NULL;

    /* recompute mb_height (same rules as in ff_MPV_common_init) */
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else if (s->codec_id != AV_CODEC_ID_H264)
        s->mb_height = (s->height + 15) / 16;

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return AVERROR_INVALIDDATA;

    if ((err = init_context_frame(s)))

    s->thread_context[0] = s;

    if (s->width && s->height) {
        int nb_slices = s->slice_context_count;
        /* recreate the slice contexts with evenly distributed MB rows */
        if (nb_slices > 1) {
            for (i = 1; i < nb_slices; i++) {
                s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
                memcpy(s->thread_context[i], s, sizeof(MpegEncContext));

            for (i = 0; i < nb_slices; i++) {
                if (init_duplicate_context(s->thread_context[i], s) < 0)
                s->thread_context[i]->start_mb_y =
                    (s->mb_height * (i) + nb_slices / 2) / nb_slices;
                s->thread_context[i]->end_mb_y =
                    (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
        if (init_duplicate_context(s, s) < 0)
        s->end_mb_y = s->mb_height;

    s->slice_context_count = nb_slices;

    ff_MPV_common_end(s);
/* free common structure for both encoder and decoder */
void ff_MPV_common_end(MpegEncContext *s)
    /* per-slice duplicate contexts, then the main one */
    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        s->slice_context_count = 1;
    } else free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size = 0;

    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size = 0;

    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);

    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    /* thread copies share the picture array; only the owner frees it */
    if (s->picture && !s->avctx->internal->is_copy) {
        for (i = 0; i < s->picture_count; i++) {
            free_picture(s, &s->picture[i]);
    av_freep(&s->picture);

    free_context_frame(s);

    s->context_initialized = 0;
    s->last_picture_ptr =
    s->next_picture_ptr =
    s->current_picture_ptr = NULL;
    s->linesize = s->uvlinesize = 0;
/**
 * Build the derived max_level[], max_run[] and index_run[] tables for an RL
 * table, stored in static_store when provided, otherwise heap-allocated.
 */
void ff_init_rl(RLTable *rl,
                uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
    int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
    uint8_t index_run[MAX_RUN + 1];
    int last, run, level, start, end, i;

    /* If table is static, we can quit if rl->max_level[0] is not NULL */
    if (static_store && rl->max_level[0])

    /* compute max_level[], max_run[] and index_run[] */
    for (last = 0; last < 2; last++) {
        /* (start/end range selection lines are missing from this excerpt) */
        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1);
        for (i = start; i < end; i++) {
            run = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n)
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        /* NOTE(review): the if (static_store)/else lines are missing from
         * this excerpt; the paired assignments below are the two branches
         * (static storage vs. heap). */
        rl->max_level[last] = static_store[last];
        rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        rl->max_run[last] = static_store[last] + MAX_RUN + 1;
        rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/**
 * Build the per-qscale RL VLC tables (rl->rl_vlc[q]) from the generic
 * VLC table, pre-applying the qmul/qadd dequantization to the levels so
 * the decoder loop can skip it.
 */
1188 void ff_init_vlc_rl(RLTable *rl)
1192 for (q = 0; q < 32; q++) {
/* standard H.263-style rounding term; qmul is set on a line not
 * visible in this extract — presumably 2 * q (TODO confirm) */
1194 int qadd = (q - 1) | 1;
1200 for (i = 0; i < rl->vlc.table_size; i++) {
1201 int code = rl->vlc.table[i][0];
1202 int len = rl->vlc.table[i][1];
1205 if (len == 0) { // illegal code
1208 } else if (len < 0) { // more bits needed
1212 if (code == rl->n) { // esc
/* regular code: precompute run/level; run gets +192 for codes at or
 * past rl->last, marking "last coefficient" entries */
1216 run = rl->table_run[code] + 1;
1217 level = rl->table_level[code] * qmul + qadd;
1218 if (code >= rl->last) run += 192;
1221 rl->rl_vlc[q][i].len = len;
1222 rl->rl_vlc[q][i].level = level;
1223 rl->rl_vlc[q][i].run = run;
1228 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1232 /* release non reference frames */
1233 for (i = 0; i < s->picture_count; i++) {
1234 if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1235 (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1236 (remove_current || &s->picture[i] != s->current_picture_ptr)
1237 /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1238 free_frame_buffer(s, &s->picture[i]);
1243 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1245 if (pic->f.data[0] == NULL)
1247 if (pic->needs_realloc)
1248 if (!pic->owner2 || pic->owner2 == s)
/**
 * Scan the picture pool for a reusable slot and return its index, or
 * AVERROR_INVALIDDATA when none is free.
 * shared mode accepts only fully empty, type-0 slots; otherwise unused
 * slots that still carry an internally allocated buffer (type != 0) are
 * preferred before falling back to any unused slot.
 * NOTE(review): the shared/non-shared branch structure is not fully
 * visible in this extract — confirm against the complete file.
 */
1253 static int find_unused_picture(MpegEncContext *s, int shared)
1258 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1259 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1263 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1264 if (pic_is_unused(s, &s->picture[i]) && s->picture[i].f.type != 0)
1267 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1268 if (pic_is_unused(s, &s->picture[i]))
1273 return AVERROR_INVALIDDATA;
/**
 * Wrapper around find_unused_picture(): if the chosen slot was flagged
 * needs_realloc, free it completely and reset its AVFrame so the caller
 * receives a clean slot.  Error codes from find_unused_picture() pass
 * through untouched.
 */
1276 int ff_find_unused_picture(MpegEncContext *s, int shared)
1278 int ret = find_unused_picture(s, shared);
/* only valid slot indices are post-processed */
1280 if (ret >= 0 && ret < s->picture_range_end) {
1281 if (s->picture[ret].needs_realloc) {
1282 s->picture[ret].needs_realloc = 0;
1283 free_picture(s, &s->picture[ret]);
1284 avcodec_get_frame_defaults(&s->picture[ret].f);
1290 static void update_noise_reduction(MpegEncContext *s)
1294 for (intra = 0; intra < 2; intra++) {
1295 if (s->dct_count[intra] > (1 << 16)) {
1296 for (i = 0; i < 64; i++) {
1297 s->dct_error_sum[intra][i] >>= 1;
1299 s->dct_count[intra] >>= 1;
1302 for (i = 0; i < 64; i++) {
1303 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1304 s->dct_count[intra] +
1305 s->dct_error_sum[intra][i] / 2) /
1306 (s->dct_error_sum[intra][i] + 1);
1312 * generic function for encode/decode called after coding/decoding
1313 * the header and before a frame is coded/decoded.
/* Sets up current/last/next picture pointers, allocates frame buffers
 * (including grey dummy references when a real one is missing) and
 * selects the dequantizer functions for this frame. */
1315 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1321 /* mark & release old frames */
1322 if (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3) {
1323 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1324 s->last_picture_ptr != s->next_picture_ptr &&
1325 s->last_picture_ptr->f.data[0]) {
1326 if (s->last_picture_ptr->owner2 == s)
1327 free_frame_buffer(s, s->last_picture_ptr);
1330 /* release forgotten pictures */
1331 /* if (mpeg124/h263) */
1333 for (i = 0; i < s->picture_count; i++) {
1334 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
1335 &s->picture[i] != s->last_picture_ptr &&
1336 &s->picture[i] != s->next_picture_ptr &&
1337 s->picture[i].f.reference && !s->picture[i].needs_realloc) {
1338 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1339 av_log(avctx, AV_LOG_ERROR,
1340 "releasing zombie picture\n");
1341 free_frame_buffer(s, &s->picture[i]);
1348 ff_release_unused_pictures(s, 1);
/* reuse a pre-set but still empty current picture if one exists,
 * otherwise grab a free slot from the pool */
1350 if (s->current_picture_ptr &&
1351 s->current_picture_ptr->f.data[0] == NULL) {
1352 // we already have a unused image
1353 // (maybe it was set before reading the header)
1354 pic = s->current_picture_ptr;
1356 i = ff_find_unused_picture(s, 0);
1358 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1361 pic = &s->picture[i];
/* reference marking: H.264 uses the picture structure per field,
 * other codecs mark every non-B frame with 3 (both fields) */
1364 pic->f.reference = 0;
1365 if (!s->droppable) {
1366 if (s->codec_id == AV_CODEC_ID_H264)
1367 pic->f.reference = s->picture_structure;
1368 else if (s->pict_type != AV_PICTURE_TYPE_B)
1369 pic->f.reference = 3;
1372 pic->f.coded_picture_number = s->coded_picture_number++;
1374 if (ff_alloc_picture(s, pic, 0) < 0)
1377 s->current_picture_ptr = pic;
1378 // FIXME use only the vars from current_pic
/* derive per-frame flags (field order, interlacing) for the output */
1379 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1380 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1381 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1382 if (s->picture_structure != PICT_FRAME)
1383 s->current_picture_ptr->f.top_field_first =
1384 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1386 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1387 !s->progressive_sequence;
1388 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1391 s->current_picture_ptr->f.pict_type = s->pict_type;
1392 // if (s->flags && CODEC_FLAG_QSCALE)
1393 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1394 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1396 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
/* shift the reference pictures: non-B frames become the new "next"
 * reference, the previous "next" becomes "last" */
1398 if (s->pict_type != AV_PICTURE_TYPE_B) {
1399 s->last_picture_ptr = s->next_picture_ptr;
1401 s->next_picture_ptr = s->current_picture_ptr;
1403 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1404 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1405 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1406 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1407 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1408 s->pict_type, s->droppable);
/* make sure both references exist: allocate dummy frames when a stream
 * starts on a non-keyframe or a B-frame lacks a future reference */
1410 if (s->codec_id != AV_CODEC_ID_H264) {
1411 if ((s->last_picture_ptr == NULL ||
1412 s->last_picture_ptr->f.data[0] == NULL) &&
1413 (s->pict_type != AV_PICTURE_TYPE_I ||
1414 s->picture_structure != PICT_FRAME)) {
1415 if (s->pict_type != AV_PICTURE_TYPE_I)
1416 av_log(avctx, AV_LOG_ERROR,
1417 "warning: first frame is no keyframe\n");
1418 else if (s->picture_structure != PICT_FRAME)
1419 av_log(avctx, AV_LOG_INFO,
1420 "allocate dummy last picture for field based first keyframe\n");
1422 /* Allocate a dummy frame */
1423 i = ff_find_unused_picture(s, 0);
1425 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1428 s->last_picture_ptr = &s->picture[i];
1429 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1430 s->last_picture_ptr = NULL;
/* mark the dummy as fully decoded so frame threads never wait on it */
1433 ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
1434 ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
1435 s->last_picture_ptr->f.reference = 3;
1437 if ((s->next_picture_ptr == NULL ||
1438 s->next_picture_ptr->f.data[0] == NULL) &&
1439 s->pict_type == AV_PICTURE_TYPE_B) {
1440 /* Allocate a dummy frame */
1441 i = ff_find_unused_picture(s, 0);
1443 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1446 s->next_picture_ptr = &s->picture[i];
1447 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1448 s->next_picture_ptr = NULL;
1451 ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
1452 ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
1453 s->next_picture_ptr->f.reference = 3;
1457 if (s->last_picture_ptr)
1458 ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1459 if (s->next_picture_ptr)
1460 ff_copy_picture(&s->next_picture, s->next_picture_ptr);
/* under frame threading this context takes ownership of its references */
1462 if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME)) {
1463 if (s->next_picture_ptr)
1464 s->next_picture_ptr->owner2 = s;
1465 if (s->last_picture_ptr)
1466 s->last_picture_ptr->owner2 = s;
1469 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1470 s->last_picture_ptr->f.data[0]));
/* field pictures: address the selected field by offsetting the data
 * pointers (bottom field only) and doubling the linesizes */
1472 if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
1474 for (i = 0; i < 4; i++) {
1475 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1476 s->current_picture.f.data[i] +=
1477 s->current_picture.f.linesize[i];
1479 s->current_picture.f.linesize[i] *= 2;
1480 s->last_picture.f.linesize[i] *= 2;
1481 s->next_picture.f.linesize[i] *= 2;
1485 s->err_recognition = avctx->err_recognition;
1487 /* set dequantizer, we can't do it during init as
1488 * it might change for mpeg4 and we can't do it in the header
1489 * decode as init is not called for mpeg4 there yet */
1490 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1491 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1492 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1493 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1494 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1495 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1497 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1498 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
/* noise-reduction statistics only exist while encoding */
1501 if (s->dct_error_sum) {
1502 assert(s->avctx->noise_reduction && s->encoding);
1503 update_noise_reduction(s);
1506 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1507 return ff_xvmc_field_start(s, avctx);
1512 /* generic function for encode/decode called after a
1513 * frame has been coded/decoded. */
1514 void ff_MPV_frame_end(MpegEncContext *s)
1517 /* redraw edges for the frame if decoding didn't complete */
1518 // just to make sure that all data is rendered.
1519 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1520 ff_xvmc_field_end(s);
/* pad the picture borders (needed by unrestricted MC) unless a hardware
 * accelerator owns the surfaces or edge emulation is requested */
1521 } else if ((s->error_count || s->encoding) &&
1522 !s->avctx->hwaccel &&
1523 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1524 s->unrestricted_mv &&
1525 s->current_picture.f.reference &&
1527 !(s->flags & CODEC_FLAG_EMU_EDGE)) {
1528 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1529 int hshift = desc->log2_chroma_w;
1530 int vshift = desc->log2_chroma_h;
1531 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1532 s->h_edge_pos, s->v_edge_pos,
1533 EDGE_WIDTH, EDGE_WIDTH,
1534 EDGE_TOP | EDGE_BOTTOM);
/* chroma planes use the subsampled geometry */
1535 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1536 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1537 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1538 EDGE_TOP | EDGE_BOTTOM);
1539 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1540 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1541 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1542 EDGE_TOP | EDGE_BOTTOM);
/* remember the type and lambda of this picture for later decisions */
1547 s->last_pict_type = s->pict_type;
1548 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1549 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1550 s->last_non_b_pict_type = s->pict_type;
1553 /* copy back current_picture variables */
1554 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1555 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1556 s->picture[i] = s->current_picture;
1560 assert(i < MAX_PICTURE_COUNT);
1564 /* release non-reference frames */
1565 for (i = 0; i < s->picture_count; i++) {
1566 if (s->picture[i].f.data[0] && !s->picture[i].f.reference
1567 /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
1568 free_frame_buffer(s, &s->picture[i]);
1572 // clear copies, to avoid confusion
1574 memset(&s->last_picture, 0, sizeof(Picture));
1575 memset(&s->next_picture, 0, sizeof(Picture));
1576 memset(&s->current_picture, 0, sizeof(Picture));
1578 s->avctx->coded_frame = &s->current_picture_ptr->f;
/* NOTE(review): after the memsets above, current_picture.f.reference
 * would always read 0 here — in the complete file these sections look
 * conditionally compiled; verify against the full source. */
1580 if (s->codec_id != AV_CODEC_ID_H264 && s->current_picture.f.reference) {
/* signal other frame threads that this reference is fully decoded */
1581 ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
1586 * Draw a line from (ex, ey) -> (sx, sy).
1587 * @param w width of the image
1588 * @param h height of the image
1589 * @param stride stride/linesize of the image
1590 * @param color color of the line
1592 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1593 int w, int h, int stride, int color)
1597 sx = av_clip(sx, 0, w - 1);
1598 sy = av_clip(sy, 0, h - 1);
1599 ex = av_clip(ex, 0, w - 1);
1600 ey = av_clip(ey, 0, h - 1);
1602 buf[sy * stride + sx] += color;
1604 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1606 FFSWAP(int, sx, ex);
1607 FFSWAP(int, sy, ey);
1609 buf += sx + sy * stride;
1611 f = ((ey - sy) << 16) / ex;
1612 for (x = 0; x = ex; x++) {
1614 fr = (x * f) & 0xFFFF;
1615 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1616 buf[(y + 1) * stride + x] += (color * fr ) >> 16;
1620 FFSWAP(int, sx, ex);
1621 FFSWAP(int, sy, ey);
1623 buf += sx + sy * stride;
1626 f = ((ex - sx) << 16) / ey;
1629 for (y = 0; y = ey; y++) {
1631 fr = (y * f) & 0xFFFF;
1632 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1633 buf[y * stride + x + 1] += (color * fr ) >> 16;
1639 * Draw an arrow from (ex, ey) -> (sx, sy).
1640 * @param w width of the image
1641 * @param h height of the image
1642 * @param stride stride/linesize of the image
1643 * @param color color of the arrow
/**
 * Draw an arrow from (ex, ey) -> (sx, sy): a shaft plus, for vectors
 * longer than 3 pixels, two short strokes forming the head at (sx, sy).
 */
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
                       int ey, int w, int h, int stride, int color)
{
    int dx, dy;

    /* Endpoints may lie slightly outside the image; draw_line() clips
     * precisely, this only bounds the arithmetic. */
    sx = av_clip(sx, -100, w + 100);
    sy = av_clip(sy, -100, h + 100);
    ex = av_clip(ex, -100, w + 100);
    ey = av_clip(ey, -100, h + 100);

    dx = ex - sx;
    dy = ey - sy;

    if (dx * dx + dy * dy > 3 * 3) {
        /* head strokes: the vector rotated by +/-45 degrees, scaled to a
         * fixed length */
        int ax     = dx + dy;
        int ay     = -dx + dy;
        int length = ff_sqrt((ax * ax + ay * ay) << 8);

        // FIXME subpixel accuracy
        ax = ROUNDED_DIV(ax * 3 << 4, length);
        ay = ROUNDED_DIV(ay * 3 << 4, length);

        draw_line(buf, sx, sy, sx + ax, sy + ay, w, h, stride, color);
        draw_line(buf, sx, sy, sx - ay, sy + ax, w, h, stride, color);
    }

    /* the shaft itself */
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
}
1674 * Print debugging info for the given picture.
/* Two modes, selected by avctx->debug / debug_mv flags:
 *  - textual per-MB dump (skip count, qscale, MB type characters)
 *  - visual overlay painted into a copy of the frame (motion vectors,
 *    qp shading, MB-type coloring) */
1676 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
1678 if (s->avctx->hwaccel || !pict || !pict->mb_type)
1681 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1684 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1685 switch (pict->pict_type) {
1686 case AV_PICTURE_TYPE_I:
1687 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1689 case AV_PICTURE_TYPE_P:
1690 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1692 case AV_PICTURE_TYPE_B:
1693 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1695 case AV_PICTURE_TYPE_S:
1696 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1698 case AV_PICTURE_TYPE_SI:
1699 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1701 case AV_PICTURE_TYPE_SP:
1702 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
/* textual dump: one character group per macroblock */
1705 for (y = 0; y < s->mb_height; y++) {
1706 for (x = 0; x < s->mb_width; x++) {
1707 if (s->avctx->debug & FF_DEBUG_SKIP) {
1708 int count = s->mbskip_table[x + y * s->mb_stride];
1711 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1713 if (s->avctx->debug & FF_DEBUG_QP) {
1714 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1715 pict->qscale_table[x + y * s->mb_stride]);
1717 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1718 int mb_type = pict->mb_type[x + y * s->mb_stride];
1719 // Type & MV direction
1720 if (IS_PCM(mb_type))
1721 av_log(s->avctx, AV_LOG_DEBUG, "P");
1722 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1723 av_log(s->avctx, AV_LOG_DEBUG, "A");
1724 else if (IS_INTRA4x4(mb_type))
1725 av_log(s->avctx, AV_LOG_DEBUG, "i");
1726 else if (IS_INTRA16x16(mb_type))
1727 av_log(s->avctx, AV_LOG_DEBUG, "I");
1728 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1729 av_log(s->avctx, AV_LOG_DEBUG, "d");
1730 else if (IS_DIRECT(mb_type))
1731 av_log(s->avctx, AV_LOG_DEBUG, "D");
1732 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1733 av_log(s->avctx, AV_LOG_DEBUG, "g");
1734 else if (IS_GMC(mb_type))
1735 av_log(s->avctx, AV_LOG_DEBUG, "G");
1736 else if (IS_SKIP(mb_type))
1737 av_log(s->avctx, AV_LOG_DEBUG, "S");
1738 else if (!USES_LIST(mb_type, 1))
1739 av_log(s->avctx, AV_LOG_DEBUG, ">");
1740 else if (!USES_LIST(mb_type, 0))
1741 av_log(s->avctx, AV_LOG_DEBUG, "<");
1743 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1744 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* segmentation: +/-/| for 8x8, 16x8 and 8x16 splits */
1748 if (IS_8X8(mb_type))
1749 av_log(s->avctx, AV_LOG_DEBUG, "+");
1750 else if (IS_16X8(mb_type))
1751 av_log(s->avctx, AV_LOG_DEBUG, "-");
1752 else if (IS_8X16(mb_type))
1753 av_log(s->avctx, AV_LOG_DEBUG, "|");
1754 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1755 av_log(s->avctx, AV_LOG_DEBUG, " ");
1757 av_log(s->avctx, AV_LOG_DEBUG, "?");
1760 if (IS_INTERLACED(mb_type))
1761 av_log(s->avctx, AV_LOG_DEBUG, "=");
1763 av_log(s->avctx, AV_LOG_DEBUG, " ");
1766 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* visual overlay: the frame is copied into visualization_buffer[] so
 * the real decode buffers are not trashed */
1770 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1771 (s->avctx->debug_mv)) {
1772 const int shift = 1 + s->quarter_sample;
1776 int h_chroma_shift, v_chroma_shift, block_height;
1777 const int width = s->avctx->width;
1778 const int height = s->avctx->height;
1779 const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
1780 const int mv_stride = (s->mb_width << mv_sample_log2) +
1781 (s->codec_id == AV_CODEC_ID_H264 ? 0 : 1);
1782 s->low_delay = 0; // needed to see the vectors without trashing the buffers
1784 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1785 &h_chroma_shift, &v_chroma_shift);
1786 for (i = 0; i < 3; i++) {
1787 memcpy(s->visualization_buffer[i], pict->data[i],
1788 (i == 0) ? pict->linesize[i] * height:
1789 pict->linesize[i] * height >> v_chroma_shift);
1790 pict->data[i] = s->visualization_buffer[i];
1792 pict->type = FF_BUFFER_TYPE_COPY;
1793 ptr = pict->data[0];
1794 block_height = 16 >> v_chroma_shift;
1796 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1798 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1799 const int mb_index = mb_x + mb_y * s->mb_stride;
/* motion vectors: one pass per vector type (P fwd, B fwd, B back) */
1800 if ((s->avctx->debug_mv) && pict->motion_val) {
1802 for (type = 0; type < 3; type++) {
1806 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1807 (pict->pict_type!= AV_PICTURE_TYPE_P))
1812 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1813 (pict->pict_type!= AV_PICTURE_TYPE_B))
1818 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1819 (pict->pict_type!= AV_PICTURE_TYPE_B))
1824 if (!USES_LIST(pict->mb_type[mb_index], direction))
1827 if (IS_8X8(pict->mb_type[mb_index])) {
1829 for (i = 0; i < 4; i++) {
1830 int sx = mb_x * 16 + 4 + 8 * (i & 1);
1831 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
1832 int xy = (mb_x * 2 + (i & 1) +
1833 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1834 int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
1835 int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
1836 draw_arrow(ptr, sx, sy, mx, my, width,
1837 height, s->linesize, 100);
1839 } else if (IS_16X8(pict->mb_type[mb_index])) {
1841 for (i = 0; i < 2; i++) {
1842 int sx = mb_x * 16 + 8;
1843 int sy = mb_y * 16 + 4 + 8 * i;
1844 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
1845 int mx = (pict->motion_val[direction][xy][0] >> shift);
1846 int my = (pict->motion_val[direction][xy][1] >> shift);
1848 if (IS_INTERLACED(pict->mb_type[mb_index]))
1851 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1852 height, s->linesize, 100);
1854 } else if (IS_8X16(pict->mb_type[mb_index])) {
1856 for (i = 0; i < 2; i++) {
1857 int sx = mb_x * 16 + 4 + 8 * i;
1858 int sy = mb_y * 16 + 8;
1859 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
1860 int mx = pict->motion_val[direction][xy][0] >> shift;
1861 int my = pict->motion_val[direction][xy][1] >> shift;
1863 if (IS_INTERLACED(pict->mb_type[mb_index]))
1866 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1867 height, s->linesize, 100);
1870 int sx = mb_x * 16 + 8;
1871 int sy = mb_y * 16 + 8;
1872 int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
/* NOTE(review): precedence bug — ">>" binds looser than "+", so the
 * next two lines compute val >> (shift + sx) instead of
 * (val >> shift) + sx.  They should read
 *     int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
 *     int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
 * matching the parenthesized 8x8 branch above. */
1873 int mx = pict->motion_val[direction][xy][0] >> shift + sx;
1874 int my = pict->motion_val[direction][xy][1] >> shift + sy;
1875 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
/* qp visualization: shade the chroma planes proportionally to qscale */
1879 if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
1880 uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
1881 0x0101010101010101ULL;
1883 for (y = 0; y < block_height; y++) {
1884 *(uint64_t *)(pict->data[1] + 8 * mb_x +
1885 (block_height * mb_y + y) *
1886 pict->linesize[1]) = c;
1887 *(uint64_t *)(pict->data[2] + 8 * mb_x +
1888 (block_height * mb_y + y) *
1889 pict->linesize[2]) = c;
/* mb-type visualization: pick a chroma color per type */
1892 if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
1894 int mb_type = pict->mb_type[mb_index];
1897 #define COLOR(theta, r) \
1898 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
1899 v = (int)(128 + r * sin(theta * 3.141592 / 180));
1903 if (IS_PCM(mb_type)) {
1905 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
1906 IS_INTRA16x16(mb_type)) {
1908 } else if (IS_INTRA4x4(mb_type)) {
1910 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
1912 } else if (IS_DIRECT(mb_type)) {
1914 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
1916 } else if (IS_GMC(mb_type)) {
1918 } else if (IS_SKIP(mb_type)) {
1920 } else if (!USES_LIST(mb_type, 1)) {
1922 } else if (!USES_LIST(mb_type, 0)) {
1925 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
/* replicate the u/v byte across a 64-bit word to fill 8 pixels at once */
1929 u *= 0x0101010101010101ULL;
1930 v *= 0x0101010101010101ULL;
1931 for (y = 0; y < block_height; y++) {
1932 *(uint64_t *)(pict->data[1] + 8 * mb_x +
1933 (block_height * mb_y + y) * pict->linesize[1]) = u;
1934 *(uint64_t *)(pict->data[2] + 8 * mb_x +
1935 (block_height * mb_y + y) * pict->linesize[2]) = v;
/* overlay segmentation boundaries by XOR-ing luma with 0x80 */
1939 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
1940 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
1941 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1942 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
1943 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1945 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
1946 for (y = 0; y < 16; y++)
1947 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
1948 pict->linesize[0]] ^= 0x80;
1950 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
1951 int dm = 1 << (mv_sample_log2 - 2);
1952 for (i = 0; i < 4; i++) {
1953 int sx = mb_x * 16 + 8 * (i & 1);
1954 int sy = mb_y * 16 + 8 * (i >> 1);
1955 int xy = (mb_x * 2 + (i & 1) +
1956 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
/* mark sub-8x8 splits only where neighboring vectors actually differ */
1958 int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
1959 if (mv[0] != mv[dm] ||
1960 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
1961 for (y = 0; y < 8; y++)
1962 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
1963 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
1964 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
1965 pict->linesize[0]) ^= 0x8080808080808080ULL;
1969 if (IS_INTERLACED(mb_type) &&
1970 s->codec_id == AV_CODEC_ID_H264) {
/* reset the skip counter so the next frame starts clean */
1974 s->mbskip_table[mb_index] = 0;
1981 * find the lowest MB row referenced in the MVs
/**
 * Returns the last macroblock row that motion compensation for the
 * given direction (dir: 0 = forward, 1 = backward) may read, clamped to
 * [0, mb_height-1]; used by frame threading to wait on reference-frame
 * progress.  Field pictures, GMC (mcsel) and unhandled mv_type values
 * conservatively fall back to the last row.
 */
1983 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
1985 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1986 int my, off, i, mvs;
1988 if (s->picture_structure != PICT_FRAME || s->mcsel)
/* NOTE(review): the switch cases that set mvs (number of vectors per
 * MB, presumably 1/2/4 for 16x16/16x8/8x8) are not visible in this
 * extract. */
1991 switch (s->mv_type) {
2005 for (i = 0; i < mvs; i++) {
/* half-pel vectors are shifted up so everything is in quarter-pel units */
2006 my = s->mv[dir][i][1]<<qpel_shift;
2007 my_max = FFMAX(my_max, my);
2008 my_min = FFMIN(my_min, my);
/* convert the largest quarter-pel displacement to MB rows, rounding up */
2011 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2013 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2015 return s->mb_height-1;
2018 /* put block[] to dest[] */
2019 static inline void put_dct(MpegEncContext *s,
2020 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2022 s->dct_unquantize_intra(s, block, i, qscale);
2023 s->dsp.idct_put (dest, line_size, block);
2026 /* add block[] to dest[] */
2027 static inline void add_dct(MpegEncContext *s,
2028 DCTELEM *block, int i, uint8_t *dest, int line_size)
2030 if (s->block_last_index[i] >= 0) {
2031 s->dsp.idct_add (dest, line_size, block);
2035 static inline void add_dequant_dct(MpegEncContext *s,
2036 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2038 if (s->block_last_index[i] >= 0) {
2039 s->dct_unquantize_inter(s, block, i, qscale);
2041 s->dsp.idct_add (dest, line_size, block);
2046 * Clean dc, ac, coded_block for the current non-intra MB.
2048 void ff_clean_intra_table_entries(MpegEncContext *s)
2050 int wrap = s->b8_stride;
2051 int xy = s->block_index[0];
/* reset the luma DC predictors of this MB to their initial value 1024 */
2054 s->dc_val[0][xy + 1 ] =
2055 s->dc_val[0][xy + wrap] =
2056 s->dc_val[0][xy + 1 + wrap] = 1024;
/* clear the luma AC prediction coefficients */
2058 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2059 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
/* MSMPEG4 v3+ additionally predicts the coded-block pattern */
2060 if (s->msmpeg4_version>=3) {
2061 s->coded_block[xy ] =
2062 s->coded_block[xy + 1 ] =
2063 s->coded_block[xy + wrap] =
2064 s->coded_block[xy + 1 + wrap] = 0;
/* chroma predictors: one entry per macroblock, indexed with mb_stride */
2067 wrap = s->mb_stride;
2068 xy = s->mb_x + s->mb_y * wrap;
2070 s->dc_val[2][xy] = 1024;
2072 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2073 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
/* mark this MB as non-intra so the tables are not cleaned again */
2075 s->mbintra_table[xy]= 0;
2078 /* generic function called after a macroblock has been parsed by the
2079 decoder or after it has been encoded by the encoder.
2081 Important variables used:
2082 s->mb_intra : true if intra macroblock
2083 s->mv_dir : motion vector direction
2084 s->mv_type : motion vector type
2085 s->mv : motion vector
2086 s->interlaced_dct : true if interlaced dct used (mpeg2)
2088 static av_always_inline
2089 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
2092 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2093 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2094 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2098 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2099 /* save DCT coefficients */
2101 DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
2102 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2104 for(j=0; j<64; j++){
2105 *dct++ = block[i][s->dsp.idct_permutation[j]];
2106 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2108 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2112 s->current_picture.f.qscale_table[mb_xy] = s->qscale;
2114 /* update DC predictors for P macroblocks */
2116 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2117 if(s->mbintra_table[mb_xy])
2118 ff_clean_intra_table_entries(s);
2122 s->last_dc[2] = 128 << s->intra_dc_precision;
2125 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2126 s->mbintra_table[mb_xy]=1;
2128 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2129 uint8_t *dest_y, *dest_cb, *dest_cr;
2130 int dct_linesize, dct_offset;
2131 op_pixels_func (*op_pix)[4];
2132 qpel_mc_func (*op_qpix)[16];
2133 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2134 const int uvlinesize = s->current_picture.f.linesize[1];
2135 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
2136 const int block_size = 8;
2138 /* avoid copy if macroblock skipped in last frame too */
2139 /* skip only during decoding as we might trash the buffers during encoding a bit */
2141 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2143 if (s->mb_skipped) {
2145 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2147 } else if(!s->current_picture.f.reference) {
2150 *mbskip_ptr = 0; /* not skipped */
2154 dct_linesize = linesize << s->interlaced_dct;
2155 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2159 dest_cb= s->dest[1];
2160 dest_cr= s->dest[2];
2162 dest_y = s->b_scratchpad;
2163 dest_cb= s->b_scratchpad+16*linesize;
2164 dest_cr= s->b_scratchpad+32*linesize;
2168 /* motion handling */
2169 /* decoding or more than one mb_type (MC was already done otherwise) */
2172 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2173 if (s->mv_dir & MV_DIR_FORWARD) {
2174 ff_thread_await_progress(&s->last_picture_ptr->f,
2175 ff_MPV_lowest_referenced_row(s, 0),
2178 if (s->mv_dir & MV_DIR_BACKWARD) {
2179 ff_thread_await_progress(&s->next_picture_ptr->f,
2180 ff_MPV_lowest_referenced_row(s, 1),
2185 op_qpix= s->me.qpel_put;
2186 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2187 op_pix = s->dsp.put_pixels_tab;
2189 op_pix = s->dsp.put_no_rnd_pixels_tab;
2191 if (s->mv_dir & MV_DIR_FORWARD) {
2192 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2193 op_pix = s->dsp.avg_pixels_tab;
2194 op_qpix= s->me.qpel_avg;
2196 if (s->mv_dir & MV_DIR_BACKWARD) {
2197 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2201 /* skip dequant / idct if we are really late ;) */
2202 if(s->avctx->skip_idct){
2203 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2204 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2205 || s->avctx->skip_idct >= AVDISCARD_ALL)
2209 /* add dct residue */
2210 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2211 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2212 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2213 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2214 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2215 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2217 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2218 if (s->chroma_y_shift){
2219 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2220 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2224 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2225 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2226 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2227 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2230 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2231 add_dct(s, block[0], 0, dest_y , dct_linesize);
2232 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2233 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2234 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2236 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2237 if(s->chroma_y_shift){//Chroma420
2238 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2239 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2242 dct_linesize = uvlinesize << s->interlaced_dct;
2243 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2245 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2246 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2247 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2248 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2249 if(!s->chroma_x_shift){//Chroma444
2250 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2251 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2252 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2253 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2258 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2259 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2262 /* dct only in intra block */
2263 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2264 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2265 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2266 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2267 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2269 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2270 if(s->chroma_y_shift){
2271 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2272 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2276 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2277 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2278 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2279 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2283 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2284 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2285 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2286 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2288 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2289 if(s->chroma_y_shift){
2290 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2291 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2294 dct_linesize = uvlinesize << s->interlaced_dct;
2295 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2297 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2298 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2299 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2300 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2301 if(!s->chroma_x_shift){//Chroma444
2302 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2303 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2304 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2305 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
2313 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2314 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2315 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2320 void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2322 if(s->out_format == FMT_MPEG1) {
2323 MPV_decode_mb_internal(s, block, 1);
2326 MPV_decode_mb_internal(s, block, 0);
/**
 * Draw a horizontal band if supported.
 *
 * @param y start y position of the band, in luma rows
 * @param h is the normal height, this will be reduced automatically if needed for the last row
 */
void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
    const int field_pic= s->picture_structure != PICT_FRAME;

    /* Pad the picture borders of the freshly reconstructed band so later
     * motion compensation may read slightly outside the frame
     * (unrestricted motion vectors).  Skipped when the pixels are not
     * CPU-visible (hwaccel / VDPAU), for non-reference frames, and when
     * the caller asked for edge emulation instead (CODEC_FLAG_EMU_EDGE). */
    if (!s->avctx->hwaccel
        && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
        && s->unrestricted_mv
        && s->current_picture.f.reference
        && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
        int sides = 0, edge_h;
        int hshift = desc->log2_chroma_w; /* chroma horizontal subsampling */
        int vshift = desc->log2_chroma_h; /* chroma vertical subsampling */
        /* only pad the top/bottom edges this band actually touches */
        if (y==0) sides |= EDGE_TOP;
        if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;

        /* clip the band against the coded (edge-aligned) picture height */
        edge_h= FFMIN(h, s->v_edge_pos - y);

        /* luma plane, then both chroma planes with widths/heights and edge
         * sizes reduced by the pixel format's subsampling shifts */
        s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize,
                          s->linesize, s->h_edge_pos, edge_h,
                          EDGE_WIDTH, EDGE_WIDTH, sides);
        s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
                          s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
                          EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
        s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
                          s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
                          EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);

    /* the last band of the picture may be shorter than the nominal h */
    h= FFMIN(h, s->avctx->height - y);

    /* for field pictures, wait for the second field unless the user
     * explicitly allows single-field delivery */
    if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;

    if (s->avctx->draw_horiz_band) {
        int offset[AV_NUM_DATA_POINTERS];

        /* choose the frame to hand out: the just-decoded picture for
         * B frames, low delay, or coded-order output; otherwise the
         * previous picture (display order) */
        if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
            src = &s->current_picture_ptr->f;
        else if(s->last_picture_ptr)
            src = &s->last_picture_ptr->f;

        if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
            for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
            /* byte offset of the band start within each plane */
            offset[0]= y * s->linesize;
            offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
            for (i = 3; i < AV_NUM_DATA_POINTERS; i++)

        s->avctx->draw_horiz_band(s->avctx, src, offset,
                                  y, s->picture_structure, h);
/* Set up the per-macroblock block indices and destination plane pointers
 * for the macroblock at (s->mb_x, s->mb_y). */
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
    const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
    const int uvlinesize = s->current_picture.f.linesize[1];
    const int mb_size= 4; /* log2 of the luma macroblock size (16) */

    /* indices of the four 8x8 luma blocks in the b8 grid; the -2/-1 offsets
     * point at the blocks left of the current MB (prediction neighbours)
     * before mb_x advances */
    s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
    s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
    /* chroma (Cb/Cr) block indices, stored after the luma grid */
    s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    //block_index is not used by mpeg2, so it is not affected by chroma_format

    /* destination pointers start one MB column left; callers advance them
     * as mb_x increments */
    s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
    s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
    s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));

    /* when draw_horiz_band outputs B frames band-by-band, dest stays at the
     * band origin instead of being advanced by mb_y */
    if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
        if(s->picture_structure==PICT_FRAME){
            /* frame picture: advance by whole macroblock rows */
            s->dest[0] += s->mb_y * linesize << mb_size;
            s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
            /* field picture: two MB rows share one stored row, so halve mb_y */
            s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
            s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/* Flush all buffered pictures and reset inter-frame decoder state;
 * called on seeks and stream discontinuities. */
void ff_mpeg_flush(AVCodecContext *avctx){
    MpegEncContext *s = avctx->priv_data;

    /* nothing to do before the context is fully initialized */
    if(s==NULL || s->picture==NULL)

    /* release every picture buffer we (or the user) allocated */
    for(i=0; i<s->picture_count; i++){
        if (s->picture[i].f.data[0] &&
            (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
             s->picture[i].f.type == FF_BUFFER_TYPE_USER))
            free_frame_buffer(s, &s->picture[i]);
    /* forget all reference pictures */
    s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;

    s->mb_x= s->mb_y= 0;

    /* reset the bitstream parser so stale pre-seek data is not reused */
    s->parse_context.state= -1;
    s->parse_context.frame_start_found= 0;
    s->parse_context.overread= 0;
    s->parse_context.overread_index= 0;
    s->parse_context.index= 0;
    s->parse_context.last_index= 0;
    s->bitstream_buffer_size=0;
/* MPEG-1 intra dequantization: DC is scaled by the luma/chroma DC scale,
 * AC coefficients by qscale * intra matrix (>>3), with each result forced
 * odd ((level - 1) | 1) per the MPEG-1 oddification/mismatch rule. */
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    /* only the coefficients up to the last coded one need processing */
    nCoeffs= s->block_last_index[n];

    /* DC coefficient: blocks 0-3 are luma, the rest chroma */
        block[0] = block[0] * s->y_dc_scale;
        block[0] = block[0] * s->c_dc_scale;
    /* XXX: only mpeg1 */
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i]; /* raster position of coeff i */
            /* negative coefficient: scale the magnitude, force it odd */
            level = (int)(level * qscale * quant_matrix[j]) >> 3;
            level = (level - 1) | 1;
            /* positive coefficient: same scaling and oddification */
            level = (int)(level * qscale * quant_matrix[j]) >> 3;
            level = (level - 1) | 1;
/* MPEG-1 inter dequantization: every coefficient (including DC) is scaled
 * as ((2*level + 1) * qscale * matrix) >> 4 and forced odd, per the
 * MPEG-1 inter reconstruction and mismatch rules. */
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    nCoeffs= s->block_last_index[n]; /* last coded coefficient */

    quant_matrix = s->inter_matrix;
    for(i=0; i<=nCoeffs; i++) {
        /* NOTE(review): intra_scantable is used here in an inter path —
         * presumably both scantables share the same permutation for
         * MPEG-1/2; verify before changing */
        int j= s->intra_scantable.permutated[i];
            /* negative branch: dequantize the magnitude, force it odd */
            level = (((level << 1) + 1) * qscale *
                     ((int) (quant_matrix[j]))) >> 4;
            level = (level - 1) | 1;
            /* positive branch */
            level = (((level << 1) + 1) * qscale *
                     ((int) (quant_matrix[j]))) >> 4;
            level = (level - 1) | 1;
/* MPEG-2 intra dequantization: like the MPEG-1 variant but without the
 * per-coefficient oddification (no "(level - 1) | 1" in the visible
 * branches). */
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    /* alternate scan may place the last coded coeff anywhere, so cover all 64 */
    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    /* DC: blocks 0-3 are luma, the rest chroma */
        block[0] = block[0] * s->y_dc_scale;
        block[0] = block[0] * s->c_dc_scale;
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i]; /* raster position of coeff i */
            /* negative branch */
            level = (int)(level * qscale * quant_matrix[j]) >> 3;
            /* positive branch */
            level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact MPEG-2 intra dequantization.  Same scaling as
 * dct_unquantize_mpeg2_intra_c; the bit-exact extra work (presumably the
 * spec's mismatch-control parity accumulation) is not visible in this
 * excerpt — TODO confirm against the full source. */
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    /* with alternate scan the last coeff may be anywhere: scan all 64 */
    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    /* DC: blocks 0-3 are luma, the rest chroma */
        block[0] = block[0] * s->y_dc_scale;
        block[0] = block[0] * s->c_dc_scale;
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i];
            /* negative branch */
            level = (int)(level * qscale * quant_matrix[j]) >> 3;
            /* positive branch */
            level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* MPEG-2 inter dequantization: ((2*level + 1) * qscale * matrix) >> 4 for
 * every coefficient, without the MPEG-1 per-coefficient oddification. */
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    /* alternate scan: the last coded coeff may sit anywhere, cover all 64 */
    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    quant_matrix = s->inter_matrix;
    for(i=0; i<=nCoeffs; i++) {
        /* NOTE(review): intra_scantable used in an inter path — presumably
         * both tables share the permutation here; verify */
        int j= s->intra_scantable.permutated[i];
            /* negative branch */
            level = (((level << 1) + 1) * qscale *
                     ((int) (quant_matrix[j]))) >> 4;
            /* positive branch */
            level = (((level << 1) + 1) * qscale *
                     ((int) (quant_matrix[j]))) >> 4;
/* H.263-style intra dequantization: DC scaled by the DC scale tables,
 * AC coefficients reconstructed as level * qmul +/- qadd depending on
 * sign (uniform quantizer, no weighting matrix). */
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                  DCTELEM *block, int n, int qscale)
{
    int i, level, qmul, qadd;

    assert(s->block_last_index[n]>=0);

    /* DC: blocks 0-3 are luma, the rest chroma */
        block[0] = block[0] * s->y_dc_scale;
        block[0] = block[0] * s->c_dc_scale;
        qadd = (qscale - 1) | 1; /* rounding offset, forced odd */

        /* last raster-order index reached by the coded coefficients */
        nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];

    for(i=1; i<=nCoeffs; i++) { /* i==0 (DC) handled above */
            /* negative coefficient */
            level = level * qmul - qadd;
            /* positive coefficient */
            level = level * qmul + qadd;
/* H.263-style inter dequantization: every coefficient, DC included, is
 * reconstructed as level * qmul +/- qadd depending on sign. */
static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                  DCTELEM *block, int n, int qscale)
{
    int i, level, qmul, qadd;

    assert(s->block_last_index[n]>=0);

    qadd = (qscale - 1) | 1; /* rounding offset, forced odd */

    /* last raster-order index reached by the coded coefficients */
    nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];

    for(i=0; i<=nCoeffs; i++) {
            /* negative coefficient */
            level = level * qmul - qadd;
            /* positive coefficient */
            level = level * qmul + qadd;
2677 * set qscale and update qscale dependent variables.
2679 void ff_set_qscale(MpegEncContext * s, int qscale)
2683 else if (qscale > 31)
2687 s->chroma_qscale= s->chroma_qscale_table[qscale];
2689 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2690 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2693 void ff_MPV_report_decode_progress(MpegEncContext *s)
2695 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
2696 ff_thread_report_progress(&s->current_picture_ptr->f, s->mb_y, 0);