2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/pixdesc.h"
40 #include "libavutil/opt.h"
45 #include "mpegvideo.h"
46 #include "mpegvideodata.h"
50 #include "mjpegenc_common.h"
52 #include "mpegutils.h"
55 #include "pixblockdsp.h"
59 #include "aandcttab.h"
61 #include "mpeg4video.h"
63 #include "bytestream.h"
70 #define QUANT_BIAS_SHIFT 8
72 #define QMAT_SHIFT_MMX 16
75 static int encode_picture(MpegEncContext *s, int picture_number);
76 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
77 static int sse_mb(MpegEncContext *s);
78 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
79 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
81 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
82 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
84 const AVOption ff_mpv_generic_options[] = {
/**
 * Build the per-qscale integer quantization multiplier tables used by the
 * DCT quantizers.
 *
 * For each qscale in [qmin, qmax] this fills:
 *   - qmat[qscale][i]      : reciprocal multipliers for the accurate quantizer
 *   - qmat16[qscale][0][i] : 16-bit reciprocal multipliers for the fast path
 *   - qmat16[qscale][1][i] : matching rounding-bias terms
 *
 * The scaling differs per forward-DCT: ff_fdct_ifast leaves the AAN
 * post-scaling (ff_aanscales) in its output, so that factor is folded
 * into the divisor here; the jpeg_islow / faandct paths do not need it.
 *
 * @param s            encoder context (reads fdsp.fdct and idct_permutation)
 * @param qmat         output [qscale][64] multiplier table
 * @param qmat16       output [qscale][2][64] 16-bit multiplier + bias table
 * @param quant_matrix source quant matrix, indexed through idct_permutation
 * @param bias         rounding bias in QUANT_BIAS_SHIFT fixed point
 * @param qmin         first qscale to fill (inclusive)
 * @param qmax         last qscale to fill (inclusive)
 * @param intra        nonzero for intra tables; the overflow scan below then
 *                     starts at i = 1, skipping the DC coefficient
 */
89 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
90 uint16_t (*qmat16)[2][64],
91 const uint16_t *quant_matrix,
92 int bias, int qmin, int qmax, int intra)
94 FDCTDSPContext *fdsp = &s->fdsp;
98 for (qscale = qmin; qscale <= qmax; qscale++) {
/* Map qscale to the effective quantizer step: MPEG-2 non-linear table,
 * or the linear 2*qscale otherwise. */
102 if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
103 else qscale2 = qscale << 1;
/* DCTs with no extra output scaling: divisor is just qscale2 * matrix. */
105 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
107 fdsp->fdct == ff_faandct ||
108 #endif /* CONFIG_FAANDCT */
109 fdsp->fdct == ff_jpeg_fdct_islow_10) {
110 for (i = 0; i < 64; i++) {
111 const int j = s->idsp.idct_permutation[i];
112 int64_t den = (int64_t) qscale2 * quant_matrix[j];
113 /* 16 <= qscale * quant_matrix[i] <= 7905
114 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
115 * 19952 <= x <= 249205026
116 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
117 * 3444240 >= (1 << 36) / (x) >= 275 */
/* UINT64_C(2) << QMAT_SHIFT == 1 << (QMAT_SHIFT + 1), computed in 64 bit
 * to avoid overflow before the division. */
119 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
/* ifast DCT: fold the AAN scale factor into the divisor and compensate
 * with 14 extra shift bits. */
121 } else if (fdsp->fdct == ff_fdct_ifast) {
122 for (i = 0; i < 64; i++) {
123 const int j = s->idsp.idct_permutation[i];
124 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
125 /* 16 <= qscale * quant_matrix[i] <= 7905
126 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
127 * 19952 <= x <= 249205026
128 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
129 * 3444240 >= (1 << 36) / (x) >= 275 */
131 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
/* Generic path: also build the 16-bit fast-quantizer tables. */
134 for (i = 0; i < 64; i++) {
135 const int j = s->idsp.idct_permutation[i];
136 int64_t den = (int64_t) qscale2 * quant_matrix[j];
137 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
138 * Assume x = qscale * quant_matrix[i]
140 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
141 * so 32768 >= (1 << 19) / (x) >= 67 */
142 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
143 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
144 // (qscale * quant_matrix[i]);
145 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
/* Clamp the 16-bit multiplier away from 0 and from 128*256, which the
 * fast quantizer cannot represent correctly. */
147 if (qmat16[qscale][0][i] == 0 ||
148 qmat16[qscale][0][i] == 128 * 256)
149 qmat16[qscale][0][i] = 128 * 256 - 1;
150 qmat16[qscale][1][i] =
151 ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
152 qmat16[qscale][0][i]);
/* Check whether the worst-case coefficient times the multiplier can
 * overflow 32 bits; reduce the shift until it cannot. Starts at
 * i = intra so intra tables skip the DC coefficient. */
156 for (i = intra; i < 64; i++) {
158 if (fdsp->fdct == ff_fdct_ifast) {
/* 8191 is the largest DCT coefficient magnitude; >> 14 undoes the
 * fixed-point scale of ff_aanscales. */
159 max = (8191LL * ff_aanscales[i]) >> 14;
161 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
167 av_log(s->avctx, AV_LOG_INFO,
168 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/**
 * Derive s->qscale (and s->lambda2) from the current rate-control
 * lambda value.
 *
 * NOTE: the first branch is intentionally disabled ("&& 0"), so the
 * non-linear MPEG-2 qscale search below is dead code and the linear
 * lambda -> qscale mapping is always used.
 */
173 static inline void update_qscale(MpegEncContext *s)
175 if (s->q_scale_type == 1 && 0) {
177 int bestdiff=INT_MAX;
/* Search the non-linear qscale table for the entry whose implied lambda
 * is closest to the current lambda (dead code, see above). */
180 for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
181 int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
182 if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
183 (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
185 if (diff < bestdiff) {
/* Linear mapping: qscale ~= lambda * 139 / 2^(FF_LAMBDA_SHIFT + 7),
 * with rounding. The same formula is used in ff_init_qscale_tab(). */
192 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
193 (FF_LAMBDA_SHIFT + 7);
/* When VBV forces us past qmax, allow up to 31 instead of qmax. */
194 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
197 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/**
 * Write a 64-entry quantization matrix to the bitstream,
 * one byte per entry, in zigzag scan order.
 *
 * @param pb     destination bit writer
 * @param matrix matrix in natural (raster) order
 */
201 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
207 for (i = 0; i < 64; i++) {
208 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
215 * init s->current_picture.qscale_table from s->lambda_table
/**
 * Fill s->current_picture.qscale_table from s->lambda_table,
 * converting each per-macroblock lambda to a qscale with the same
 * linear formula used by update_qscale(), clipped to [qmin, ...].
 */
217 void ff_init_qscale_tab(MpegEncContext *s)
219 int8_t * const qscale_table = s->current_picture.qscale_table;
222 for (i = 0; i < s->mb_num; i++) {
/* mb_index2xy maps the raster MB index to the table position. */
223 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
224 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
225 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/**
 * Copy the fields that motion estimation may have changed from the
 * master context back into a slice-thread duplicate context, so the
 * duplicate stays consistent for the encoding pass.
 */
230 static void update_duplicate_context_after_me(MpegEncContext *dst,
233 #define COPY(a) dst->a= src->a
235 COPY(current_picture);
241 COPY(picture_in_gop_number);
242 COPY(gop_picture_number);
243 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
244 COPY(progressive_frame); // FIXME don't set in encode_header
245 COPY(partitioned_frame); // FIXME don't set in encode_header
250 * Set the given MpegEncContext to defaults for encoding.
251 * the changed fields will not depend upon the prior state of the MpegEncContext.
/**
 * Set the given MpegEncContext to encoding defaults; the fields set
 * here do not depend on any prior state of the context.
 *
 * Also (re)initializes the file-static default_fcode_tab and hooks up
 * the shared default MV penalty table.
 */
253 static void mpv_encode_defaults(MpegEncContext *s)
256 ff_mpv_common_defaults(s);
/* fcode 1 for all small motion vectors around the table center. */
258 for (i = -16; i < 16; i++) {
259 default_fcode_tab[i + MAX_MV] = 1;
261 s->me.mv_penalty = default_mv_penalty;
262 s->fcode_tab = default_fcode_tab;
264 s->input_picture_number = 0;
265 s->picture_in_gop_number = 0;
/**
 * Initialize the DCT/quantization function pointers for encoding.
 *
 * Installs the C defaults (possibly overridden by the x86 init),
 * and redirects dct_quantize to the trellis quantizer when the user
 * requested trellis quantization; fast_dct_quantize keeps the
 * non-trellis version.
 */
268 av_cold int ff_dct_encode_init(MpegEncContext *s)
271 ff_dct_encode_init_x86(s);
273 if (CONFIG_H263_ENCODER)
274 ff_h263dsp_init(&s->h263dsp);
275 if (!s->dct_quantize)
276 s->dct_quantize = ff_dct_quantize_c;
278 s->denoise_dct = denoise_dct_c;
/* Keep the plain quantizer reachable even when trellis is enabled. */
279 s->fast_dct_quantize = s->dct_quantize;
280 if (s->avctx->trellis)
281 s->dct_quantize = dct_quantize_trellis_c;
286 /* init video encoder */
/**
 * Initialize the mpegvideo-based encoder.
 *
 * Validates the user-supplied AVCodecContext options against the
 * capabilities of the selected codec, copies them into the
 * MpegEncContext, performs codec-specific setup (out_format,
 * msmpeg4_version, low_delay, ...), allocates the quantization and
 * picture bookkeeping tables, initializes the DSP contexts, the rate
 * controller and the quant matrices, and publishes CPB properties.
 *
 * @return 0 on success, a negative AVERROR code on failure; on the
 *         fail path ff_mpv_encode_end() is called to release anything
 *         already allocated.
 */
287 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
289 MpegEncContext *s = avctx->priv_data;
290 AVCPBProperties *cpb_props;
291 int i, ret, format_supported;
293 mpv_encode_defaults(s);
/* --- pixel format validation, per codec --- */
295 switch (avctx->codec_id) {
296 case AV_CODEC_ID_MPEG2VIDEO:
297 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
298 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
299 av_log(avctx, AV_LOG_ERROR,
300 "only YUV420 and YUV422 are supported\n");
304 case AV_CODEC_ID_MJPEG:
305 case AV_CODEC_ID_AMV:
306 format_supported = 0;
307 /* JPEG color space */
308 if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
309 avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
310 avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
311 (avctx->color_range == AVCOL_RANGE_JPEG &&
312 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
313 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
314 avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
315 format_supported = 1;
316 /* MPEG color space */
317 else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
318 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
319 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
320 avctx->pix_fmt == AV_PIX_FMT_YUV444P))
321 format_supported = 1;
323 if (!format_supported) {
324 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
329 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
330 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
/* --- chroma subsampling derived from the pixel format --- */
335 switch (avctx->pix_fmt) {
336 case AV_PIX_FMT_YUVJ444P:
337 case AV_PIX_FMT_YUV444P:
338 s->chroma_format = CHROMA_444;
340 case AV_PIX_FMT_YUVJ422P:
341 case AV_PIX_FMT_YUV422P:
342 s->chroma_format = CHROMA_422;
344 case AV_PIX_FMT_YUVJ420P:
345 case AV_PIX_FMT_YUV420P:
347 s->chroma_format = CHROMA_420;
351 avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
/* Migrate deprecated public AVCodecContext options into private fields. */
353 #if FF_API_PRIVATE_OPT
354 FF_DISABLE_DEPRECATION_WARNINGS
355 if (avctx->rtp_payload_size)
356 s->rtp_payload_size = avctx->rtp_payload_size;
357 if (avctx->me_penalty_compensation)
358 s->me_penalty_compensation = avctx->me_penalty_compensation;
360 s->me_pre = avctx->pre_me;
361 FF_ENABLE_DEPRECATION_WARNINGS
/* --- copy basic parameters from the AVCodecContext --- */
364 s->bit_rate = avctx->bit_rate;
365 s->width = avctx->width;
366 s->height = avctx->height;
367 if (avctx->gop_size > 600 &&
368 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
369 av_log(avctx, AV_LOG_WARNING,
370 "keyframe interval too large!, reducing it from %d to %d\n",
371 avctx->gop_size, 600);
372 avctx->gop_size = 600;
374 s->gop_size = avctx->gop_size;
376 if (avctx->max_b_frames > MAX_B_FRAMES) {
377 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
378 "is %d.\n", MAX_B_FRAMES);
379 avctx->max_b_frames = MAX_B_FRAMES;
381 s->max_b_frames = avctx->max_b_frames;
382 s->codec_id = avctx->codec->id;
383 s->strict_std_compliance = avctx->strict_std_compliance;
384 s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
385 s->rtp_mode = !!s->rtp_payload_size;
386 s->intra_dc_precision = avctx->intra_dc_precision;
388 // workaround some differences between how applications specify dc precision
389 if (s->intra_dc_precision < 0) {
390 s->intra_dc_precision += 8;
391 } else if (s->intra_dc_precision >= 8)
392 s->intra_dc_precision -= 8;
394 if (s->intra_dc_precision < 0) {
395 av_log(avctx, AV_LOG_ERROR,
396 "intra dc precision must be positive, note some applications use"
397 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
398 return AVERROR(EINVAL);
401 if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))
/* Only MPEG-2 supports a nonzero intra DC precision (up to 3). */
404 if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
405 av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
406 return AVERROR(EINVAL);
408 s->user_specified_pts = AV_NOPTS_VALUE;
410 if (s->gop_size <= 1) {
418 s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
/* Adaptive quantization is enabled when any masking option or QP-RD is on. */
420 s->adaptive_quant = (s->avctx->lumi_masking ||
421 s->avctx->dark_masking ||
422 s->avctx->temporal_cplx_masking ||
423 s->avctx->spatial_cplx_masking ||
424 s->avctx->p_masking ||
426 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
429 s->loop_filter = !!(s->avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
/* --- rate control / VBV parameter validation --- */
431 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
432 switch(avctx->codec_id) {
433 case AV_CODEC_ID_MPEG1VIDEO:
434 case AV_CODEC_ID_MPEG2VIDEO:
435 avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
437 case AV_CODEC_ID_MPEG4:
438 case AV_CODEC_ID_MSMPEG4V1:
439 case AV_CODEC_ID_MSMPEG4V2:
440 case AV_CODEC_ID_MSMPEG4V3:
/* Piecewise-linear VBV size in units of 16384 bits, by max rate tier. */
441 if (avctx->rc_max_rate >= 15000000) {
442 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
443 } else if(avctx->rc_max_rate >= 2000000) {
444 avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
445 } else if(avctx->rc_max_rate >= 384000) {
446 avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
448 avctx->rc_buffer_size = 40;
449 avctx->rc_buffer_size *= 16384;
452 if (avctx->rc_buffer_size) {
453 av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
457 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
458 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
462 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
463 av_log(avctx, AV_LOG_INFO,
464 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
467 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
468 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
472 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
473 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
477 if (avctx->rc_max_rate &&
478 avctx->rc_max_rate == avctx->bit_rate &&
479 avctx->rc_max_rate != avctx->rc_min_rate) {
480 av_log(avctx, AV_LOG_INFO,
481 "impossible bitrate constraints, this will fail\n");
484 if (avctx->rc_buffer_size &&
485 avctx->bit_rate * (int64_t)avctx->time_base.num >
486 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
487 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
491 if (!s->fixed_qscale &&
492 avctx->bit_rate * av_q2d(avctx->time_base) >
493 avctx->bit_rate_tolerance) {
494 av_log(avctx, AV_LOG_WARNING,
495 "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
496 avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
/* CBR MPEG-1/2: vbv_delay cannot express a buffer this large. */
499 if (s->avctx->rc_max_rate &&
500 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
501 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
502 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
503 90000LL * (avctx->rc_buffer_size - 1) >
504 s->avctx->rc_max_rate * 0xFFFFLL) {
505 av_log(avctx, AV_LOG_INFO,
506 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
507 "specified vbv buffer is too large for the given bitrate!\n");
/* --- feature/codec compatibility checks --- */
510 if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
511 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
512 s->codec_id != AV_CODEC_ID_FLV1) {
513 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
517 if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
518 av_log(avctx, AV_LOG_ERROR,
519 "OBMC is only supported with simple mb decision\n");
523 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
524 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
528 if (s->max_b_frames &&
529 s->codec_id != AV_CODEC_ID_MPEG4 &&
530 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
531 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
532 av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
535 if (s->max_b_frames < 0) {
536 av_log(avctx, AV_LOG_ERROR,
537 "max b frames must be 0 or positive for mpegvideo based encoders\n");
/* --- dimension and aspect ratio limits per codec --- */
541 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
542 s->codec_id == AV_CODEC_ID_H263 ||
543 s->codec_id == AV_CODEC_ID_H263P) &&
544 (avctx->sample_aspect_ratio.num > 255 ||
545 avctx->sample_aspect_ratio.den > 255)) {
546 av_log(avctx, AV_LOG_WARNING,
547 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
548 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
549 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
550 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
553 if ((s->codec_id == AV_CODEC_ID_H263 ||
554 s->codec_id == AV_CODEC_ID_H263P) &&
555 (avctx->width > 2048 ||
556 avctx->height > 1152 )) {
557 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
560 if ((s->codec_id == AV_CODEC_ID_H263 ||
561 s->codec_id == AV_CODEC_ID_H263P) &&
562 ((avctx->width &3) ||
563 (avctx->height&3) )) {
564 av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
568 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
569 (avctx->width > 4095 ||
570 avctx->height > 4095 )) {
571 av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
575 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
576 (avctx->width > 16383 ||
577 avctx->height > 16383 )) {
578 av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
582 if (s->codec_id == AV_CODEC_ID_RV10 &&
584 avctx->height&15 )) {
585 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
586 return AVERROR(EINVAL);
589 if (s->codec_id == AV_CODEC_ID_RV20 &&
592 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
593 return AVERROR(EINVAL);
596 if ((s->codec_id == AV_CODEC_ID_WMV1 ||
597 s->codec_id == AV_CODEC_ID_WMV2) &&
599 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
603 if ((s->avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
604 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
605 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
609 #if FF_API_PRIVATE_OPT
610 FF_DISABLE_DEPRECATION_WARNINGS
611 if (avctx->mpeg_quant)
612 s->mpeg_quant = avctx->mpeg_quant;
613 FF_ENABLE_DEPRECATION_WARNINGS
616 // FIXME mpeg2 uses that too
617 if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
618 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
619 av_log(avctx, AV_LOG_ERROR,
620 "mpeg2 style quantization not supported by codec\n");
624 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
625 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
629 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
630 s->avctx->mb_decision != FF_MB_DECISION_RD) {
631 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
635 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
636 (s->codec_id == AV_CODEC_ID_AMV ||
637 s->codec_id == AV_CODEC_ID_MJPEG)) {
638 // Used to produce garbage with MJPEG.
639 av_log(avctx, AV_LOG_ERROR,
640 "QP RD is no longer compatible with MJPEG or AMV\n");
644 #if FF_API_PRIVATE_OPT
645 FF_DISABLE_DEPRECATION_WARNINGS
646 if (avctx->scenechange_threshold)
647 s->scenechange_threshold = avctx->scenechange_threshold;
648 FF_ENABLE_DEPRECATION_WARNINGS
651 if (s->scenechange_threshold < 1000000000 &&
652 (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
653 av_log(avctx, AV_LOG_ERROR,
654 "closed gop with scene change detection are not supported yet, "
655 "set threshold to 1000000000\n");
659 if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
660 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
661 s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
662 av_log(avctx, AV_LOG_ERROR,
663 "low delay forcing is only available for mpeg2, "
664 "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
667 if (s->max_b_frames != 0) {
668 av_log(avctx, AV_LOG_ERROR,
669 "B-frames cannot be used with low delay\n");
674 if (s->q_scale_type == 1) {
675 if (avctx->qmax > 28) {
676 av_log(avctx, AV_LOG_ERROR,
677 "non linear quant only supports qmax <= 28 currently\n");
682 if (avctx->slices > 1 &&
683 (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
684 av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
685 return AVERROR(EINVAL);
688 if (s->avctx->thread_count > 1 &&
689 s->codec_id != AV_CODEC_ID_MPEG4 &&
690 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
691 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
692 s->codec_id != AV_CODEC_ID_MJPEG &&
693 (s->codec_id != AV_CODEC_ID_H263P)) {
694 av_log(avctx, AV_LOG_ERROR,
695 "multi threaded encoding not supported by codec\n");
699 if (s->avctx->thread_count < 1) {
700 av_log(avctx, AV_LOG_ERROR,
701 "automatic thread number detection not supported by codec, "
706 if (!avctx->time_base.den || !avctx->time_base.num) {
707 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
711 #if FF_API_PRIVATE_OPT
712 FF_DISABLE_DEPRECATION_WARNINGS
713 if (avctx->b_frame_strategy)
714 s->b_frame_strategy = avctx->b_frame_strategy;
715 if (avctx->b_sensitivity != 40)
716 s->b_sensitivity = avctx->b_sensitivity;
717 FF_ENABLE_DEPRECATION_WARNINGS
720 if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
721 av_log(avctx, AV_LOG_INFO,
722 "notice: b_frame_strategy only affects the first pass\n");
723 s->b_frame_strategy = 0;
/* Reduce the time base to lowest terms. */
726 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
728 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
729 avctx->time_base.den /= i;
730 avctx->time_base.num /= i;
/* --- default quantizer rounding biases --- */
734 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) {
735 // (a + x * 3 / 8) / x
736 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
737 s->inter_quant_bias = 0;
739 s->intra_quant_bias = 0;
741 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
744 if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
745 av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
746 return AVERROR(EINVAL);
749 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
/* MPEG-4 stores the time base denominator in 16 bits. */
751 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
752 s->avctx->time_base.den > (1 << 16) - 1) {
753 av_log(avctx, AV_LOG_ERROR,
754 "timebase %d/%d not supported by MPEG 4 standard, "
755 "the maximum admitted value for the timebase denominator "
756 "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
760 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
/* --- per-codec output format and flags --- */
762 switch (avctx->codec->id) {
763 case AV_CODEC_ID_MPEG1VIDEO:
764 s->out_format = FMT_MPEG1;
765 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
766 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
768 case AV_CODEC_ID_MPEG2VIDEO:
769 s->out_format = FMT_MPEG1;
770 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
771 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
774 case AV_CODEC_ID_MJPEG:
775 case AV_CODEC_ID_AMV:
776 s->out_format = FMT_MJPEG;
777 s->intra_only = 1; /* force intra only for jpeg */
778 if (!CONFIG_MJPEG_ENCODER ||
779 ff_mjpeg_encode_init(s) < 0)
784 case AV_CODEC_ID_H261:
785 if (!CONFIG_H261_ENCODER)
787 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
788 av_log(avctx, AV_LOG_ERROR,
789 "The specified picture size of %dx%d is not valid for the "
790 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
791 s->width, s->height);
794 s->out_format = FMT_H261;
797 s->rtp_mode = 0; /* Sliced encoding not supported */
799 case AV_CODEC_ID_H263:
800 if (!CONFIG_H263_ENCODER)
802 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
803 s->width, s->height) == 8) {
804 av_log(avctx, AV_LOG_ERROR,
805 "The specified picture size of %dx%d is not valid for "
806 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
807 "352x288, 704x576, and 1408x1152. "
808 "Try H.263+.\n", s->width, s->height);
811 s->out_format = FMT_H263;
815 case AV_CODEC_ID_H263P:
816 s->out_format = FMT_H263;
819 s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
820 s->modified_quant = s->h263_aic;
821 s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
822 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
825 /* These are just to be sure */
829 case AV_CODEC_ID_FLV1:
830 s->out_format = FMT_H263;
831 s->h263_flv = 2; /* format = 1; 11-bit codes */
832 s->unrestricted_mv = 1;
833 s->rtp_mode = 0; /* don't allow GOB */
837 case AV_CODEC_ID_RV10:
838 s->out_format = FMT_H263;
842 case AV_CODEC_ID_RV20:
843 s->out_format = FMT_H263;
846 s->modified_quant = 1;
850 s->unrestricted_mv = 0;
852 case AV_CODEC_ID_MPEG4:
853 s->out_format = FMT_H263;
855 s->unrestricted_mv = 1;
856 s->low_delay = s->max_b_frames ? 0 : 1;
857 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
859 case AV_CODEC_ID_MSMPEG4V2:
860 s->out_format = FMT_H263;
862 s->unrestricted_mv = 1;
863 s->msmpeg4_version = 2;
867 case AV_CODEC_ID_MSMPEG4V3:
868 s->out_format = FMT_H263;
870 s->unrestricted_mv = 1;
871 s->msmpeg4_version = 3;
872 s->flipflop_rounding = 1;
876 case AV_CODEC_ID_WMV1:
877 s->out_format = FMT_H263;
879 s->unrestricted_mv = 1;
880 s->msmpeg4_version = 4;
881 s->flipflop_rounding = 1;
885 case AV_CODEC_ID_WMV2:
886 s->out_format = FMT_H263;
888 s->unrestricted_mv = 1;
889 s->msmpeg4_version = 5;
890 s->flipflop_rounding = 1;
898 #if FF_API_PRIVATE_OPT
899 FF_DISABLE_DEPRECATION_WARNINGS
900 if (avctx->noise_reduction)
901 s->noise_reduction = avctx->noise_reduction;
902 FF_ENABLE_DEPRECATION_WARNINGS
905 avctx->has_b_frames = !s->low_delay;
909 s->progressive_frame =
910 s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
911 AV_CODEC_FLAG_INTERLACED_ME) ||
/* --- allocation of contexts and tables --- */
916 if (ff_mpv_common_init(s) < 0)
919 ff_fdctdsp_init(&s->fdsp, avctx);
920 ff_me_cmp_init(&s->mecc, avctx);
921 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
922 ff_pixblockdsp_init(&s->pdsp, avctx);
923 ff_qpeldsp_init(&s->qdsp);
925 if (s->msmpeg4_version) {
926 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
927 2 * 2 * (MAX_LEVEL + 1) *
928 (MAX_RUN + 1) * 2 * sizeof(int), fail);
930 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
932 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
933 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail);
934 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
935 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
936 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
937 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
938 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
939 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
940 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
941 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
944 if (s->noise_reduction) {
945 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
946 2 * 64 * sizeof(uint16_t), fail);
949 ff_dct_encode_init(s);
951 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
952 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
954 if (s->slice_context_count > 1) {
957 if (avctx->codec_id == AV_CODEC_ID_H263P)
958 s->h263_slice_structured = 1;
961 s->quant_precision = 5;
963 #if FF_API_PRIVATE_OPT
964 FF_DISABLE_DEPRECATION_WARNINGS
965 if (avctx->frame_skip_threshold)
966 s->frame_skip_threshold = avctx->frame_skip_threshold;
967 if (avctx->frame_skip_factor)
968 s->frame_skip_factor = avctx->frame_skip_factor;
969 if (avctx->frame_skip_exp)
970 s->frame_skip_exp = avctx->frame_skip_exp;
971 if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
972 s->frame_skip_cmp = avctx->frame_skip_cmp;
973 FF_ENABLE_DEPRECATION_WARNINGS
976 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, s->avctx->ildct_cmp);
977 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
/* Codec-family specific encoder init. */
979 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
980 ff_h261_encode_init(s);
981 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
982 ff_h263_encode_init(s);
983 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
984 if ((ret = ff_msmpeg4_encode_init(s)) < 0)
986 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
987 && s->out_format == FMT_MPEG1)
988 ff_mpeg1_encode_init(s);
/* --- select default quant matrices (permuted by idct order), with
 *     optional user override via avctx->intra_matrix/inter_matrix --- */
991 for (i = 0; i < 64; i++) {
992 int j = s->idsp.idct_permutation[i];
993 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
995 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
996 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
997 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
999 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1002 s->chroma_intra_matrix[j] =
1003 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1004 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1006 if (s->avctx->intra_matrix)
1007 s->intra_matrix[j] = s->avctx->intra_matrix[i];
1008 if (s->avctx->inter_matrix)
1009 s->inter_matrix[j] = s->avctx->inter_matrix[i];
1012 /* precompute matrix */
1013 /* for mjpeg, we do include qscale in the matrix */
1014 if (s->out_format != FMT_MJPEG) {
1015 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
1016 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1018 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
1019 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1023 if (ff_rate_control_init(s) < 0)
1026 #if FF_API_PRIVATE_OPT
1027 FF_DISABLE_DEPRECATION_WARNINGS
1028 if (avctx->brd_scale)
1029 s->brd_scale = avctx->brd_scale;
1031 if (avctx->prediction_method)
1032 s->pred = avctx->prediction_method + 1;
1033 FF_ENABLE_DEPRECATION_WARNINGS
/* b_frame_strategy 2 needs downscaled temp frames for lookahead. */
1036 if (s->b_frame_strategy == 2) {
1037 for (i = 0; i < s->max_b_frames + 2; i++) {
1038 s->tmp_frames[i] = av_frame_alloc();
1039 if (!s->tmp_frames[i])
1040 return AVERROR(ENOMEM);
1042 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1043 s->tmp_frames[i]->width = s->width >> s->brd_scale;
1044 s->tmp_frames[i]->height = s->height >> s->brd_scale;
1046 ret = av_frame_get_buffer(s->tmp_frames[i], 32);
/* Publish coded picture buffer parameters as side data. */
1052 cpb_props = ff_add_cpb_side_data(avctx);
1054 return AVERROR(ENOMEM);
1055 cpb_props->max_bitrate = avctx->rc_max_rate;
1056 cpb_props->min_bitrate = avctx->rc_min_rate;
1057 cpb_props->avg_bitrate = avctx->bit_rate;
1058 cpb_props->buffer_size = avctx->rc_buffer_size;
/* Error path: release everything allocated so far. */
1062 ff_mpv_encode_end(avctx);
1063 return AVERROR_UNKNOWN;
/**
 * Free all encoder state; counterpart of ff_mpv_encode_init().
 * Safe to call on a partially initialized context (used as the
 * failure path of init as well as normal close).
 */
1066 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1068 MpegEncContext *s = avctx->priv_data;
1071 ff_rate_control_uninit(s);
1073 ff_mpv_common_end(s);
1074 if (CONFIG_MJPEG_ENCODER &&
1075 s->out_format == FMT_MJPEG)
1076 ff_mjpeg_encode_close(s);
1078 av_freep(&avctx->extradata);
1080 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1081 av_frame_free(&s->tmp_frames[i]);
1083 ff_free_picture_tables(&s->new_picture);
1084 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1086 av_freep(&s->avctx->stats_out);
1087 av_freep(&s->ac_stats);
/* The chroma matrices may alias the luma ones; only free them when
 * they are distinct allocations, then clear the pointers. */
1089 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1090 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1091 s->q_chroma_intra_matrix= NULL;
1092 s->q_chroma_intra_matrix16= NULL;
1093 av_freep(&s->q_intra_matrix);
1094 av_freep(&s->q_inter_matrix);
1095 av_freep(&s->q_intra_matrix16);
1096 av_freep(&s->q_inter_matrix16);
1097 av_freep(&s->input_picture);
1098 av_freep(&s->reordered_input_picture);
1099 av_freep(&s->dct_offset);
/**
 * Sum of absolute errors of a 16x16 block against a constant
 * reference value (typically the block mean).
 *
 * @param src    top-left of the 16x16 block
 * @param ref    constant value to compare each pixel against
 * @param stride row stride of src in bytes
 * @return accumulated absolute difference
 */
1104 static int get_sae(uint8_t *src, int ref, int stride)
1109 for (y = 0; y < 16; y++) {
1110 for (x = 0; x < 16; x++) {
1111 acc += FFABS(src[x + y * stride] - ref);
/**
 * Count the 16x16 blocks for which intra coding looks cheaper than
 * inter: a block is counted when its flatness measure (SAE against its
 * own mean, plus a 500 margin) is below the SAD against the reference
 * frame. Dimensions are rounded down to whole macroblocks.
 *
 * @param s      encoder context (sad compare + pix_sum helpers)
 * @param src    current frame plane
 * @param ref    reference frame plane
 * @param stride row stride shared by src and ref
 * @return number of blocks favoring intra
 */
1118 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1119 uint8_t *ref, int stride)
1125 h = s->height & ~15;
1127 for (y = 0; y < h; y += 16) {
1128 for (x = 0; x < w; x += 16) {
1129 int offset = x + y * stride;
1130 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
/* Block mean: pix_sum over 256 pixels, rounded. */
1132 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1133 int sae = get_sae(src + offset, mean, stride);
1135 acc += sae + 500 < sad;
/**
 * Thin wrapper around ff_alloc_picture() supplying the encoder's
 * geometry and helper contexts.
 *
 * @param pic    picture to allocate buffers for
 * @param shared nonzero when the frame data is shared (not owned)
 * @return the ff_alloc_picture() result (0 on success, negative AVERROR)
 */
1141 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1143 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1144 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1145 s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1146 &s->linesize, &s->uvlinesize);
/* Accept one input frame from the user and queue it for encoding:
 * validate/guess its pts, either reference the user's buffer directly
 * ("direct" path, when strides/alignment match) or copy it into an
 * internally allocated Picture, then insert it into s->input_picture[]
 * at the position given by the encoding delay (B-frame reordering).
 * Returns 0 on success, a negative AVERROR on failure.
 * NOTE(review): extraction is gappy — several branches, returns and
 * closing braces are missing here; verify against the full file. */
1149 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1151 Picture *pic = NULL;
1153 int i, display_picture_number = 0, ret;
 /* how many frames must be buffered before output can start */
1154 int encoding_delay = s->max_b_frames ? s->max_b_frames
1155 : (s->low_delay ? 0 : 1);
1156 int flush_offset = 1;
1161 display_picture_number = s->input_picture_number++;
 /* --- pts validation: must be strictly increasing --- */
1163 if (pts != AV_NOPTS_VALUE) {
1164 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1165 int64_t last = s->user_specified_pts;
1168 av_log(s->avctx, AV_LOG_ERROR,
1169 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1171 return AVERROR(EINVAL);
1174 if (!s->low_delay && display_picture_number == 1)
1175 s->dts_delta = pts - last;
1177 s->user_specified_pts = pts;
 /* --- no pts supplied: extrapolate from the previous one --- */
1179 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1180 s->user_specified_pts =
1181 pts = s->user_specified_pts + 1;
1182 av_log(s->avctx, AV_LOG_INFO,
1183 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1186 pts = display_picture_number;
 /* --- decide whether the user buffer can be used in place ---
  * any stride/alignment mismatch forces a copy */
1190 if (!pic_arg->buf[0] ||
1191 pic_arg->linesize[0] != s->linesize ||
1192 pic_arg->linesize[1] != s->uvlinesize ||
1193 pic_arg->linesize[2] != s->uvlinesize)
1195 if ((s->width & 15) || (s->height & 15))
1197 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1199 if (s->linesize & (STRIDE_ALIGN-1))
1202 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1203 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1205 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1209 pic = &s->picture[i];
1213 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1216 ret = alloc_picture(s, pic, direct);
1221 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1222 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1223 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1226 int h_chroma_shift, v_chroma_shift;
1227 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
 /* --- copy path: duplicate all three planes into our buffer --- */
1231 for (i = 0; i < 3; i++) {
1232 int src_stride = pic_arg->linesize[i];
1233 int dst_stride = i ? s->uvlinesize : s->linesize;
1234 int h_shift = i ? h_chroma_shift : 0;
1235 int v_shift = i ? v_chroma_shift : 0;
1236 int w = s->width >> h_shift;
1237 int h = s->height >> v_shift;
1238 uint8_t *src = pic_arg->data[i];
1239 uint8_t *dst = pic->f->data[i];
 /* interlaced MPEG-2 needs taller padding; presumably adjusts
  * vpad — the assignment itself is not visible here */
1242 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1243 && !s->progressive_sequence
1244 && FFALIGN(s->height, 32) - s->height > 16)
1247 if (!s->avctx->rc_buffer_size)
1248 dst += INPLACE_OFFSET;
1250 if (src_stride == dst_stride)
1251 memcpy(dst, src, src_stride * h);
1254 uint8_t *dst2 = dst;
1256 memcpy(dst2, src, w);
 /* pad out to macroblock boundaries when dimensions are odd-sized */
1261 if ((s->width & 15) || (s->height & (vpad-1))) {
1262 s->mpvencdsp.draw_edges(dst, dst_stride,
1272 ret = av_frame_copy_props(pic->f, pic_arg);
1276 pic->f->display_picture_number = display_picture_number;
1277 pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1279 /* Flushing: When we have not received enough input frames,
1280 * ensure s->input_picture[0] contains the first picture */
1281 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1282 if (s->input_picture[flush_offset])
1285 if (flush_offset <= 1)
1288 encoding_delay = encoding_delay - flush_offset + 1;
1291 /* shift buffer entries */
1292 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1293 s->input_picture[i - flush_offset] = s->input_picture[i];
1295 s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether frame p is similar enough to reference frame ref to be
 * skipped entirely. Compares 8x8 blocks of all three planes with
 * frame_skip_cmp and folds the per-block scores with an exponent-dependent
 * norm (|frame_skip_exp|: 0=max, 1=L1, 2=L2, 3=L3, 4=L4; negative values
 * normalize by picture area afterwards). The frame is skippable when the
 * score is below both frame_skip_threshold and a lambda-scaled factor.
 * NOTE(review): the visible extraction is missing the return statements
 * and loop closers — confirm the exact return values in the full file. */
1300 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1304 int64_t score64 = 0;
1306 for (plane = 0; plane < 3; plane++) {
1307 const int stride = p->f->linesize[plane];
 /* luma has twice as many 8x8 blocks per MB dimension as chroma */
1308 const int bw = plane ? 1 : 2;
1309 for (y = 0; y < s->mb_height * bw; y++) {
1310 for (x = 0; x < s->mb_width * bw; x++) {
 /* non-shared input buffers carry the INPLACE padding offset */
1311 int off = p->shared ? 0 : 16;
1312 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1313 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1314 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1316 switch (FFABS(s->frame_skip_exp)) {
1317 case 0: score = FFMAX(score, v); break;
1318 case 1: score += FFABS(v); break;
1319 case 2: score64 += v * (int64_t)v; break;
1320 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1321 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
 /* negative exponent: take the (1/exp)-th root of the mean */
1330 if (s->frame_skip_exp < 0)
1331 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1332 -1.0/s->frame_skip_exp);
1334 if (score64 < s->frame_skip_threshold)
1336 if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
/* Push one frame through an auxiliary AVCodecContext (used by the
 * b_frame_strategy=2 lookahead) and collect the resulting packet.
 * Presumably returns the encoded size on success and a negative
 * AVERROR otherwise — the accumulation and return lines are not
 * visible in this extraction. */
1341 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1343 AVPacket pkt = { 0 };
1347 av_init_packet(&pkt);
1349 ret = avcodec_send_frame(c, frame);
1354 ret = avcodec_receive_packet(c, &pkt);
 /* packet data itself is not needed, only its size */
1357 av_packet_unref(&pkt);
1358 } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
/* Lookahead for b_frame_strategy=2: encode downscaled (by brd_scale)
 * copies of the queued input pictures with every possible B-frame count
 * (0..max_b_frames) using a scratch encoder, and pick the count with
 * the lowest rate-distortion cost. Returns the best B-frame count,
 * or a negative AVERROR on setup failure. */
1365 static int estimate_best_b_count(MpegEncContext *s)
1367 const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1368 const int scale = s->brd_scale;
1369 int width = s->width >> scale;
1370 int height = s->height >> scale;
1371 int i, j, out_size, p_lambda, b_lambda, lambda2;
1372 int64_t best_rd = INT64_MAX;
1373 int best_b_count = -1;
1376 av_assert0(scale >= 0 && scale <= 3);
1379 //s->next_picture_ptr->quality;
1380 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1381 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1382 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1383 if (!b_lambda) // FIXME we should do this somewhere else
1384 b_lambda = p_lambda;
1385 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
 /* --- downscale the reference plus queued inputs into tmp_frames --- */
1388 for (i = 0; i < s->max_b_frames + 2; i++) {
 /* slot 0 is the last coded picture, the rest are queued inputs */
1389 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1390 s->next_picture_ptr;
1393 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1394 pre_input = *pre_input_ptr;
1395 memcpy(data, pre_input_ptr->f->data, sizeof(data));
 /* non-shared inputs carry the INPLACE padding offset */
1397 if (!pre_input.shared && i) {
1398 data[0] += INPLACE_OFFSET;
1399 data[1] += INPLACE_OFFSET;
1400 data[2] += INPLACE_OFFSET;
1403 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1404 s->tmp_frames[i]->linesize[0],
1406 pre_input.f->linesize[0],
1408 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1409 s->tmp_frames[i]->linesize[1],
1411 pre_input.f->linesize[1],
1412 width >> 1, height >> 1);
1413 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1414 s->tmp_frames[i]->linesize[2],
1416 pre_input.f->linesize[2],
1417 width >> 1, height >> 1);
 /* --- try each candidate B-frame count j --- */
1421 for (j = 0; j < s->max_b_frames + 1; j++) {
1425 if (!s->input_picture[j])
 /* fresh scratch encoder per candidate */
1428 c = avcodec_alloc_context3(NULL);
1430 return AVERROR(ENOMEM);
1434 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1435 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1436 c->mb_decision = s->avctx->mb_decision;
1437 c->me_cmp = s->avctx->me_cmp;
1438 c->mb_cmp = s->avctx->mb_cmp;
1439 c->me_sub_cmp = s->avctx->me_sub_cmp;
1440 c->pix_fmt = AV_PIX_FMT_YUV420P;
1441 c->time_base = s->avctx->time_base;
1442 c->max_b_frames = s->max_b_frames;
1444 ret = avcodec_open2(c, codec, NULL);
 /* first frame is always coded as I at fixed quality */
1448 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1449 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1451 out_size = encode_frame(c, s->tmp_frames[0]);
1457 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1459 for (i = 0; i < s->max_b_frames + 1; i++) {
 /* every (j+1)-th frame (and the last) is a P, the rest are B */
1460 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1462 s->tmp_frames[i + 1]->pict_type = is_p ?
1463 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1464 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1466 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1472 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1475 /* get the delayed frames */
1476 out_size = encode_frame(c, NULL);
1481 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
 /* add the distortion reported by the scratch encoder */
1483 rd += c->error[0] + c->error[1] + c->error[2];
1491 avcodec_free_context(&c);
1496 return best_b_count;
/* Choose the next picture to encode and establish coding order:
 * optionally drop skippable frames, decide I/P placement and the number
 * of B-frames (per b_frame_strategy), reorder input_picture[] into
 * reordered_input_picture[], and set up s->new_picture /
 * s->current_picture_ptr for the frame about to be coded.
 * Returns 0 on success, negative AVERROR on failure.
 * NOTE(review): extraction is gappy — some branches/returns are missing. */
1499 static int select_input_picture(MpegEncContext *s)
 /* advance the reorder queue by one slot */
1503 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1504 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1505 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1507 /* set next picture type & ordering */
1508 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
 /* optional frame skipping against the last coded picture */
1509 if (s->frame_skip_threshold || s->frame_skip_factor) {
1510 if (s->picture_in_gop_number < s->gop_size &&
1511 s->next_picture_ptr &&
1512 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1513 // FIXME check that the gop check above is +-1 correct
1514 av_frame_unref(s->input_picture[0]->f);
1516 ff_vbv_update(s, 0);
 /* no reference yet (or intra-only) -> force an I frame */
1522 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1523 !s->next_picture_ptr || s->intra_only) {
1524 s->reordered_input_picture[0] = s->input_picture[0];
1525 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1526 s->reordered_input_picture[0]->f->coded_picture_number =
1527 s->coded_picture_number++;
 /* two-pass: take picture types from the first-pass stats */
1531 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1532 for (i = 0; i < s->max_b_frames + 1; i++) {
1533 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1535 if (pict_num >= s->rc_context.num_entries)
1537 if (!s->input_picture[i]) {
1538 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1542 s->input_picture[i]->f->pict_type =
1543 s->rc_context.entry[pict_num].new_pict_type;
 /* --- pick the number of consecutive B frames --- */
1547 if (s->b_frame_strategy == 0) {
1548 b_frames = s->max_b_frames;
1549 while (b_frames && !s->input_picture[b_frames])
1551 } else if (s->b_frame_strategy == 1) {
 /* heuristic: score candidates by their intra-block count */
1552 for (i = 1; i < s->max_b_frames + 1; i++) {
1553 if (s->input_picture[i] &&
1554 s->input_picture[i]->b_frame_score == 0) {
1555 s->input_picture[i]->b_frame_score =
1557 s->input_picture[i ]->f->data[0],
1558 s->input_picture[i - 1]->f->data[0],
1562 for (i = 0; i < s->max_b_frames + 1; i++) {
1563 if (!s->input_picture[i] ||
1564 s->input_picture[i]->b_frame_score - 1 >
1565 s->mb_num / s->b_sensitivity)
1569 b_frames = FFMAX(0, i - 1);
 /* reset scores so they get recomputed for the next group */
1572 for (i = 0; i < b_frames + 1; i++) {
1573 s->input_picture[i]->b_frame_score = 0;
1575 } else if (s->b_frame_strategy == 2) {
1576 b_frames = estimate_best_b_count(s);
 /* honor user-forced picture types inside the B run */
1583 for (i = b_frames - 1; i >= 0; i--) {
1584 int type = s->input_picture[i]->f->pict_type;
1585 if (type && type != AV_PICTURE_TYPE_B)
1588 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1589 b_frames == s->max_b_frames) {
1590 av_log(s->avctx, AV_LOG_ERROR,
1591 "warning, too many B-frames in a row\n");
 /* GOP boundary handling */
1594 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1595 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1596 s->gop_size > s->picture_in_gop_number) {
1597 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1599 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1601 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1605 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1606 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
 /* emit the anchor first, then the B frames in display order */
1609 s->reordered_input_picture[0] = s->input_picture[b_frames];
1610 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1611 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1612 s->reordered_input_picture[0]->f->coded_picture_number =
1613 s->coded_picture_number++;
1614 for (i = 0; i < b_frames; i++) {
1615 s->reordered_input_picture[i + 1] = s->input_picture[i];
1616 s->reordered_input_picture[i + 1]->f->pict_type =
1618 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1619 s->coded_picture_number++;
1624 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1626 if (s->reordered_input_picture[0]) {
 /* B frames are never used as references (0), others are (3) */
1627 s->reordered_input_picture[0]->reference =
1628 s->reordered_input_picture[0]->f->pict_type !=
1629 AV_PICTURE_TYPE_B ? 3 : 0;
1631 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1634 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1635 // input is a shared pix, so we can't modify it -> allocate a new
1636 // one & ensure that the shared one is reuseable
1639 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1642 pic = &s->picture[i];
1644 pic->reference = s->reordered_input_picture[0]->reference;
1645 if (alloc_picture(s, pic, 0) < 0) {
1649 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1653 /* mark us unused / free shared pic */
1654 av_frame_unref(s->reordered_input_picture[0]->f);
1655 s->reordered_input_picture[0]->shared = 0;
1657 s->current_picture_ptr = pic;
1659 // input is not a shared pix -> reuse buffer for current_pix
1660 s->current_picture_ptr = s->reordered_input_picture[0];
1661 for (i = 0; i < 4; i++) {
1662 s->new_picture.f->data[i] += INPLACE_OFFSET;
1665 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1666 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1667 s->current_picture_ptr)) < 0)
1670 s->picture_number = s->new_picture.f->display_picture_number;
/* Finish the just-coded frame: pad reference pictures by replicating
 * their border pixels (needed for unrestricted motion vectors), record
 * the picture type and lambda for future rate-control decisions, and
 * mirror state into the deprecated coded_frame / error fields. */
1675 static void frame_end(MpegEncContext *s)
1677 if (s->unrestricted_mv &&
1678 s->current_picture.reference &&
1680 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1681 int hshift = desc->log2_chroma_w;
1682 int vshift = desc->log2_chroma_h;
 /* luma plane */
1683 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1684 s->current_picture.f->linesize[0],
1685 s->h_edge_pos, s->v_edge_pos,
1686 EDGE_WIDTH, EDGE_WIDTH,
1687 EDGE_TOP | EDGE_BOTTOM);
 /* chroma planes, scaled by the subsampling shifts */
1688 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1689 s->current_picture.f->linesize[1],
1690 s->h_edge_pos >> hshift,
1691 s->v_edge_pos >> vshift,
1692 EDGE_WIDTH >> hshift,
1693 EDGE_WIDTH >> vshift,
1694 EDGE_TOP | EDGE_BOTTOM);
1695 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1696 s->current_picture.f->linesize[2],
1697 s->h_edge_pos >> hshift,
1698 s->v_edge_pos >> vshift,
1699 EDGE_WIDTH >> hshift,
1700 EDGE_WIDTH >> vshift,
1701 EDGE_TOP | EDGE_BOTTOM);
 /* remember type/lambda for rate control of future frames */
1706 s->last_pict_type = s->pict_type;
1707 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1708 if (s->pict_type!= AV_PICTURE_TYPE_B)
1709 s->last_non_b_pict_type = s->pict_type;
 /* deprecated public-API mirrors, compiled only while the API exists */
1711 #if FF_API_CODED_FRAME
1712 FF_DISABLE_DEPRECATION_WARNINGS
1713 av_frame_unref(s->avctx->coded_frame);
1714 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1715 FF_ENABLE_DEPRECATION_WARNINGS
1717 #if FF_API_ERROR_FRAME
1718 FF_DISABLE_DEPRECATION_WARNINGS
1719 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1720 sizeof(s->current_picture.encoding_error));
1721 FF_ENABLE_DEPRECATION_WARNINGS
/* Refresh the per-coefficient DCT offsets used for noise reduction from
 * the accumulated quantization-error statistics, separately for intra
 * and inter blocks. Counters are halved once they exceed 2^16 so the
 * statistics form a decaying average. */
1725 static void update_noise_reduction(MpegEncContext *s)
1729 for (intra = 0; intra < 2; intra++) {
 /* age the statistics to keep them adaptive */
1730 if (s->dct_count[intra] > (1 << 16)) {
1731 for (i = 0; i < 64; i++) {
1732 s->dct_error_sum[intra][i] >>= 1;
1734 s->dct_count[intra] >>= 1;
 /* offset ~= strength * count / error (rounded) per coefficient */
1737 for (i = 0; i < 64; i++) {
1738 s->dct_offset[intra][i] = (s->noise_reduction *
1739 s->dct_count[intra] +
1740 s->dct_error_sum[intra][i] / 2) /
1741 (s->dct_error_sum[intra][i] + 1);
/* Prepare encoder state for coding one frame: rotate last/next/current
 * reference pictures, take new refs on them, adjust data pointers and
 * line sizes for field pictures, select the matching dequantizer
 * functions, and update noise-reduction tables if enabled.
 * Returns 0 on success, negative AVERROR on reference failure. */
1746 static int frame_start(MpegEncContext *s)
1750 /* mark & release old frames */
1751 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1752 s->last_picture_ptr != s->next_picture_ptr &&
1753 s->last_picture_ptr->f->buf[0]) {
1754 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1757 s->current_picture_ptr->f->pict_type = s->pict_type;
1758 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1760 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1761 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1762 s->current_picture_ptr)) < 0)
 /* non-B frames become the new forward reference */
1765 if (s->pict_type != AV_PICTURE_TYPE_B) {
1766 s->last_picture_ptr = s->next_picture_ptr;
1768 s->next_picture_ptr = s->current_picture_ptr;
1771 if (s->last_picture_ptr) {
1772 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1773 if (s->last_picture_ptr->f->buf[0] &&
1774 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1775 s->last_picture_ptr)) < 0)
1778 if (s->next_picture_ptr) {
1779 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1780 if (s->next_picture_ptr->f->buf[0] &&
1781 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1782 s->next_picture_ptr)) < 0)
 /* field pictures: double the strides, bottom field starts one line in */
1786 if (s->picture_structure!= PICT_FRAME) {
1788 for (i = 0; i < 4; i++) {
1789 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1790 s->current_picture.f->data[i] +=
1791 s->current_picture.f->linesize[i];
1793 s->current_picture.f->linesize[i] *= 2;
1794 s->last_picture.f->linesize[i] *= 2;
1795 s->next_picture.f->linesize[i] *= 2;
 /* pick the dequantizer matching the output bitstream format */
1799 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1800 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1801 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1802 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1803 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1804 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1806 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1807 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1810 if (s->dct_error_sum) {
1811 av_assert2(s->noise_reduction && s->encoding);
1812 update_noise_reduction(s);
/* Public entry point: encode one video frame into pkt.
 * Queues the input (load_input_picture), picks the picture to code
 * (select_input_picture), encodes it, applies VBV rate control with
 * possible re-encode at higher lambda, appends stuffing bits, computes
 * MPEG-1/2 vbv_delay for CBR streams, and fills packet pts/dts/flags.
 * Returns 0 on success, negative AVERROR on failure; *got_packet tells
 * whether pkt holds output.
 * NOTE(review): extraction is gappy — error paths, some closers and the
 * final return are missing here; verify against the full file. */
1818 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1819 const AVFrame *pic_arg, int *got_packet)
1821 MpegEncContext *s = avctx->priv_data;
1822 int i, stuffing_count, ret;
1823 int context_count = s->slice_context_count;
1825 s->vbv_ignore_qmax = 0;
1827 s->picture_in_gop_number++;
1829 if (load_input_picture(s, pic_arg) < 0)
1832 if (select_input_picture(s) < 0) {
 /* output? a picture was selected for coding */
1837 if (s->new_picture.f->data[0]) {
1838 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1839 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1841 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1842 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1845 s->mb_info_ptr = av_packet_new_side_data(pkt,
1846 AV_PKT_DATA_H263_MB_INFO,
1847 s->mb_width*s->mb_height*12);
1848 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
 /* split the output buffer between slice-threading contexts,
  * proportionally to the rows each thread encodes */
1851 for (i = 0; i < context_count; i++) {
1852 int start_y = s->thread_context[i]->start_mb_y;
1853 int end_y = s->thread_context[i]-> end_mb_y;
1854 int h = s->mb_height;
1855 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1856 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1858 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1861 s->pict_type = s->new_picture.f->pict_type;
1863 ret = frame_start(s);
1867 ret = encode_picture(s, s->picture_number);
1868 if (growing_buffer) {
1869 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1870 pkt->data = s->pb.buf;
1871 pkt->size = avctx->internal->byte_buffer_size;
 /* deprecated per-frame statistics mirrors */
1876 #if FF_API_STAT_BITS
1877 FF_DISABLE_DEPRECATION_WARNINGS
1878 avctx->header_bits = s->header_bits;
1879 avctx->mv_bits = s->mv_bits;
1880 avctx->misc_bits = s->misc_bits;
1881 avctx->i_tex_bits = s->i_tex_bits;
1882 avctx->p_tex_bits = s->p_tex_bits;
1883 avctx->i_count = s->i_count;
1884 // FIXME f/b_count in avctx
1885 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1886 avctx->skip_count = s->skip_count;
1887 FF_ENABLE_DEPRECATION_WARNINGS
1892 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1893 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
 /* --- VBV check: re-encode at a higher lambda if too big --- */
1895 if (avctx->rc_buffer_size) {
1896 RateControlContext *rcc = &s->rc_context;
1897 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1898 int hq = (s->avctx->mb_decision == FF_MB_DECISION_RD || s->avctx->trellis);
1899 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1901 if (put_bits_count(&s->pb) > max_size &&
1902 s->lambda < s->lmax) {
1903 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1904 (s->qscale + 1) / s->qscale);
1905 if (s->adaptive_quant) {
1907 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1908 s->lambda_table[i] =
1909 FFMAX(s->lambda_table[i] + min_step,
1910 s->lambda_table[i] * (s->qscale + 1) /
1913 s->mb_skipped = 0; // done in frame_start()
1914 // done in encode_picture() so we must undo it
1915 if (s->pict_type == AV_PICTURE_TYPE_P) {
1916 if (s->flipflop_rounding ||
1917 s->codec_id == AV_CODEC_ID_H263P ||
1918 s->codec_id == AV_CODEC_ID_MPEG4)
1919 s->no_rounding ^= 1;
1921 if (s->pict_type != AV_PICTURE_TYPE_B) {
1922 s->time_base = s->last_time_base;
1923 s->last_non_b_time = s->time - s->pp_time;
1925 for (i = 0; i < context_count; i++) {
1926 PutBitContext *pb = &s->thread_context[i]->pb;
1927 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1929 s->vbv_ignore_qmax = 1;
1930 av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1934 av_assert0(s->avctx->rc_max_rate);
1937 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1938 ff_write_pass1_stats(s);
 /* propagate per-plane encoding error (PSNR data) */
1940 for (i = 0; i < 4; i++) {
1941 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1942 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1944 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1945 s->current_picture_ptr->encoding_error,
1946 (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1949 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1950 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1951 s->misc_bits + s->i_tex_bits +
1953 flush_put_bits(&s->pb);
1954 s->frame_bits = put_bits_count(&s->pb);
 /* --- stuffing: pad the frame up to the rate-control target --- */
1956 stuffing_count = ff_vbv_update(s, s->frame_bits);
1957 s->stuffing_bits = 8*stuffing_count;
1958 if (stuffing_count) {
1959 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1960 stuffing_count + 50) {
1961 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1965 switch (s->codec_id) {
1966 case AV_CODEC_ID_MPEG1VIDEO:
1967 case AV_CODEC_ID_MPEG2VIDEO:
1968 while (stuffing_count--) {
1969 put_bits(&s->pb, 8, 0);
1972 case AV_CODEC_ID_MPEG4:
 /* MPEG-4 stuffing uses a dedicated start code (0x1C3) */
1973 put_bits(&s->pb, 16, 0);
1974 put_bits(&s->pb, 16, 0x1C3);
1975 stuffing_count -= 4;
1976 while (stuffing_count--) {
1977 put_bits(&s->pb, 8, 0xFF);
1981 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1983 flush_put_bits(&s->pb);
1984 s->frame_bits = put_bits_count(&s->pb);
1987 /* update MPEG-1/2 vbv_delay for CBR */
1988 if (s->avctx->rc_max_rate &&
1989 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1990 s->out_format == FMT_MPEG1 &&
1991 90000LL * (avctx->rc_buffer_size - 1) <=
1992 s->avctx->rc_max_rate * 0xFFFFLL) {
1993 AVCPBProperties *props;
1996 int vbv_delay, min_delay;
1997 double inbits = s->avctx->rc_max_rate *
1998 av_q2d(s->avctx->time_base);
1999 int minbits = s->frame_bits - 8 *
2000 (s->vbv_delay_ptr - s->pb.buf - 1);
2001 double bits = s->rc_context.buffer_index + minbits - inbits;
2004 av_log(s->avctx, AV_LOG_ERROR,
2005 "Internal error, negative bits\n");
2007 av_assert1(s->repeat_first_field == 0);
 /* vbv_delay in 90 kHz units, clamped below 0xFFFF */
2009 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
2010 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
2011 s->avctx->rc_max_rate;
2013 vbv_delay = FFMAX(vbv_delay, min_delay);
2015 av_assert0(vbv_delay < 0xFFFF);
 /* patch the 16-bit vbv_delay field directly into the header bytes */
2017 s->vbv_delay_ptr[0] &= 0xF8;
2018 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2019 s->vbv_delay_ptr[1] = vbv_delay >> 5;
2020 s->vbv_delay_ptr[2] &= 0x07;
2021 s->vbv_delay_ptr[2] |= vbv_delay << 3;
2023 props = av_cpb_properties_alloc(&props_size);
2025 return AVERROR(ENOMEM);
 /* convert 90 kHz ticks to 27 MHz for AVCPBProperties */
2026 props->vbv_delay = vbv_delay * 300;
2028 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
2029 (uint8_t*)props, props_size);
2035 #if FF_API_VBV_DELAY
2036 FF_DISABLE_DEPRECATION_WARNINGS
2037 avctx->vbv_delay = vbv_delay * 300;
2038 FF_ENABLE_DEPRECATION_WARNINGS
2041 s->total_bits += s->frame_bits;
2042 #if FF_API_STAT_BITS
2043 FF_DISABLE_DEPRECATION_WARNINGS
2044 avctx->frame_bits = s->frame_bits;
2045 FF_ENABLE_DEPRECATION_WARNINGS
 /* --- packet timestamps: dts is delayed when B frames reorder --- */
2049 pkt->pts = s->current_picture.f->pts;
2050 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2051 if (!s->current_picture.f->coded_picture_number)
2052 pkt->dts = pkt->pts - s->dts_delta;
2054 pkt->dts = s->reordered_pts;
2055 s->reordered_pts = pkt->pts;
2057 pkt->dts = pkt->pts;
2058 if (s->current_picture.f->key_frame)
2059 pkt->flags |= AV_PKT_FLAG_KEY;
2061 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2066 /* release non-reference frames */
2067 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2068 if (!s->picture[i].reference)
2069 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2072 av_assert1((s->frame_bits & 7) == 0);
2074 pkt->size = s->frame_bits / 8;
2075 *got_packet = !!pkt->size;
/* Zero out block n entirely when it contains only a few small
 * coefficients whose weighted score (per-position weights in tab[],
 * heavier near DC) stays below "threshold" — coding such blocks costs
 * more bits than the quality they add. A negative threshold additionally
 * preserves the DC coefficient (skip_dc).
 * NOTE(review): the score-accumulation and skip_dc-assignment lines are
 * missing from this extraction; confirm against the full file. */
2079 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2080 int n, int threshold)
 /* weight per scan position: DC and the first AC terms count most */
2082 static const char tab[64] = {
2083 3, 2, 2, 1, 1, 1, 1, 1,
2084 1, 1, 1, 1, 1, 1, 1, 1,
2085 1, 1, 1, 1, 1, 1, 1, 1,
2086 0, 0, 0, 0, 0, 0, 0, 0,
2087 0, 0, 0, 0, 0, 0, 0, 0,
2088 0, 0, 0, 0, 0, 0, 0, 0,
2089 0, 0, 0, 0, 0, 0, 0, 0,
2090 0, 0, 0, 0, 0, 0, 0, 0
2095 int16_t *block = s->block[n];
2096 const int last_index = s->block_last_index[n];
2099 if (threshold < 0) {
2101 threshold = -threshold;
2105 /* Are all we could set to zero already zero? */
2106 if (last_index <= skip_dc - 1)
 /* accumulate the weighted score over all coded coefficients */
2109 for (i = 0; i <= last_index; i++) {
2110 const int j = s->intra_scantable.permutated[i];
2111 const int level = FFABS(block[j]);
2113 if (skip_dc && i == 0)
2117 } else if (level > 1) {
 /* a coefficient larger than 1 is always worth keeping */
2123 if (score >= threshold)
 /* clear everything (except DC when skip_dc is set) */
2125 for (i = skip_dc; i <= last_index; i++) {
2126 const int j = s->intra_scantable.permutated[i];
2130 s->block_last_index[n] = 0;
2132 s->block_last_index[n] = -1;
/* Clamp the quantized coefficients of a block into the codec's legal
 * range [min_qcoeff, max_qcoeff], skipping the intra DC term, and warn
 * once per block when clipping occurred under simple MB decision
 * (with RD decision, overflowing blocks are requantized instead). */
2135 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2139 const int maxlevel = s->max_qcoeff;
2140 const int minlevel = s->min_qcoeff;
2144 i = 1; // skip clipping of intra dc
2148 for (; i <= last_index; i++) {
2149 const int j = s->intra_scantable.permutated[i];
2150 int level = block[j];
2152 if (level > maxlevel) {
2155 } else if (level < minlevel) {
2163 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2164 av_log(s->avctx, AV_LOG_INFO,
2165 "warning, clipping %d dct coefficients to %d..%d\n",
2166 overflow, minlevel, maxlevel);
/* Compute a perceptual weight for every pixel of an 8x8 block: for each
 * pixel, the standard deviation of its 3x3 neighbourhood (clamped at the
 * block border) is estimated and scaled — flat areas get large weights
 * (errors there are visible), textured areas small ones. Used by the
 * quantizer noise shaping. Declarations of x/y/sum/sqr/count are not
 * visible in this extraction. */
2169 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2173 for (y = 0; y < 8; y++) {
2174 for (x = 0; x < 8; x++) {
 /* 3x3 neighbourhood clamped to the 8x8 block */
2180 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2181 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2182 int v = ptr[x2 + y2 * stride];
 /* 36 * sqrt(variance-like term) / count */
2188 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2193 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2194 int motion_x, int motion_y,
2195 int mb_block_height,
2199 int16_t weight[12][64];
2200 int16_t orig[12][64];
2201 const int mb_x = s->mb_x;
2202 const int mb_y = s->mb_y;
2205 int dct_offset = s->linesize * 8; // default for progressive frames
2206 int uv_dct_offset = s->uvlinesize * 8;
2207 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2208 ptrdiff_t wrap_y, wrap_c;
2210 for (i = 0; i < mb_block_count; i++)
2211 skip_dct[i] = s->skipdct;
2213 if (s->adaptive_quant) {
2214 const int last_qp = s->qscale;
2215 const int mb_xy = mb_x + mb_y * s->mb_stride;
2217 s->lambda = s->lambda_table[mb_xy];
2220 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2221 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2222 s->dquant = s->qscale - last_qp;
2224 if (s->out_format == FMT_H263) {
2225 s->dquant = av_clip(s->dquant, -2, 2);
2227 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2229 if (s->pict_type == AV_PICTURE_TYPE_B) {
2230 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2233 if (s->mv_type == MV_TYPE_8X8)
2239 ff_set_qscale(s, last_qp + s->dquant);
2240 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2241 ff_set_qscale(s, s->qscale + s->dquant);
2243 wrap_y = s->linesize;
2244 wrap_c = s->uvlinesize;
2245 ptr_y = s->new_picture.f->data[0] +
2246 (mb_y * 16 * wrap_y) + mb_x * 16;
2247 ptr_cb = s->new_picture.f->data[1] +
2248 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2249 ptr_cr = s->new_picture.f->data[2] +
2250 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2252 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2253 uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2254 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2255 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2256 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2258 16, 16, mb_x * 16, mb_y * 16,
2259 s->width, s->height);
2261 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2263 mb_block_width, mb_block_height,
2264 mb_x * mb_block_width, mb_y * mb_block_height,
2266 ptr_cb = ebuf + 16 * wrap_y;
2267 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2269 mb_block_width, mb_block_height,
2270 mb_x * mb_block_width, mb_y * mb_block_height,
2272 ptr_cr = ebuf + 16 * wrap_y + 16;
2276 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2277 int progressive_score, interlaced_score;
2279 s->interlaced_dct = 0;
2280 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2281 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2282 NULL, wrap_y, 8) - 400;
2284 if (progressive_score > 0) {
2285 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2286 NULL, wrap_y * 2, 8) +
2287 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2288 NULL, wrap_y * 2, 8);
2289 if (progressive_score > interlaced_score) {
2290 s->interlaced_dct = 1;
2292 dct_offset = wrap_y;
2293 uv_dct_offset = wrap_c;
2295 if (s->chroma_format == CHROMA_422 ||
2296 s->chroma_format == CHROMA_444)
2302 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2303 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2304 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2305 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2307 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2311 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2312 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2313 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2314 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2315 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2316 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2317 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2318 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2319 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2320 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2321 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2322 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2326 op_pixels_func (*op_pix)[4];
2327 qpel_mc_func (*op_qpix)[16];
2328 uint8_t *dest_y, *dest_cb, *dest_cr;
2330 dest_y = s->dest[0];
2331 dest_cb = s->dest[1];
2332 dest_cr = s->dest[2];
2334 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2335 op_pix = s->hdsp.put_pixels_tab;
2336 op_qpix = s->qdsp.put_qpel_pixels_tab;
2338 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2339 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2342 if (s->mv_dir & MV_DIR_FORWARD) {
2343 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2344 s->last_picture.f->data,
2346 op_pix = s->hdsp.avg_pixels_tab;
2347 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2349 if (s->mv_dir & MV_DIR_BACKWARD) {
2350 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2351 s->next_picture.f->data,
2355 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2356 int progressive_score, interlaced_score;
2358 s->interlaced_dct = 0;
2359 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2360 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2364 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2365 progressive_score -= 400;
2367 if (progressive_score > 0) {
2368 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2370 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2374 if (progressive_score > interlaced_score) {
2375 s->interlaced_dct = 1;
2377 dct_offset = wrap_y;
2378 uv_dct_offset = wrap_c;
2380 if (s->chroma_format == CHROMA_422)
2386 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2387 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2388 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2389 dest_y + dct_offset, wrap_y);
2390 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2391 dest_y + dct_offset + 8, wrap_y);
2393 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2397 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2398 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2399 if (!s->chroma_y_shift) { /* 422 */
2400 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2401 dest_cb + uv_dct_offset, wrap_c);
2402 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2403 dest_cr + uv_dct_offset, wrap_c);
2406 /* pre quantization */
2407 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2408 2 * s->qscale * s->qscale) {
2410 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2412 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2414 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2415 wrap_y, 8) < 20 * s->qscale)
2417 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2418 wrap_y, 8) < 20 * s->qscale)
2420 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2422 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2424 if (!s->chroma_y_shift) { /* 422 */
2425 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2426 dest_cb + uv_dct_offset,
2427 wrap_c, 8) < 20 * s->qscale)
2429 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2430 dest_cr + uv_dct_offset,
2431 wrap_c, 8) < 20 * s->qscale)
2437 if (s->quantizer_noise_shaping) {
2439 get_visual_weight(weight[0], ptr_y , wrap_y);
2441 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2443 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2445 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2447 get_visual_weight(weight[4], ptr_cb , wrap_c);
2449 get_visual_weight(weight[5], ptr_cr , wrap_c);
2450 if (!s->chroma_y_shift) { /* 422 */
2452 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2455 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2458 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2461 /* DCT & quantize */
2462 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2464 for (i = 0; i < mb_block_count; i++) {
2467 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2468 // FIXME we could decide to change to quantizer instead of
2470 // JS: I don't think that would be a good idea it could lower
2471 // quality instead of improve it. Just INTRADC clipping
2472 // deserves changes in quantizer
2474 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2476 s->block_last_index[i] = -1;
2478 if (s->quantizer_noise_shaping) {
2479 for (i = 0; i < mb_block_count; i++) {
2481 s->block_last_index[i] =
2482 dct_quantize_refine(s, s->block[i], weight[i],
2483 orig[i], i, s->qscale);
2488 if (s->luma_elim_threshold && !s->mb_intra)
2489 for (i = 0; i < 4; i++)
2490 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2491 if (s->chroma_elim_threshold && !s->mb_intra)
2492 for (i = 4; i < mb_block_count; i++)
2493 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2495 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2496 for (i = 0; i < mb_block_count; i++) {
2497 if (s->block_last_index[i] == -1)
2498 s->coded_score[i] = INT_MAX / 256;
2503 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2504 s->block_last_index[4] =
2505 s->block_last_index[5] = 0;
2507 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2508 if (!s->chroma_y_shift) { /* 422 / 444 */
2509 for (i=6; i<12; i++) {
2510 s->block_last_index[i] = 0;
2511 s->block[i][0] = s->block[4][0];
2516 // non c quantize code returns incorrect block_last_index FIXME
2517 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2518 for (i = 0; i < mb_block_count; i++) {
2520 if (s->block_last_index[i] > 0) {
2521 for (j = 63; j > 0; j--) {
2522 if (s->block[i][s->intra_scantable.permutated[j]])
2525 s->block_last_index[i] = j;
2530 /* huffman encode */
2531 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2532 case AV_CODEC_ID_MPEG1VIDEO:
2533 case AV_CODEC_ID_MPEG2VIDEO:
2534 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2535 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2537 case AV_CODEC_ID_MPEG4:
2538 if (CONFIG_MPEG4_ENCODER)
2539 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2541 case AV_CODEC_ID_MSMPEG4V2:
2542 case AV_CODEC_ID_MSMPEG4V3:
2543 case AV_CODEC_ID_WMV1:
2544 if (CONFIG_MSMPEG4_ENCODER)
2545 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2547 case AV_CODEC_ID_WMV2:
2548 if (CONFIG_WMV2_ENCODER)
2549 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2551 case AV_CODEC_ID_H261:
2552 if (CONFIG_H261_ENCODER)
2553 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2555 case AV_CODEC_ID_H263:
2556 case AV_CODEC_ID_H263P:
2557 case AV_CODEC_ID_FLV1:
2558 case AV_CODEC_ID_RV10:
2559 case AV_CODEC_ID_RV20:
2560 if (CONFIG_H263_ENCODER)
2561 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2563 case AV_CODEC_ID_MJPEG:
2564 case AV_CODEC_ID_AMV:
2565 if (CONFIG_MJPEG_ENCODER)
2566 ff_mjpeg_encode_mb(s, s->block);
/**
 * Encode one macroblock, dispatching on chroma subsampling.
 * Block-size arguments select chroma DCT geometry and block count:
 * 4:2:0 -> 8x8 chroma, 6 blocks; 4:2:2 -> 16x8 chroma, 8 blocks;
 * otherwise (4:4:4) -> 16x16 chroma, 12 blocks.
 * NOTE(review): the original line numbering jumps here (2574/2578 missing) —
 * braces appear elided by extraction; verify against upstream.
 */
2573 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2575     if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2576     else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2577     else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/**
 * Snapshot the encoder state that encode_mb() mutates, from context s into d,
 * so a candidate encode can later be rolled back (used by encode_mb_hq()).
 * Copies prediction state (last MVs, DC predictors, skip run), the bit-count
 * statistics, and quantizer state.
 * NOTE(review): numbering gaps (e.g. 2587, 2599-2601) indicate elided lines —
 * presumably the last_dc loop header and further copies; confirm upstream.
 */
2580 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2583     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2586     d->mb_skip_run= s->mb_skip_run;
2588         d->last_dc[i] = s->last_dc[i];
     /* bit-usage statistics, needed for rate estimation and 2-pass logging */
2591     d->mv_bits= s->mv_bits;
2592     d->i_tex_bits= s->i_tex_bits;
2593     d->p_tex_bits= s->p_tex_bits;
2594     d->i_count= s->i_count;
2595     d->f_count= s->f_count;
2596     d->b_count= s->b_count;
2597     d->skip_count= s->skip_count;
2598     d->misc_bits= s->misc_bits;
     /* quantizer state */
2602     d->qscale= s->qscale;
2603     d->dquant= s->dquant;
2605     d->esc3_level_length= s->esc3_level_length;
/**
 * Counterpart of copy_context_before_encode(): after a candidate macroblock
 * has been encoded, copy the resulting state from s into d (typically into
 * best_s when the candidate won the RD comparison). In addition to the
 * before-snapshot fields it also records the MB decision outputs (mb_intra,
 * mb_skipped, mv_type, mv_dir, block_last_index, interlaced_dct) and, with
 * data partitioning, the texture PutBitContext.
 * NOTE(review): numbering gaps indicate elided lines (loop headers, pb copy);
 * verify against upstream.
 */
2608 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2611     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2612     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2615     d->mb_skip_run= s->mb_skip_run;
2617         d->last_dc[i] = s->last_dc[i];
     /* bit-usage statistics */
2620     d->mv_bits= s->mv_bits;
2621     d->i_tex_bits= s->i_tex_bits;
2622     d->p_tex_bits= s->p_tex_bits;
2623     d->i_count= s->i_count;
2624     d->f_count= s->f_count;
2625     d->b_count= s->b_count;
2626     d->skip_count= s->skip_count;
2627     d->misc_bits= s->misc_bits;
     /* macroblock decision results */
2629     d->mb_intra= s->mb_intra;
2630     d->mb_skipped= s->mb_skipped;
2631     d->mv_type= s->mv_type;
2632     d->mv_dir= s->mv_dir;
2634     if(s->data_partitioning){
2636         d->tex_pb= s->tex_pb;
2640         d->block_last_index[i]= s->block_last_index[i];
2641     d->interlaced_dct= s->interlaced_dct;
2642     d->qscale= s->qscale;
2644     d->esc3_level_length= s->esc3_level_length;
/**
 * Trial-encode one macroblock candidate (given mv_dir/mv_type/mv already set
 * up by the caller) and keep it if its rate(-distortion) score beats *dmin.
 *
 * Restores state from 'backup', encodes into the double-buffered blocks[] and
 * PutBitContexts selected by *next_block, redirects s->dest to the RD
 * scratchpad so reconstruction does not clobber the real picture, then scores:
 * bit count alone, or bits*lambda2 + SSE<<FF_LAMBDA_SHIFT under
 * FF_MB_DECISION_RD. On a win, state is saved into 'best' via
 * copy_context_after_encode().
 * NOTE(review): several lines are elided here (the score comparison /
 * *next_block flip, and the condition guarding the dest redirect) — confirm
 * control flow against upstream.
 */
2647 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2648                            PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2649                            int *dmin, int *next_block, int motion_x, int motion_y)
2652     uint8_t *dest_backup[3];
2654     copy_context_before_encode(s, backup, type);
2656     s->block= s->blocks[*next_block];
2657     s->pb= pb[*next_block];
2658     if(s->data_partitioning){
2659         s->pb2   = pb2   [*next_block];
2660         s->tex_pb= tex_pb[*next_block];
     /* encode into the scratchpad so the trial does not overwrite s->dest */
2664         memcpy(dest_backup, s->dest, sizeof(s->dest));
2665         s->dest[0] = s->sc.rd_scratchpad;
2666         s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2667         s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2668         av_assert0(s->linesize >= 32); //FIXME
2671     encode_mb(s, motion_x, motion_y);
2673     score= put_bits_count(&s->pb);
2674     if(s->data_partitioning){
2675         score+= put_bits_count(&s->pb2);
2676         score+= put_bits_count(&s->tex_pb);
2679     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2680         ff_mpv_reconstruct_mb(s, s->block);
     /* rate-distortion score: bits weighted by lambda2 plus reconstruction SSE */
2682         score *= s->lambda2;
2683         score += sse_mb(s) << FF_LAMBDA_SHIFT;
2687         memcpy(s->dest, dest_backup, sizeof(s->dest));
2694         copy_context_after_encode(best, s, type);
/**
 * Sum of squared errors between two w x h pixel regions with the given stride.
 * Fast SIMD-capable paths via s->mecc.sse for the common 16x16 and 8x8 sizes;
 * otherwise a generic loop using the ff_square_tab lookup (offset by 256 so a
 * signed difference indexes directly).
 * NOTE(review): the 16x16 'if', the loop headers and the final 'return acc'
 * are elided by extraction; verify against upstream.
 */
2698 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2699     const uint32_t *sq = ff_square_tab + 256;
2704         return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2705     else if(w==8 && h==8)
2706         return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2710             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/**
 * SSE (or NSSE, per avctx->mb_cmp) of the current macroblock: reconstructed
 * pixels in s->dest vs. the source picture s->new_picture, luma + both chroma
 * planes. w/h are clipped at the right/bottom picture border; the fast
 * full-16x16 path (elided condition) uses the mecc function pointers, the
 * border path falls back to the generic sse() helper with halved chroma dims.
 */
2719 static int sse_mb(MpegEncContext *s){
2723     if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2724     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
     /* full macroblock inside the frame: use the optimized comparators */
2727         if(s->avctx->mb_cmp == FF_CMP_NSSE){
2728             return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2729                    s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2730                    s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2732             return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2733                    s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2734                    s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
     /* border macroblock: clip and use the generic per-pixel routine */
2737         return  sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2738                +sse(s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2739                +sse(s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/**
 * Slice-thread worker: pre-pass P-frame motion estimation over this thread's
 * macroblock rows. Iterates bottom-up / right-to-left (the reverse scan order
 * of the main pass) using the pre-pass diamond size avctx->pre_dia_size.
 * arg is a MpegEncContext* boxed in a void*; return value elided from view.
 */
2742 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2743     MpegEncContext *s= *(void**)arg;
2747     s->me.dia_size= s->avctx->pre_dia_size;
2748     s->first_slice_line=1;
2749     for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2750         for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2751             ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2753         s->first_slice_line=0;
/**
 * Slice-thread worker: main motion-estimation pass over this thread's rows.
 * Maintains block_index[] incrementally across the row (advance by 2 per MB)
 * and dispatches to B- or P-frame estimation per picture type; results are
 * stored in the context's MV tables.
 */
2761 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2762     MpegEncContext *s= *(void**)arg;
2764     ff_check_alignment();
2766     s->me.dia_size= s->avctx->dia_size;
2767     s->first_slice_line=1;
2768     for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2769         s->mb_x=0; //for block init below
2770         ff_init_block_index(s);
2771         for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2772             s->block_index[0]+=2;
2773             s->block_index[1]+=2;
2774             s->block_index[2]+=2;
2775             s->block_index[3]+=2;
2777             /* compute motion vector & mb_type and store in context */
2778             if(s->pict_type==AV_PICTURE_TYPE_B)
2779                 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2781                 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2783         s->first_slice_line=0;
/**
 * Slice-thread worker: per-macroblock luma variance and mean for this
 * thread's rows, used by rate control / scene analysis.
 * varc = (sum(x^2) - sum(x)^2/256 + rounding) >> 8 over the 16x16 luma block;
 * results go to current_picture.mb_var / mb_mean and accumulate into
 * me.mb_var_sum_temp (merged across threads later).
 */
2788 static int mb_var_thread(AVCodecContext *c, void *arg){
2789     MpegEncContext *s= *(void**)arg;
2792     ff_check_alignment();
2794     for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2795         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2798             uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2800             int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2802             varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2803                     (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2805             s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2806             s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2807             s->me.mb_var_sum_temp    += varc;
/**
 * Finalize the current slice bitstream: codec-specific stuffing
 * (MPEG-4 merge-partitions + stuffing, MJPEG stuffing), then byte-align and
 * flush the PutBitContext. Under PASS1 (and not partitioned) the alignment
 * bits are accounted to misc_bits for 2-pass statistics.
 */
2813 static void write_slice_end(MpegEncContext *s){
2814     if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2815         if(s->partitioned_frame){
2816             ff_mpeg4_merge_partitions(s);
2819         ff_mpeg4_stuffing(&s->pb);
2820     }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2821         ff_mjpeg_encode_stuffing(s);
2824     avpriv_align_put_bits(&s->pb);
2825     flush_put_bits(&s->pb);
2827     if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2828         s->misc_bits+= get_bits_diff(s);
/**
 * Append one 12-byte macroblock-info record (H.263 RFC 2190-style side data)
 * at the end of the mb_info buffer: bit offset of the MB in the stream,
 * qscale, GOB number, MB address within the GOB, and the predicted MV
 * (hmv1/vmv1); hmv2/vmv2 are written as 0 since 4MV is not implemented.
 */
2831 static void write_mb_info(MpegEncContext *s)
2833     uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2834     int offset = put_bits_count(&s->pb);
2835     int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2836     int gobn = s->mb_y / s->gob_index;
2838     if (CONFIG_H263_ENCODER)
2839         ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2840     bytestream_put_le32(&ptr, offset);
2841     bytestream_put_byte(&ptr, s->qscale);
2842     bytestream_put_byte(&ptr, gobn);
2843     bytestream_put_le16(&ptr, mba);
2844     bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2845     bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2846     /* 4MV not implemented */
2847     bytestream_put_byte(&ptr, 0); /* hmv2 */
2848     bytestream_put_byte(&ptr, 0); /* vmv2 */
/**
 * Bookkeeping for the mb_info side-data records. Called before each MB
 * (startcode=0) and after writing a slice startcode (startcode=1): when the
 * stream has advanced mb_info bytes since the last record, a new 12-byte slot
 * is reserved; at a startcode the record position is snapped to the startcode
 * itself (last_mb_info) so the info points at the resync marker.
 * NOTE(review): the early-return and the write_mb_info() call site are among
 * the elided lines — confirm exact flow against upstream.
 */
2851 static void update_mb_info(MpegEncContext *s, int startcode)
2855     if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2856         s->mb_info_size += 12;
2857         s->prev_mb_info = s->last_mb_info;
2860         s->prev_mb_info = put_bits_count(&s->pb)/8;
2861         /* This might have incremented mb_info_size above, and we return without
2862          * actually writing any info into that slot yet. But in that case,
2863          * this will be called again at the start of the after writing the
2864          * start code, actually writing the mb info. */
2868     s->last_mb_info = put_bits_count(&s->pb)/8;
2869     if (!s->mb_info_size)
2870         s->mb_info_size += 12;
/**
 * Grow the encoder's output bit buffer when fewer than 'threshold' bytes
 * remain, by at least 'size_increase' bytes. Only applies when there is a
 * single slice context and the PutBitContext writes into the shared
 * avctx->internal->byte_buffer. After reallocation the PutBitContext is
 * rebased onto the new buffer and the derived pointers (ptr_lastgob,
 * vbv_delay_ptr) are re-anchored via saved offsets.
 *
 * @return 0 on success (elided from view), AVERROR(ENOMEM) if the new size
 *         would overflow or allocation fails, AVERROR(EINVAL) if even after
 *         the attempt fewer than threshold bytes remain.
 */
2874 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2876     if (   s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2877         && s->slice_context_count == 1
2878         && s->pb.buf == s->avctx->internal->byte_buffer) {
2879         int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2880         int vbv_pos     = s->vbv_delay_ptr - s->pb.buf;
2882         uint8_t *new_buffer = NULL;
2883         int new_buffer_size = 0;
         /* guard: growing past INT_MAX/8 would overflow bit counters */
2885         if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2886             av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2887             return AVERROR(ENOMEM);
2892         av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2893                               s->avctx->internal->byte_buffer_size + size_increase);
2895             return AVERROR(ENOMEM);
2897         memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2898         av_free(s->avctx->internal->byte_buffer);
2899         s->avctx->internal->byte_buffer      = new_buffer;
2900         s->avctx->internal->byte_buffer_size = new_buffer_size;
2901         rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2902         s->ptr_lastgob   = s->pb.buf + lastgob_pos;
2903         s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2905     if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2906         return AVERROR(EINVAL);
/**
 * Slice-thread worker: encode every macroblock in this thread's row range
 * [start_mb_y, end_mb_y). Handles per-codec slice/GOB headers, output buffer
 * growth, RD-based macroblock mode decision (double-buffered bit/block
 * buffers so the losing candidate can be discarded), the simple single-mode
 * path, PSNR error accumulation and the in-loop H.263 filter.
 * NOTE(review): many lines are elided throughout (loop headers, else
 * branches, returns) — verify any control-flow question against upstream.
 */
2910 static int encode_thread(AVCodecContext *c, void *arg){
2911     MpegEncContext *s= *(void**)arg;
2913     int chr_h= 16>>s->chroma_y_shift;
2915     MpegEncContext best_s = { 0 }, backup_s;
     /* double-buffered candidate bitstreams: index selected by next_block */
2916     uint8_t bit_buf[2][MAX_MB_BYTES];
2917     uint8_t bit_buf2[2][MAX_MB_BYTES];
2918     uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2919     PutBitContext pb[2], pb2[2], tex_pb[2];
2921     ff_check_alignment();
2924         init_put_bits(&pb    [i], bit_buf    [i], MAX_MB_BYTES);
2925         init_put_bits(&pb2   [i], bit_buf2   [i], MAX_MB_BYTES);
2926         init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2929     s->last_bits= put_bits_count(&s->pb);
2940         /* init last dc values */
2941         /* note: quant matrix value (8) is implied here */
2942         s->last_dc[i] = 128 << s->intra_dc_precision;
2944         s->current_picture.encoding_error[i] = 0;
2946     if(s->codec_id==AV_CODEC_ID_AMV){
         /* AMV uses asymmetric luma/chroma DC predictor resets */
2947         s->last_dc[0] = 128*8/13;
2948         s->last_dc[1] = 128*8/14;
2949         s->last_dc[2] = 128*8/14;
2952     memset(s->last_mv, 0, sizeof(s->last_mv));
2956     switch(s->codec_id){
2957     case AV_CODEC_ID_H263:
2958     case AV_CODEC_ID_H263P:
2959     case AV_CODEC_ID_FLV1:
2960         if (CONFIG_H263_ENCODER)
2961             s->gob_index = H263_GOB_HEIGHT(s->height);
2963     case AV_CODEC_ID_MPEG4:
2964         if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2965             ff_mpeg4_init_partitions(s);
2971     s->first_slice_line = 1;
2972     s->ptr_lastgob = s->pb.buf;
     /* ---- main macroblock loop ---- */
2973     for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2977         ff_set_qscale(s, s->qscale);
2978         ff_init_block_index(s);
2980         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2981             int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2982             int mb_type= s->mb_type[xy];
             /* grow the output buffer if this MB might not fit */
2986             int size_increase =  s->avctx->internal->byte_buffer_size/4
2987                                + s->mb_width*MAX_MB_BYTES;
2989             ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
2990             if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2991                 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2994             if(s->data_partitioning){
2995                 if(   s->pb2   .buf_end - s->pb2   .buf - (put_bits_count(&s->    pb2)>>3) < MAX_MB_BYTES
2996                    || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2997                     av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3003             s->mb_y = mb_y;  // moved into loop, can get changed by H.261
3004             ff_update_block_index(s);
3006             if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3007                 ff_h261_reorder_mb_index(s);
3008                 xy= s->mb_y*s->mb_stride + s->mb_x;
3009                 mb_type= s->mb_type[xy];
3012             /* write gob / video packet header  */
3014                 int current_packet_size, is_gob_start;
3016                 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
3018                 is_gob_start = s->rtp_payload_size &&
3019                                current_packet_size >= s->rtp_payload_size &&
3022                 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
                 /* codec-specific constraints on where a GOB/slice may start */
3024                 switch(s->codec_id){
3025                 case AV_CODEC_ID_H263:
3026                 case AV_CODEC_ID_H263P:
3027                     if(!s->h263_slice_structured)
3028                         if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3030                 case AV_CODEC_ID_MPEG2VIDEO:
3031                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3032                 case AV_CODEC_ID_MPEG1VIDEO:
3033                     if(s->mb_skip_run) is_gob_start=0;
3035                 case AV_CODEC_ID_MJPEG:
3036                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3041                     if(s->start_mb_y != mb_y || mb_x!=0){
3044                         if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3045                             ff_mpeg4_init_partitions(s);
3049                     av_assert2((put_bits_count(&s->pb)&7) == 0);
3050                     current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
                     /* optional simulated error injection for testing */
3052                     if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3053                         int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3054                         int d = 100 / s->error_rate;
3056                             current_packet_size=0;
3057                             s->pb.buf_ptr= s->ptr_lastgob;
3058                             av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3062 #if FF_API_RTP_CALLBACK
3063 FF_DISABLE_DEPRECATION_WARNINGS
3064                     if (s->avctx->rtp_callback){
3065                         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3066                         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3068 FF_ENABLE_DEPRECATION_WARNINGS
3070                     update_mb_info(s, 1);
                     /* emit the codec-specific resync/slice header */
3072                     switch(s->codec_id){
3073                     case AV_CODEC_ID_MPEG4:
3074                         if (CONFIG_MPEG4_ENCODER) {
3075                             ff_mpeg4_encode_video_packet_header(s);
3076                             ff_mpeg4_clean_buffers(s);
3079                     case AV_CODEC_ID_MPEG1VIDEO:
3080                     case AV_CODEC_ID_MPEG2VIDEO:
3081                         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3082                             ff_mpeg1_encode_slice_header(s);
3083                             ff_mpeg1_clean_buffers(s);
3086                     case AV_CODEC_ID_H263:
3087                     case AV_CODEC_ID_H263P:
3088                         if (CONFIG_H263_ENCODER)
3089                             ff_h263_encode_gob_header(s, mb_y);
3093                     if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3094                         int bits= put_bits_count(&s->pb);
3095                         s->misc_bits+= bits - s->last_bits;
3099                     s->ptr_lastgob += current_packet_size;
3100                     s->first_slice_line=1;
3101                     s->resync_mb_x=mb_x;
3102                     s->resync_mb_y=mb_y;
3106             if(  (s->resync_mb_x   == s->mb_x)
3107                && s->resync_mb_y+1 == s->mb_y){
3108                 s->first_slice_line=0;
3112             s->dquant=0; //only for QP_RD
3114             update_mb_info(s, 0);
             /* ---- RD mode decision: multiple candidate MB types ---- */
3116             if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3118                 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3120                 copy_context_before_encode(&backup_s, s, -1);
3122                 best_s.data_partitioning= s->data_partitioning;
3123                 best_s.partitioned_frame= s->partitioned_frame;
3124                 if(s->data_partitioning){
3125                     backup_s.pb2= s->pb2;
3126                     backup_s.tex_pb= s->tex_pb;
                 /* each candidate below sets mv_dir/mv_type/mv then trial-encodes */
3129                 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3130                     s->mv_dir = MV_DIR_FORWARD;
3131                     s->mv_type = MV_TYPE_16X16;
3133                     s->mv[0][0][0] = s->p_mv_table[xy][0];
3134                     s->mv[0][0][1] = s->p_mv_table[xy][1];
3135                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3136                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3138                 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3139                     s->mv_dir = MV_DIR_FORWARD;
3140                     s->mv_type = MV_TYPE_FIELD;
3143                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3144                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3145                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3147                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3148                                  &dmin, &next_block, 0, 0);
3150                 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3151                     s->mv_dir = MV_DIR_FORWARD;
3152                     s->mv_type = MV_TYPE_16X16;
3156                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3157                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3159                 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3160                     s->mv_dir = MV_DIR_FORWARD;
3161                     s->mv_type = MV_TYPE_8X8;
3164                         s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3165                         s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3167                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3168                                  &dmin, &next_block, 0, 0);
3170                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3171                     s->mv_dir = MV_DIR_FORWARD;
3172                     s->mv_type = MV_TYPE_16X16;
3174                     s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3175                     s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3176                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3177                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3179                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3180                     s->mv_dir = MV_DIR_BACKWARD;
3181                     s->mv_type = MV_TYPE_16X16;
3183                     s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3184                     s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3185                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3186                                  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3188                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3189                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3190                     s->mv_type = MV_TYPE_16X16;
3192                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3193                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3194                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3195                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3196                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3197                                  &dmin, &next_block, 0, 0);
3199                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3200                     s->mv_dir = MV_DIR_FORWARD;
3201                     s->mv_type = MV_TYPE_FIELD;
3204                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3205                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3206                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3208                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3209                                  &dmin, &next_block, 0, 0);
3211                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3212                     s->mv_dir = MV_DIR_BACKWARD;
3213                     s->mv_type = MV_TYPE_FIELD;
3216                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3217                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3218                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3220                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3221                                  &dmin, &next_block, 0, 0);
3223                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3224                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3225                     s->mv_type = MV_TYPE_FIELD;
3227                     for(dir=0; dir<2; dir++){
3229                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3230                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3231                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3234                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3235                                  &dmin, &next_block, 0, 0);
3237                 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3239                     s->mv_type = MV_TYPE_16X16;
3243                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3244                                  &dmin, &next_block, 0, 0);
3245                     if(s->h263_pred || s->h263_aic){
3247                             s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3249                             ff_clean_intra_table_entries(s); //old mode?
                 /* ---- QP_RD: additionally try nearby quantizers ---- */
3253                 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3254                     if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3255                         const int last_qp= backup_s.qscale;
3258                         const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3259                         static const int dquant_tab[4]={-1,1,-2,2};
3260                         int storecoefs = s->mb_intra && s->dc_val[0];
3262                         av_assert2(backup_s.dquant == 0);
3265                         s->mv_dir=   best_s.mv_dir;
3266                         s->mv_type = MV_TYPE_16X16;
3267                         s->mb_intra= best_s.mb_intra;
3268                         s->mv[0][0][0] = best_s.mv[0][0][0];
3269                         s->mv[0][0][1] = best_s.mv[0][0][1];
3270                         s->mv[1][0][0] = best_s.mv[1][0][0];
3271                         s->mv[1][0][1] = best_s.mv[1][0][1];
3273                         qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3274                         for(; qpi<4; qpi++){
3275                             int dquant= dquant_tab[qpi];
3276                             qp= last_qp + dquant;
3277                             if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3279                             backup_s.dquant= dquant;
                             /* save DC/AC predictors so a losing qp trial can restore them */
3282                                     dc[i]= s->dc_val[0][ s->block_index[i] ];
3283                                     memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3287                             encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3288                                          &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3289                             if(best_s.qscale != qp){
3292                                         s->dc_val[0][ s->block_index[i] ]= dc[i];
3293                                         memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3300                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3301                     int mx= s->b_direct_mv_table[xy][0];
3302                     int my= s->b_direct_mv_table[xy][1];
3304                     backup_s.dquant = 0;
3305                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3307                     ff_mpeg4_set_direct_mv(s, mx, my);
3308                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3309                                  &dmin, &next_block, mx, my);
3311                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3312                     backup_s.dquant = 0;
3313                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3315                     ff_mpeg4_set_direct_mv(s, 0, 0);
3316                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3317                                  &dmin, &next_block, 0, 0);
3319                 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3322                         coded |= s->block_last_index[i];
3325                         memcpy(s->mv, best_s.mv, sizeof(s->mv));
3326                         if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3327                             mx=my=0; //FIXME find the one we actually used
3328                             ff_mpeg4_set_direct_mv(s, mx, my);
3329                         }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3337                         s->mv_dir= best_s.mv_dir;
3338                         s->mv_type = best_s.mv_type;
3340 /*                        s->mv[0][0][0] = best_s.mv[0][0][0];
3341                         s->mv[0][0][1] = best_s.mv[0][0][1];
3342                         s->mv[1][0][0] = best_s.mv[1][0][0];
3343                         s->mv[1][0][1] = best_s.mv[1][0][1];*/
3346                         encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3347                                      &dmin, &next_block, mx, my);
3352                 s->current_picture.qscale_table[xy] = best_s.qscale;
                 /* commit the winning candidate and splice its bits into s->pb */
3354                 copy_context_after_encode(s, &best_s, -1);
3356                 pb_bits_count= put_bits_count(&s->pb);
3357                 flush_put_bits(&s->pb);
3358                 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3361                 if(s->data_partitioning){
3362                     pb2_bits_count= put_bits_count(&s->pb2);
3363                     flush_put_bits(&s->pb2);
3364                     avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3365                     s->pb2= backup_s.pb2;
3367                     tex_pb_bits_count= put_bits_count(&s->tex_pb);
3368                     flush_put_bits(&s->tex_pb);
3369                     avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3370                     s->tex_pb= backup_s.tex_pb;
3372                 s->last_bits= put_bits_count(&s->pb);
3374                 if (CONFIG_H263_ENCODER &&
3375                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3376                     ff_h263_update_motion_val(s);
3378                 if(next_block==0){ //FIXME 16 vs linesize16
3379                     s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad                     , s->linesize  ,16);
3380                     s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize    , s->uvlinesize, 8);
3381                     s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3384                 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3385                     ff_mpv_reconstruct_mb(s, s->block);
             /* ---- simple path: exactly one candidate MB type ---- */
3387                 int motion_x = 0, motion_y = 0;
3388                 s->mv_type=MV_TYPE_16X16;
3389                 // only one MB-Type possible
3392                 case CANDIDATE_MB_TYPE_INTRA:
3395                     motion_x= s->mv[0][0][0] = 0;
3396                     motion_y= s->mv[0][0][1] = 0;
3398                 case CANDIDATE_MB_TYPE_INTER:
3399                     s->mv_dir = MV_DIR_FORWARD;
3401                     motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3402                     motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3404                 case CANDIDATE_MB_TYPE_INTER_I:
3405                     s->mv_dir = MV_DIR_FORWARD;
3406                     s->mv_type = MV_TYPE_FIELD;
3409                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3410                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3411                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3414                 case CANDIDATE_MB_TYPE_INTER4V:
3415                     s->mv_dir = MV_DIR_FORWARD;
3416                     s->mv_type = MV_TYPE_8X8;
3419                         s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3420                         s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3423                 case CANDIDATE_MB_TYPE_DIRECT:
3424                     if (CONFIG_MPEG4_ENCODER) {
3425                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3427                         motion_x=s->b_direct_mv_table[xy][0];
3428                         motion_y=s->b_direct_mv_table[xy][1];
3429                         ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3432                 case CANDIDATE_MB_TYPE_DIRECT0:
3433                     if (CONFIG_MPEG4_ENCODER) {
3434                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3436                         ff_mpeg4_set_direct_mv(s, 0, 0);
3439                 case CANDIDATE_MB_TYPE_BIDIR:
3440                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3442                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3443                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3444                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3445                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3447                 case CANDIDATE_MB_TYPE_BACKWARD:
3448                     s->mv_dir = MV_DIR_BACKWARD;
3450                     motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3451                     motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3453                 case CANDIDATE_MB_TYPE_FORWARD:
3454                     s->mv_dir = MV_DIR_FORWARD;
3456                     motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3457                     motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3459                 case CANDIDATE_MB_TYPE_FORWARD_I:
3460                     s->mv_dir = MV_DIR_FORWARD;
3461                     s->mv_type = MV_TYPE_FIELD;
3464                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3465                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3466                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3469                 case CANDIDATE_MB_TYPE_BACKWARD_I:
3470                     s->mv_dir = MV_DIR_BACKWARD;
3471                     s->mv_type = MV_TYPE_FIELD;
3474                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3475                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3476                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3479                 case CANDIDATE_MB_TYPE_BIDIR_I:
3480                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3481                     s->mv_type = MV_TYPE_FIELD;
3483                     for(dir=0; dir<2; dir++){
3485                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3486                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3487                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3492                     av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3495                 encode_mb(s, motion_x, motion_y);
3497                 // RAL: Update last macroblock type
3498                 s->last_mv_dir = s->mv_dir;
3500                 if (CONFIG_H263_ENCODER &&
3501                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3502                     ff_h263_update_motion_val(s);
3504                 ff_mpv_reconstruct_mb(s, s->block);
3507             /* clean the MV table in IPS frames for direct mode in B-frames */
3508             if(s->mb_intra /* && I,P,S_TYPE */){
3509                 s->p_mv_table[xy][0]=0;
3510                 s->p_mv_table[xy][1]=0;
             /* accumulate per-plane SSE for PSNR reporting */
3513             if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3517                 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3518                 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3520                 s->current_picture.encoding_error[0] += sse(
3521                     s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3522                     s->dest[0], w, h, s->linesize);
3523                 s->current_picture.encoding_error[1] += sse(
3524                     s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
3525                     s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3526                 s->current_picture.encoding_error[2] += sse(
3527                     s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
3528                     s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3531             if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3532                 ff_h263_loop_filter(s);
3534             ff_dlog(s->avctx, "MB %d %d bits\n",
3535                     s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3539     //not beautiful here but we must write it before flushing so it has to be here
3540     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3541         ff_msmpeg4_encode_ext_header(s);
3545 #if FF_API_RTP_CALLBACK
3546 FF_DISABLE_DEPRECATION_WARNINGS
3547     /* Send the last GOB if RTP */
3548     if (s->avctx->rtp_callback) {
3549         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3550         int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3551         /* Call the RTP callback to send the last GOB */
3553         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3555 FF_ENABLE_DEPRECATION_WARNINGS
/* Add a per-slice-thread counter into the main context and zero the source,
 * so statistics gathered in parallel can be summed exactly once.
 * NOTE: expands to TWO statements; must not be used as an unbraced
 * if/else body. */
3561 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold motion-estimation statistics from a slice thread context (src)
 * back into the main encoder context (dst) after parallel ME. */
3562 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3563 MERGE(me.scene_change_score);
3564 MERGE(me.mc_mb_var_sum_temp);
3565 MERGE(me.mb_var_sum_temp);
/* Fold per-slice encoding statistics from a slice thread context (src) into
 * the main context (dst), then append the slice's bitstream onto dst's
 * PutBitContext. Must run after all slice threads have finished encoding. */
3568 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3571 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3572 MERGE(dct_count[1]);
3581 MERGE(er.error_count);
3582 MERGE(padding_bug_score);
3583 MERGE(current_picture.encoding_error[0]);
3584 MERGE(current_picture.encoding_error[1]);
3585 MERGE(current_picture.encoding_error[2]);
/* DCT noise-reduction error accumulators are only merged when the feature
 * is enabled on the destination context. */
3587 if (dst->noise_reduction){
3588 for(i=0; i<64; i++){
3589 MERGE(dct_error_sum[0][i]);
3590 MERGE(dct_error_sum[1][i]);
/* Slice bitstreams must be byte-aligned before concatenation; copy src's
 * bits onto dst and flush so dst's buffer pointer is up to date. */
3594 av_assert1(put_bits_count(&src->pb) % 8 ==0);
3595 av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3596 avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3597 flush_put_bits(&dst->pb);
/* Pick the quality/lambda for the current picture.
 * Priority: a pending next_lambda (consumed unless dry_run), otherwise the
 * rate controller (ff_rate_estimate_qscale) when qscale is not fixed.
 * With adaptive quantization, codec-specific qscale cleanup runs before
 * ff_init_qscale_tab(). Returns negative on rate-control failure
 * (error path elided in this listing). */
3600 static int estimate_qp(MpegEncContext *s, int dry_run){
3601 if (s->next_lambda){
3602 s->current_picture_ptr->f->quality =
3603 s->current_picture.f->quality = s->next_lambda;
/* A dry run (e.g. 2-pass probing) must not consume the queued lambda. */
3604 if(!dry_run) s->next_lambda= 0;
3605 } else if (!s->fixed_qscale) {
3606 int quality = ff_rate_estimate_qscale(s, dry_run);
3607 s->current_picture_ptr->f->quality =
3608 s->current_picture.f->quality = quality;
3609 if (s->current_picture.f->quality < 0)
3613 if(s->adaptive_quant){
/* Smooth per-MB qscales in a codec-legal way before building the table. */
3614 switch(s->codec_id){
3615 case AV_CODEC_ID_MPEG4:
3616 if (CONFIG_MPEG4_ENCODER)
3617 ff_clean_mpeg4_qscales(s);
3619 case AV_CODEC_ID_H263:
3620 case AV_CODEC_ID_H263P:
3621 case AV_CODEC_ID_FLV1:
3622 if (CONFIG_H263_ENCODER)
3623 ff_clean_h263_qscales(s);
3626 ff_init_qscale_tab(s);
3629 s->lambda= s->lambda_table[0];
/* Non-adaptive path (else branch elided): lambda follows picture quality. */
3632 s->lambda = s->current_picture.f->quality;
3637 /* must be called before writing the header */
/* Update temporal distances used for B-frame prediction scaling:
 * pp_time = distance between the two surrounding non-B pictures,
 * pb_time = distance from the previous non-B picture to this B picture.
 * Time is derived from the frame pts scaled by time_base.num. */
3638 static void set_frame_distances(MpegEncContext * s){
3639 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3640 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3642 if(s->pict_type==AV_PICTURE_TYPE_B){
3643 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
/* A B-frame must lie strictly between its two reference pictures. */
3644 av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
/* Non-B path (else branch elided in this listing): advance the
 * last-non-B timestamp and recompute the P-to-P distance. */
3646 s->pp_time= s->time - s->last_non_b_time;
3647 s->last_non_b_time= s->time;
3648 av_assert1(s->picture_number==0 || s->pp_time > 0);
/* Top-level per-picture encode driver. In order:
 *   1. reset ME statistics and frame-distance bookkeeping,
 *   2. choose lambda/qscale (2-pass, rate control, or fixed),
 *   3. run motion estimation across slice threads (P/B) or mark all MBs
 *      intra (I), with optional scene-change promotion P->I,
 *   4. select f_code/b_code and clamp out-of-range MVs,
 *   5. build quantization matrices for MJPEG/AMV,
 *   6. write the codec-specific picture header,
 *   7. encode slices in parallel and merge the resulting contexts.
 * Returns 0 on success, negative on error (several error-return lines are
 * elided in this listing). */
3652 static int encode_picture(MpegEncContext *s, int picture_number)
3656 int context_count = s->slice_context_count;
3658 s->picture_number = picture_number;
3660 /* Reset the average MB variance */
3661 s->me.mb_var_sum_temp =
3662 s->me.mc_mb_var_sum_temp = 0;
3664 /* we need to initialize some time vars before we can encode B-frames */
3665 // RAL: Condition added for MPEG1VIDEO
3666 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3667 set_frame_distances(s);
3668 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3669 ff_set_mpeg4_time(s);
3671 s->me.scene_change_score=0;
3673 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* Rounding-mode bookkeeping: some codecs alternate no_rounding between
 * successive non-B pictures ("flip-flop" rounding). */
3675 if(s->pict_type==AV_PICTURE_TYPE_I){
3676 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3677 else s->no_rounding=0;
3678 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3679 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3680 s->no_rounding ^= 1;
/* Lambda selection: 2nd pass reads stats, otherwise reuse the lambda of
 * the last picture of the matching type until rate control runs below. */
3683 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3684 if (estimate_qp(s,1) < 0)
3686 ff_get_2pass_fcode(s);
3687 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3688 if(s->pict_type==AV_PICTURE_TYPE_B)
3689 s->lambda= s->last_lambda_for[s->pict_type];
3691 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* For everything but AMV/MJPEG the chroma intra matrix aliases the luma
 * one; free a previously-separate chroma matrix before re-aliasing. */
3695 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3696 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3697 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3698 s->q_chroma_intra_matrix = s->q_intra_matrix;
3699 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3702 s->mb_intra=0; //for the rate distortion & bit compare functions
3703 for(i=1; i<context_count; i++){
3704 ret = ff_update_duplicate_context(s->thread_context[i], s);
3712 /* Estimate motion for every MB */
3713 if(s->pict_type != AV_PICTURE_TYPE_I){
/* Compensate ME cost estimates; lambda2 uses 64-bit math to avoid
 * overflow in the multiply. */
3714 s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3715 s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3716 if (s->pict_type != AV_PICTURE_TYPE_B) {
3717 if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3719 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3723 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3724 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3726 for(i=0; i<s->mb_stride*s->mb_height; i++)
3727 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3729 if(!s->fixed_qscale){
3730 /* finding spatial complexity for I-frame rate control */
3731 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3734 for(i=1; i<context_count; i++){
3735 merge_context_after_me(s, s->thread_context[i]);
3737 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3738 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* Scene-change handling: promote a P picture to I when ME reported a
 * score above the configured threshold. */
3741 if (s->me.scene_change_score > s->scenechange_threshold &&
3742 s->pict_type == AV_PICTURE_TYPE_P) {
3743 s->pict_type= AV_PICTURE_TYPE_I;
3744 for(i=0; i<s->mb_stride*s->mb_height; i++)
3745 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3746 if(s->msmpeg4_version >= 3)
3748 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3749 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* f_code selection and long-MV clamping for P/S pictures (and the
 * per-field tables under interlaced ME). */
3753 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3754 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3756 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3758 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3759 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3760 s->f_code= FFMAX3(s->f_code, a, b);
3763 ff_fix_long_p_mvs(s, s->intra_penalty ? CANDIDATE_MB_TYPE_INTER : CANDIDATE_MB_TYPE_INTRA);
3764 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3765 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3769 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3770 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
/* B pictures need both a forward (f_code) and backward (b_code) range. */
3775 if(s->pict_type==AV_PICTURE_TYPE_B){
3778 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3779 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3780 s->f_code = FFMAX(a, b);
3782 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3783 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3784 s->b_code = FFMAX(a, b);
3786 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3787 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3788 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3789 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3790 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3792 for(dir=0; dir<2; dir++){
3795 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3796 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3797 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3798 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* Final (non-dry-run) qscale decision for this picture. */
3806 if (estimate_qp(s, 0) < 0)
3809 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3810 s->pict_type == AV_PICTURE_TYPE_I &&
3811 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3812 s->qscale= 3; //reduce clipping problems
/* MJPEG bakes qscale directly into the quantization matrices. */
3814 if (s->out_format == FMT_MJPEG) {
3815 const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3816 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3818 if (s->avctx->intra_matrix) {
3820 luma_matrix = s->avctx->intra_matrix;
3822 if (s->avctx->chroma_intra_matrix)
3823 chroma_matrix = s->avctx->chroma_intra_matrix;
3825 /* for mjpeg, we do include qscale in the matrix */
3827 int j = s->idsp.idct_permutation[i];
3829 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3830 s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3832 s->y_dc_scale_table=
3833 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3834 s->chroma_intra_matrix[0] =
3835 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3836 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3837 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3838 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3839 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV uses fixed sp5x quantization tables and constant DC scales. */
3842 if(s->codec_id == AV_CODEC_ID_AMV){
3843 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3844 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3846 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3848 s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3849 s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3851 s->y_dc_scale_table= y;
3852 s->c_dc_scale_table= c;
3853 s->intra_matrix[0] = 13;
3854 s->chroma_intra_matrix[0] = 14;
3855 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3856 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3857 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3858 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3862 //FIXME var duplication
3863 s->current_picture_ptr->f->key_frame =
3864 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3865 s->current_picture_ptr->f->pict_type =
3866 s->current_picture.f->pict_type = s->pict_type;
3868 if (s->current_picture.f->key_frame)
3869 s->picture_in_gop_number=0;
/* Write the codec-specific picture header (case labels elided here). */
3871 s->mb_x = s->mb_y = 0;
3872 s->last_bits= put_bits_count(&s->pb);
3873 switch(s->out_format) {
3875 if (CONFIG_MJPEG_ENCODER && s->huffman != HUFFMAN_TABLE_OPTIMAL)
3876 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3877 s->pred, s->intra_matrix, s->chroma_intra_matrix);
3880 if (CONFIG_H261_ENCODER)
3881 ff_h261_encode_picture_header(s, picture_number);
3884 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3885 ff_wmv2_encode_picture_header(s, picture_number);
3886 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3887 ff_msmpeg4_encode_picture_header(s, picture_number);
3888 else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3889 ret = ff_mpeg4_encode_picture_header(s, picture_number);
3892 } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3893 ret = ff_rv10_encode_picture_header(s, picture_number);
3897 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3898 ff_rv20_encode_picture_header(s, picture_number);
3899 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3900 ff_flv_encode_picture_header(s, picture_number);
3901 else if (CONFIG_H263_ENCODER)
3902 ff_h263_encode_picture_header(s, picture_number);
3905 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3906 ff_mpeg1_encode_picture_header(s, picture_number);
3911 bits= put_bits_count(&s->pb);
3912 s->header_bits= bits - s->last_bits;
/* Propagate post-ME state to slice contexts, encode slices in parallel,
 * then merge statistics and bitstreams back into the main context. */
3914 for(i=1; i<context_count; i++){
3915 update_duplicate_context_after_me(s->thread_context[i], s);
3917 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3918 for(i=1; i<context_count; i++){
3919 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3920 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
3921 merge_context_after_encode(s, s->thread_context[i]);
/* DCT-domain noise reduction (C reference implementation).
 * Accumulates the magnitude of each coefficient into dct_error_sum and
 * shrinks coefficients toward zero by dct_offset, clamping at zero so a
 * coefficient never changes sign. Separate statistics are kept for
 * intra vs inter blocks. Lines for the positive/negative split and loop
 * closing braces are elided in this listing. */
3927 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3928 const int intra= s->mb_intra;
3931 s->dct_count[intra]++;
3933 for(i=0; i<64; i++){
3934 int level= block[i];
/* Positive-level branch: record magnitude, subtract offset, clamp at 0. */
3938 s->dct_error_sum[intra][i] += level;
3939 level -= s->dct_offset[intra][i];
3940 if(level<0) level=0;
/* Negative-level branch: mirror of the above. */
3942 s->dct_error_sum[intra][i] -= level;
3943 level += s->dct_offset[intra][i];
3944 if(level>0) level=0;
/* Trellis (rate-distortion optimal) quantization of one 8x8 block.
 * Forward-DCTs the block, builds up to two candidate quantized levels per
 * coefficient, then runs a Viterbi-style search over (run, level) paths
 * minimizing distortion + lambda * bits, using the codec's VLC length
 * tables. Writes the chosen levels back into block[] in coded order and
 * returns the index of the last nonzero coefficient (or -1/负 when the
 * block quantizes to nothing). *overflow is set when a level exceeded
 * max_qcoeff. Many structural lines (braces, else-branches, returns) are
 * elided in this listing. */
3951 static int dct_quantize_trellis_c(MpegEncContext *s,
3952 int16_t *block, int n,
3953 int qscale, int *overflow){
3955 const uint16_t *matrix;
3956 const uint8_t *scantable;
3957 const uint8_t *perm_scantable;
3959 unsigned int threshold1, threshold2;
3971 int coeff_count[64];
3972 int qmul, qadd, start_i, last_non_zero, i, dc;
3973 const int esc_length= s->ac_esc_length;
3975 uint8_t * last_length;
3976 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3979 s->fdsp.fdct(block);
3981 if(s->dct_error_sum)
3982 s->denoise_dct(s, block);
3984 qadd= ((qscale-1)|1)*8;
/* MPEG-2 can use the non-linear qscale mapping. */
3986 if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3987 else mpeg2_qscale = qscale << 1;
/* Intra path: pick intra scan/matrix/VLC tables, quantize DC separately. */
3991 scantable= s->intra_scantable.scantable;
3992 perm_scantable= s->intra_scantable.permutated;
4000 /* For AIC we skip quant/dequant of INTRADC */
4005 /* note: block[0] is assumed to be positive */
4006 block[0] = (block[0] + (q >> 1)) / q;
4009 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4010 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4011 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4012 bias= 1<<(QMAT_SHIFT-1);
4014 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4015 length = s->intra_chroma_ac_vlc_length;
4016 last_length= s->intra_chroma_ac_vlc_last_length;
4018 length = s->intra_ac_vlc_length;
4019 last_length= s->intra_ac_vlc_last_length;
/* Inter path (else branch elided): inter scan/matrix/VLC tables. */
4022 scantable= s->inter_scantable.scantable;
4023 perm_scantable= s->inter_scantable.permutated;
4026 qmat = s->q_inter_matrix[qscale];
4027 matrix = s->inter_matrix;
4028 length = s->inter_ac_vlc_length;
4029 last_length= s->inter_ac_vlc_last_length;
4033 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4034 threshold2= (threshold1<<1);
/* Find the last coefficient that survives quantization. */
4036 for(i=63; i>=start_i; i--) {
4037 const int j = scantable[i];
4038 int level = block[j] * qmat[j];
4040 if(((unsigned)(level+threshold1))>threshold2){
/* Build candidate levels: the rounded level and level-1 (or mirrored for
 * negative coefficients); below-threshold coefficients get only +/-1. */
4046 for(i=start_i; i<=last_non_zero; i++) {
4047 const int j = scantable[i];
4048 int level = block[j] * qmat[j];
4050 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4051 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4052 if(((unsigned)(level+threshold1))>threshold2){
4054 level= (bias + level)>>QMAT_SHIFT;
4056 coeff[1][i]= level-1;
4057 // coeff[2][k]= level-2;
4059 level= (bias - level)>>QMAT_SHIFT;
4060 coeff[0][i]= -level;
4061 coeff[1][i]= -level+1;
4062 // coeff[2][k]= -level+2;
4064 coeff_count[i]= FFMIN(level, 2);
4065 av_assert2(coeff_count[i]);
4068 coeff[0][i]= (level>>31)|1;
4073 *overflow= s->max_qcoeff < max; //overflow might have happened
4075 if(last_non_zero < start_i){
4076 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4077 return last_non_zero;
/* Trellis search: survivor[] holds path start positions, score_tab[] the
 * best cost up to each position, run_tab/level_tab the chosen path. */
4080 score_tab[start_i]= 0;
4081 survivor[0]= start_i;
4084 for(i=start_i; i<=last_non_zero; i++){
4085 int level_index, j, zero_distortion;
4086 int dct_coeff= FFABS(block[ scantable[i] ]);
4087 int best_score=256*256*256*120;
/* ifast FDCT output is scaled; undo with the inverse AAN scale table. */
4089 if (s->fdsp.fdct == ff_fdct_ifast)
4090 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4091 zero_distortion= dct_coeff*dct_coeff;
4093 for(level_index=0; level_index < coeff_count[i]; level_index++){
4095 int level= coeff[level_index][i];
4096 const int alevel= FFABS(level);
/* Dequantize per output format to measure true distortion. */
4101 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4102 unquant_coeff= alevel*qmul + qadd;
4103 } else if(s->out_format == FMT_MJPEG) {
4104 j = s->idsp.idct_permutation[scantable[i]];
4105 unquant_coeff = alevel * matrix[j] * 8;
4107 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4109 unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4110 unquant_coeff = (unquant_coeff - 1) | 1;
4112 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4113 unquant_coeff = (unquant_coeff - 1) | 1;
4118 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* Levels representable by the VLC table (|level+64| < 128): use exact
 * code lengths; otherwise fall back to the escape-code length. */
4120 if((level&(~127)) == 0){
4121 for(j=survivor_count-1; j>=0; j--){
4122 int run= i - survivor[j];
4123 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4124 score += score_tab[i-run];
4126 if(score < best_score){
4129 level_tab[i+1]= level-64;
/* H.263/H.261 track a separate "last coefficient" VLC cost. */
4133 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4134 for(j=survivor_count-1; j>=0; j--){
4135 int run= i - survivor[j];
4136 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4137 score += score_tab[i-run];
4138 if(score < last_score){
4141 last_level= level-64;
4147 distortion += esc_length*lambda;
4148 for(j=survivor_count-1; j>=0; j--){
4149 int run= i - survivor[j];
4150 int score= distortion + score_tab[i-run];
4152 if(score < best_score){
4155 level_tab[i+1]= level-64;
4159 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4160 for(j=survivor_count-1; j>=0; j--){
4161 int run= i - survivor[j];
4162 int score= distortion + score_tab[i-run];
4163 if(score < last_score){
4166 last_level= level-64;
4174 score_tab[i+1]= best_score;
4176 // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
/* Prune survivors that can no longer lead to a better path. */
4177 if(last_non_zero <= 27){
4178 for(; survivor_count; survivor_count--){
4179 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4183 for(; survivor_count; survivor_count--){
4184 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4189 survivor[ survivor_count++ ]= i+1;
/* For non-H.263 formats, pick the best stopping position afterwards. */
4192 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4193 last_score= 256*256*256*120;
4194 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4195 int score= score_tab[i];
4197 score += lambda * 2; // FIXME more exact?
4199 if(score < last_score){
4202 last_level= level_tab[i];
4203 last_run= run_tab[i];
4208 s->coded_score[n] = last_score;
4210 dc= FFABS(block[0]);
4211 last_non_zero= last_i - 1;
4212 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4214 if(last_non_zero < start_i)
4215 return last_non_zero;
/* Special case: only DC survives — re-decide the single AC candidate
 * against the cost of dropping it entirely. */
4217 if(last_non_zero == 0 && start_i == 0){
4219 int best_score= dc * dc;
4221 for(i=0; i<coeff_count[0]; i++){
4222 int level= coeff[i][0];
4223 int alevel= FFABS(level);
4224 int unquant_coeff, score, distortion;
4226 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4227 unquant_coeff= (alevel*qmul + qadd)>>3;
4229 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4230 unquant_coeff = (unquant_coeff - 1) | 1;
4232 unquant_coeff = (unquant_coeff + 4) >> 3;
4233 unquant_coeff<<= 3 + 3;
4235 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4237 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4238 else score= distortion + esc_length*lambda;
4240 if(score < best_score){
4242 best_level= level - 64;
4245 block[0]= best_level;
4246 s->coded_score[n] = best_score - dc*dc;
4247 if(best_level == 0) return -1;
4248 else return last_non_zero;
/* Backtrack the winning path into block[] (coded/permuted order). */
4252 av_assert2(last_level);
4254 block[ perm_scantable[last_non_zero] ]= last_level;
4257 for(; i>start_i; i -= run_tab[i] + 1){
4258 block[ perm_scantable[i-1] ]= level_tab[i];
4261 return last_non_zero;
/* 8x8 DCT basis functions in the IDCT's permuted order, scaled by
 * BASIS_SHIFT; filled lazily by build_basis() and used by
 * dct_quantize_refine() to evaluate spatial-domain error of coefficient
 * changes. */
4264 static int16_t basis[64][64];
/* Precompute the DCT basis table for the given coefficient permutation.
 * Loop headers over (i,j,x,y) are elided in this listing. */
4266 static void build_basis(uint8_t *perm){
4273 double s= 0.25*(1<<BASIS_SHIFT);
4275 int perm_index= perm[index];
/* DC terms of each dimension carry the 1/sqrt(2) normalization factor. */
4276 if(i==0) s*= sqrt(0.5);
4277 if(j==0) s*= sqrt(0.5);
4278 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Quantizer noise shaping: iteratively refine an already-quantized block.
 * Tries +/-1 changes on each coefficient (and the intra DC), scoring each
 * candidate by weighted reconstruction error (via try_8x8basis) plus the
 * VLC bit-cost delta, and applies the best change until no improvement
 * remains. Returns the updated last-nonzero index. Several structural
 * lines (loop heads, braces, the apply/terminate logic) are elided in
 * this listing. */
4285 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4286 int16_t *block, int16_t *weight, int16_t *orig,
4289 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4290 const uint8_t *scantable;
4291 const uint8_t *perm_scantable;
4292 // unsigned int threshold1, threshold2;
4297 int qmul, qadd, start_i, last_non_zero, i, dc;
4299 uint8_t * last_length;
4301 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* Lazily build the DCT basis table on first use. */
4303 if(basis[0][0] == 0)
4304 build_basis(s->idsp.idct_permutation);
/* Intra path: intra scan tables and VLC length tables. */
4309 scantable= s->intra_scantable.scantable;
4310 perm_scantable= s->intra_scantable.permutated;
4317 /* For AIC we skip quant/dequant of INTRADC */
4321 q <<= RECON_SHIFT-3;
4322 /* note: block[0] is assumed to be positive */
4324 // block[0] = (block[0] + (q >> 1)) / q;
4326 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4327 // bias= 1<<(QMAT_SHIFT-1);
4328 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4329 length = s->intra_chroma_ac_vlc_length;
4330 last_length= s->intra_chroma_ac_vlc_last_length;
4332 length = s->intra_ac_vlc_length;
4333 last_length= s->intra_ac_vlc_last_length;
/* Inter path (else branch elided): inter tables. */
4336 scantable= s->inter_scantable.scantable;
4337 perm_scantable= s->inter_scantable.permutated;
4340 length = s->inter_ac_vlc_length;
4341 last_length= s->inter_ac_vlc_last_length;
4343 last_non_zero = s->block_last_index[n];
/* rem[] = residual between current reconstruction and the original,
 * in RECON_SHIFT fixed point. */
4345 dc += (1<<(RECON_SHIFT-1));
4346 for(i=0; i<64; i++){
4347 rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
/* Map the caller's weights into the 16..63 range used for scoring. */
4351 for(i=0; i<64; i++){
4356 w= FFABS(weight[i]) + qns*one;
4357 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4360 // w=weight[i] = (63*qns + (w/2)) / w;
4363 av_assert2(w<(1<<6));
4366 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* Build the run-length view of the current block and subtract the
 * dequantized coefficients from rem[] via the basis functions. */
4370 for(i=start_i; i<=last_non_zero; i++){
4371 int j= perm_scantable[i];
4372 const int level= block[j];
4376 if(level<0) coeff= qmul*level - qadd;
4377 else coeff= qmul*level + qadd;
4378 run_tab[rle_index++]=run;
4381 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
/* Main refinement loop: baseline score is "change nothing". */
4388 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4391 int run2, best_unquant_change=0, analyze_gradient;
4392 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
/* d1[] approximates the gradient of the error w.r.t. each coefficient,
 * used below to skip changes that would fight the gradient. */
4394 if(analyze_gradient){
4395 for(i=0; i<64; i++){
4398 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
/* Try +/-1 on the intra DC coefficient. */
4404 const int level= block[0];
4405 int change, old_coeff;
4407 av_assert2(s->mb_intra);
4411 for(change=-1; change<=1; change+=2){
4412 int new_level= level + change;
4413 int score, new_coeff;
4415 new_coeff= q*new_level;
4416 if(new_coeff >= 2048 || new_coeff < 0)
4419 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4420 new_coeff - old_coeff);
4421 if(score<best_score){
4424 best_change= change;
4425 best_unquant_change= new_coeff - old_coeff;
4432 run2= run_tab[rle_index++];
/* Try +/-1 on every AC coefficient, scoring bit-cost deltas from the
 * run/level VLC tables for each structural case (interior coefficient,
 * last coefficient, newly created, removed). */
4436 for(i=start_i; i<64; i++){
4437 int j= perm_scantable[i];
4438 const int level= block[j];
4439 int change, old_coeff;
4441 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4445 if(level<0) old_coeff= qmul*level - qadd;
4446 else old_coeff= qmul*level + qadd;
4447 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4451 av_assert2(run2>=0 || i >= last_non_zero );
4454 for(change=-1; change<=1; change+=2){
4455 int new_level= level + change;
4456 int score, new_coeff, unquant_change;
4459 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4463 if(new_level<0) new_coeff= qmul*new_level - qadd;
4464 else new_coeff= qmul*new_level + qadd;
4465 if(new_coeff >= 2048 || new_coeff <= -2048)
4467 //FIXME check for overflow
4470 if(level < 63 && level > -63){
4471 if(i < last_non_zero)
4472 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4473 - length[UNI_AC_ENC_INDEX(run, level+64)];
4475 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4476 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* Case: a zero coefficient becomes +/-1 (insertion). */
4479 av_assert2(FFABS(new_level)==1);
4481 if(analyze_gradient){
4482 int g= d1[ scantable[i] ];
4483 if(g && (g^new_level) >= 0)
4487 if(i < last_non_zero){
4488 int next_i= i + run2 + 1;
4489 int next_level= block[ perm_scantable[next_i] ] + 64;
4491 if(next_level&(~127))
4494 if(next_i < last_non_zero)
4495 score += length[UNI_AC_ENC_INDEX(run, 65)]
4496 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4497 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4499 score += length[UNI_AC_ENC_INDEX(run, 65)]
4500 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4501 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4503 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4505 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4506 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* Case: a +/-1 coefficient becomes zero (removal). */
4512 av_assert2(FFABS(level)==1);
4514 if(i < last_non_zero){
4515 int next_i= i + run2 + 1;
4516 int next_level= block[ perm_scantable[next_i] ] + 64;
4518 if(next_level&(~127))
4521 if(next_i < last_non_zero)
4522 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4523 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4524 - length[UNI_AC_ENC_INDEX(run, 65)];
4526 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4527 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4528 - length[UNI_AC_ENC_INDEX(run, 65)];
4530 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4532 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4533 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* Combine bit-cost delta with the distortion delta from try_8x8basis. */
4540 unquant_change= new_coeff - old_coeff;
4541 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4543 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4545 if(score<best_score){
4548 best_change= change;
4549 best_unquant_change= unquant_change;
4553 prev_level= level + 64;
4554 if(prev_level&(~127))
/* Apply the winning change and update last_non_zero accordingly. */
4564 int j= perm_scantable[ best_coeff ];
4566 block[j] += best_change;
4568 if(best_coeff > last_non_zero){
4569 last_non_zero= best_coeff;
4570 av_assert2(block[j]);
4572 for(; last_non_zero>=start_i; last_non_zero--){
4573 if(block[perm_scantable[last_non_zero]])
/* Rebuild the run-length table and fold the change into rem[]. */
4580 for(i=start_i; i<=last_non_zero; i++){
4581 int j= perm_scantable[i];
4582 const int level= block[j];
4585 run_tab[rle_index++]=run;
4592 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4598 return last_non_zero;
4602 * Permute an 8x8 block according to permutation.
4603 * @param block the block which will be permuted according to
4604 * the given permutation vector
4605 * @param permutation the permutation vector
4606 * @param last the last non zero coefficient in scantable order, used to
4607 * speed the permutation up
4608 * @param scantable the used scantable, this is only used to speed the
4609 * permutation up, the block is not (inverse) permutated
4610 * to scantable order!
4612 void ff_block_permute(int16_t *block, uint8_t *permutation,
4613 const uint8_t *scantable, int last)
4620 //FIXME it is ok but not clean and might fail for some permutations
4621 // if (permutation[1] == 1)
/* Copy-out pass: stash the nonzero coefficients (temp[] declaration and
 * the clearing of block[] are elided in this listing). */
4624 for (i = 0; i <= last; i++) {
4625 const int j = scantable[i];
/* Copy-back pass: write each coefficient to its permuted position. */
4630 for (i = 0; i <= last; i++) {
4631 const int j = scantable[i];
4632 const int perm_j = permutation[j];
4633 block[perm_j] = temp[j];
/* Plain (non-trellis) quantization of one 8x8 block: forward DCT,
 * optional denoising, per-coefficient multiply by the precomputed qmat
 * with rounding bias, then a final permutation so the data matches the
 * IDCT's coefficient order. Returns the last nonzero index; *overflow is
 * set when a level exceeded max_qcoeff. Some structural lines (sign
 * handling, braces) are elided in this listing. */
4637 int ff_dct_quantize_c(MpegEncContext *s,
4638 int16_t *block, int n,
4639 int qscale, int *overflow)
4641 int i, j, level, last_non_zero, q, start_i;
4643 const uint8_t *scantable;
4646 unsigned int threshold1, threshold2;
4648 s->fdsp.fdct(block);
4650 if(s->dct_error_sum)
4651 s->denoise_dct(s, block);
/* Intra path: separate DC quantization, intra matrix and bias. */
4654 scantable= s->intra_scantable.scantable;
4662 /* For AIC we skip quant/dequant of INTRADC */
4665 /* note: block[0] is assumed to be positive */
4666 block[0] = (block[0] + (q >> 1)) / q;
4669 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4670 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
/* Inter path (else branch elided): inter matrix and bias. */
4672 scantable= s->inter_scantable.scantable;
4675 qmat = s->q_inter_matrix[qscale];
4676 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4678 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4679 threshold2= (threshold1<<1);
/* Scan backwards for the last coefficient that survives quantization. */
4680 for(i=63;i>=start_i;i--) {
4682 level = block[j] * qmat[j];
4684 if(((unsigned)(level+threshold1))>threshold2){
/* Quantize the surviving range; sub-threshold coefficients become 0. */
4691 for(i=start_i; i<=last_non_zero; i++) {
4693 level = block[j] * qmat[j];
4695 // if( bias+level >= (1<<QMAT_SHIFT)
4696 // || bias-level >= (1<<QMAT_SHIFT)){
4697 if(((unsigned)(level+threshold1))>threshold2){
4699 level= (bias + level)>>QMAT_SHIFT;
4702 level= (bias - level)>>QMAT_SHIFT;
4710 *overflow= s->max_qcoeff < max; //overflow might have happened
4712 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4713 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4714 ff_block_permute(block, s->idsp.idct_permutation,
4715 scantable, last_non_zero);
4717 return last_non_zero;
/* --- H.263 encoder registration: private options, AVClass, AVCodec. --- */
4720 #define OFFSET(x) offsetof(MpegEncContext, x)
4721 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options exposed via the AVOption system (terminator elided). */
4722 static const AVOption h263_options[] = {
4723 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4724 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4729 static const AVClass h263_class = {
4730 .class_name = "H.263 encoder",
4731 .item_name = av_default_item_name,
4732 .option = h263_options,
4733 .version = LIBAVUTIL_VERSION_INT,
/* Codec registration entry (".name" line elided in this listing). */
4736 AVCodec ff_h263_encoder = {
4738 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4739 .type = AVMEDIA_TYPE_VIDEO,
4740 .id = AV_CODEC_ID_H263,
4741 .priv_data_size = sizeof(MpegEncContext),
4742 .init = ff_mpv_encode_init,
4743 .encode2 = ff_mpv_encode_picture,
4744 .close = ff_mpv_encode_end,
4745 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4746 .priv_class = &h263_class,
/* --- H.263+ (H.263-1998) encoder registration. --- */
/* Private options (terminator elided). */
4749 static const AVOption h263p_options[] = {
4750 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4751 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4752 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4753 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
4757 static const AVClass h263p_class = {
4758 .class_name = "H.263p encoder",
4759 .item_name = av_default_item_name,
4760 .option = h263p_options,
4761 .version = LIBAVUTIL_VERSION_INT,
/* Codec registration entry (".name" line elided in this listing);
 * supports slice threading, unlike plain H.263 above. */
4764 AVCodec ff_h263p_encoder = {
4766 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4767 .type = AVMEDIA_TYPE_VIDEO,
4768 .id = AV_CODEC_ID_H263P,
4769 .priv_data_size = sizeof(MpegEncContext),
4770 .init = ff_mpv_encode_init,
4771 .encode2 = ff_mpv_encode_picture,
4772 .close = ff_mpv_encode_end,
4773 .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4774 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4775 .priv_class = &h263p_class,
4778 static const AVClass msmpeg4v2_class = {
4779 .class_name = "msmpeg4v2 encoder",
4780 .item_name = av_default_item_name,
4781 .option = ff_mpv_generic_options,
4782 .version = LIBAVUTIL_VERSION_INT,
4785 AVCodec ff_msmpeg4v2_encoder = {
4786 .name = "msmpeg4v2",
4787 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4788 .type = AVMEDIA_TYPE_VIDEO,
4789 .id = AV_CODEC_ID_MSMPEG4V2,
4790 .priv_data_size = sizeof(MpegEncContext),
4791 .init = ff_mpv_encode_init,
4792 .encode2 = ff_mpv_encode_picture,
4793 .close = ff_mpv_encode_end,
4794 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4795 .priv_class = &msmpeg4v2_class,
4798 static const AVClass msmpeg4v3_class = {
4799 .class_name = "msmpeg4v3 encoder",
4800 .item_name = av_default_item_name,
4801 .option = ff_mpv_generic_options,
4802 .version = LIBAVUTIL_VERSION_INT,
4805 AVCodec ff_msmpeg4v3_encoder = {
4807 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4808 .type = AVMEDIA_TYPE_VIDEO,
4809 .id = AV_CODEC_ID_MSMPEG4V3,
4810 .priv_data_size = sizeof(MpegEncContext),
4811 .init = ff_mpv_encode_init,
4812 .encode2 = ff_mpv_encode_picture,
4813 .close = ff_mpv_encode_end,
4814 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4815 .priv_class = &msmpeg4v3_class,
4818 static const AVClass wmv1_class = {
4819 .class_name = "wmv1 encoder",
4820 .item_name = av_default_item_name,
4821 .option = ff_mpv_generic_options,
4822 .version = LIBAVUTIL_VERSION_INT,
4825 AVCodec ff_wmv1_encoder = {
4827 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4828 .type = AVMEDIA_TYPE_VIDEO,
4829 .id = AV_CODEC_ID_WMV1,
4830 .priv_data_size = sizeof(MpegEncContext),
4831 .init = ff_mpv_encode_init,
4832 .encode2 = ff_mpv_encode_picture,
4833 .close = ff_mpv_encode_end,
4834 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4835 .priv_class = &wmv1_class,