2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/pixdesc.h"
40 #include "libavutil/opt.h"
41 #include "libavutil/timer.h"
46 #include "mpegvideo.h"
47 #include "mpegvideodata.h"
51 #include "mjpegenc_common.h"
53 #include "mpegutils.h"
56 #include "pixblockdsp.h"
60 #include "aandcttab.h"
62 #include "mpeg4video.h"
64 #include "bytestream.h"
/* Fixed-point precision (in bits) of the intra/inter quantizer bias values. */
71 #define QUANT_BIAS_SHIFT 8

/* Precision of the 16-bit quantizer multiplier tables used by the SIMD path. */
73 #define QMAT_SHIFT_MMX 16

/* Forward declarations for encoder internals defined later in this file. */
76 static int encode_picture(MpegEncContext *s, int picture_number);
77 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
78 static int sse_mb(MpegEncContext *s);
79 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
80 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);

/* Default motion-vector penalty table, indexed by f_code and MV delta. */
82 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
/* Default f_code lookup table, indexed by (mv + MAX_MV). */
83 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];

/* Generic AVOption list shared by the mpegvideo-based encoders. */
85 const AVOption ff_mpv_generic_options[] = {
/**
 * Precompute reciprocal quantization multiplier tables for every qscale
 * in [qmin, qmax] from the given quantization matrix.
 *
 * qmat receives 32-bit multipliers for the C quantizers; qmat16 receives
 * the 16-bit multiplier ([0]) and bias ([1]) pairs used by the SIMD
 * quantizer.  Which scaling is applied depends on the forward-DCT
 * implementation selected in s->fdsp (islow/faan vs. ifast vs. default).
 */
90 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
91 uint16_t (*qmat16)[2][64],
92 const uint16_t *quant_matrix,
93 int bias, int qmin, int qmax, int intra)
95 FDCTDSPContext *fdsp = &s->fdsp;
/* One table row per qscale value. */
99 for (qscale = qmin; qscale <= qmax; qscale++) {
/* Map qscale to the effective scale: MPEG-2 non-linear table, or 2*qscale. */
103 if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
104 else qscale2 = qscale << 1;
/* Accurate (islow/faan) DCTs: matrix coefficients only, no AAN scaling. */
106 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
108 fdsp->fdct == ff_faandct ||
109 #endif /* CONFIG_FAANDCT */
110 fdsp->fdct == ff_jpeg_fdct_islow_10) {
111 for (i = 0; i < 64; i++) {
112 const int j = s->idsp.idct_permutation[i];
113 int64_t den = (int64_t) qscale2 * quant_matrix[j];
114 /* 16 <= qscale * quant_matrix[i] <= 7905
115 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
116 * 19952 <= x <= 249205026
117 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
118 * 3444240 >= (1 << 36) / (x) >= 275 */
120 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
/* ifast DCT leaves the AAN scale factors in its output, so fold them in. */
122 } else if (fdsp->fdct == ff_fdct_ifast) {
123 for (i = 0; i < 64; i++) {
124 const int j = s->idsp.idct_permutation[i];
125 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
126 /* 16 <= qscale * quant_matrix[i] <= 7905
127 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
128 * 19952 <= x <= 249205026
129 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
130 * 3444240 >= (1 << 36) / (x) >= 275 */
132 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
/* Default path: also build the 16-bit multiplier/bias tables for SIMD. */
135 for (i = 0; i < 64; i++) {
136 const int j = s->idsp.idct_permutation[i];
137 int64_t den = (int64_t) qscale2 * quant_matrix[j];
138 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
139 * Assume x = qscale * quant_matrix[i]
141 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
142 * so 32768 >= (1 << 19) / (x) >= 67 */
143 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
144 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
145 // (qscale * quant_matrix[i]);
146 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
/* Keep the 16-bit multiplier away from 0 and from 0x8000 (sign bit). */
148 if (qmat16[qscale][0][i] == 0 ||
149 qmat16[qscale][0][i] == 128 * 256)
150 qmat16[qscale][0][i] = 128 * 256 - 1;
151 qmat16[qscale][1][i] =
152 ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
153 qmat16[qscale][0][i]);
/* Overflow check: lower the working shift while max*qmat would exceed INT_MAX. */
157 for (i = intra; i < 64; i++) {
159 if (fdsp->fdct == ff_fdct_ifast) {
160 max = (8191LL * ff_aanscales[i]) >> 14;
162 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
168 av_log(NULL, AV_LOG_INFO,
169 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/**
 * Derive s->qscale (and s->lambda2) from the current s->lambda,
 * clipping the result to the configured qmin/qmax range.
 */
174 static inline void update_qscale(MpegEncContext *s)
/* NOTE(review): the "&& 0" makes this non-linear-qscale branch dead code;
 * it is kept here but never executed. */
176 if (s->q_scale_type == 1 && 0) {
178 int bestdiff=INT_MAX;
/* Search the MPEG-2 non-linear qscale table for the entry closest to lambda. */
181 for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
182 int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
/* Skip entries outside [qmin, qmax] (qmax is waived when VBV forces it). */
183 if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
184 (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
186 if (diff < bestdiff) {
/* Linear mapping: qscale ~= lambda * 139 / 2^(FF_LAMBDA_SHIFT + 7), rounded. */
193 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
194 (FF_LAMBDA_SHIFT + 7);
195 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
/* Keep lambda2 (rounded lambda^2) consistent with the current lambda. */
198 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/**
 * Write a 64-entry quantization matrix to the bitstream in zigzag order,
 * 8 bits per coefficient.
 */
202 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
208 for (i = 0; i < 64; i++) {
209 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
216 * init s->current_picture.qscale_table from s->lambda_table
218 void ff_init_qscale_tab(MpegEncContext *s)
220 int8_t * const qscale_table = s->current_picture.qscale_table;
/* Map each macroblock's lambda to a qscale via the same linear
 * lambda*139 >> (FF_LAMBDA_SHIFT+7) rule used in update_qscale(),
 * clipped to the configured qmin/qmax range. */
223 for (i = 0; i < s->mb_num; i++) {
224 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
225 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
226 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/**
 * Copy per-picture state fields from src into a duplicate (slice/thread)
 * context after motion estimation, so the duplicates stay in sync.
 */
231 static void update_duplicate_context_after_me(MpegEncContext *dst,
234 #define COPY(a) dst->a= src->a
236 COPY(current_picture);
242 COPY(picture_in_gop_number);
243 COPY(gop_picture_number);
244 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
245 COPY(progressive_frame); // FIXME don't set in encode_header
246 COPY(partitioned_frame); // FIXME don't set in encode_header
251 * Set the given MpegEncContext to defaults for encoding.
252 * the changed fields will not depend upon the prior state of the MpegEncContext.
254 static void mpv_encode_defaults(MpegEncContext *s)
257 ff_mpv_common_defaults(s);
/* Initialize the shared default f_code table; MVs in [-16, 16) map to f_code 1. */
259 for (i = -16; i < 16; i++) {
260 default_fcode_tab[i + MAX_MV] = 1;
/* Point the context at the file-scope default ME penalty / f_code tables. */
262 s->me.mv_penalty = default_mv_penalty;
263 s->fcode_tab = default_fcode_tab;
/* Reset per-stream picture counters. */
265 s->input_picture_number = 0;
266 s->picture_in_gop_number = 0;
/**
 * Select the DCT quantizer implementations for this context:
 * arch-specific (x86) overrides first, then the C fallbacks, and the
 * trellis quantizer when requested via avctx->trellis.
 */
269 av_cold int ff_dct_encode_init(MpegEncContext *s)
272 ff_dct_encode_init_x86(s);
274 if (CONFIG_H263_ENCODER)
275 ff_h263dsp_init(&s->h263dsp);
/* Fall back to the C quantizer if no arch-specific one was installed. */
276 if (!s->dct_quantize)
277 s->dct_quantize = ff_dct_quantize_c;
279 s->denoise_dct = denoise_dct_c;
/* Remember the plain quantizer before possibly replacing it with trellis. */
280 s->fast_dct_quantize = s->dct_quantize;
281 if (s->avctx->trellis)
282 s->dct_quantize = dct_quantize_trellis_c;
287 /* init video encoder */
/**
 * Initialize an mpegvideo-family encoder from the user-supplied
 * AVCodecContext: validate options against the selected codec, import
 * deprecated public options, configure the per-codec output format,
 * allocate quantization/statistics tables and init rate control.
 * Returns 0 on success, a negative AVERROR on invalid options, or jumps
 * to the (elided) fail path on allocation failure.
 */
288 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
290 MpegEncContext *s = avctx->priv_data;
291 AVCPBProperties *cpb_props;
292 int i, ret, format_supported;
294 mpv_encode_defaults(s);
/* Reject pixel formats the selected codec cannot encode. */
296 switch (avctx->codec_id) {
297 case AV_CODEC_ID_MPEG2VIDEO:
298 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
299 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
300 av_log(avctx, AV_LOG_ERROR,
301 "only YUV420 and YUV422 are supported\n");
305 case AV_CODEC_ID_MJPEG:
306 case AV_CODEC_ID_AMV:
307 format_supported = 0;
308 /* JPEG color space */
309 if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
310 avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
311 avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
312 (avctx->color_range == AVCOL_RANGE_JPEG &&
313 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
314 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
315 avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
316 format_supported = 1;
317 /* MPEG color space */
318 else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
319 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
320 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
321 avctx->pix_fmt == AV_PIX_FMT_YUV444P))
322 format_supported = 1;
324 if (!format_supported) {
325 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
/* Default case: every other codec is 4:2:0 only. */
330 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
331 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
/* Derive the chroma subsampling mode from the pixel format. */
336 switch (avctx->pix_fmt) {
337 case AV_PIX_FMT_YUVJ444P:
338 case AV_PIX_FMT_YUV444P:
339 s->chroma_format = CHROMA_444;
341 case AV_PIX_FMT_YUVJ422P:
342 case AV_PIX_FMT_YUV422P:
343 s->chroma_format = CHROMA_422;
345 case AV_PIX_FMT_YUVJ420P:
346 case AV_PIX_FMT_YUV420P:
348 s->chroma_format = CHROMA_420;
352 avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
/* Import deprecated public AVCodecContext options into private fields. */
354 #if FF_API_PRIVATE_OPT
355 FF_DISABLE_DEPRECATION_WARNINGS
356 if (avctx->rtp_payload_size)
357 s->rtp_payload_size = avctx->rtp_payload_size;
358 if (avctx->me_penalty_compensation)
359 s->me_penalty_compensation = avctx->me_penalty_compensation;
361 s->me_pre = avctx->pre_me;
362 FF_ENABLE_DEPRECATION_WARNINGS
/* Copy basic stream parameters into the context. */
365 s->bit_rate = avctx->bit_rate;
366 s->width = avctx->width;
367 s->height = avctx->height;
/* Cap the GOP size unless the caller opted into experimental compliance. */
368 if (avctx->gop_size > 600 &&
369 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
370 av_log(avctx, AV_LOG_WARNING,
371 "keyframe interval too large!, reducing it from %d to %d\n",
372 avctx->gop_size, 600);
373 avctx->gop_size = 600;
375 s->gop_size = avctx->gop_size;
377 if (avctx->max_b_frames > MAX_B_FRAMES) {
378 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
379 "is %d.\n", MAX_B_FRAMES);
380 avctx->max_b_frames = MAX_B_FRAMES;
382 s->max_b_frames = avctx->max_b_frames;
383 s->codec_id = avctx->codec->id;
384 s->strict_std_compliance = avctx->strict_std_compliance;
385 s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
386 s->rtp_mode = !!s->rtp_payload_size;
387 s->intra_dc_precision = avctx->intra_dc_precision;
389 // workaround some differences between how applications specify dc precision
390 if (s->intra_dc_precision < 0) {
391 s->intra_dc_precision += 8;
392 } else if (s->intra_dc_precision >= 8)
393 s->intra_dc_precision -= 8;
395 if (s->intra_dc_precision < 0) {
396 av_log(avctx, AV_LOG_ERROR,
397 "intra dc precision must be positive, note some applications use"
398 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
399 return AVERROR(EINVAL);
402 if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))
/* Only MPEG-2 supports a nonzero intra DC precision (up to 3). */
405 if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
406 av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
407 return AVERROR(EINVAL);
409 s->user_specified_pts = AV_NOPTS_VALUE;
411 if (s->gop_size <= 1) {
418 #if FF_API_MOTION_EST
419 FF_DISABLE_DEPRECATION_WARNINGS
420 s->me_method = avctx->me_method;
421 FF_ENABLE_DEPRECATION_WARNINGS
425 s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
428 FF_DISABLE_DEPRECATION_WARNINGS
429 if (avctx->border_masking != 0.0)
430 s->border_masking = avctx->border_masking;
431 FF_ENABLE_DEPRECATION_WARNINGS
/* Adaptive quantization is needed whenever any masking option or QP-RD is on. */
434 s->adaptive_quant = (s->avctx->lumi_masking ||
435 s->avctx->dark_masking ||
436 s->avctx->temporal_cplx_masking ||
437 s->avctx->spatial_cplx_masking ||
438 s->avctx->p_masking ||
440 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
443 s->loop_filter = !!(s->avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
/* Pick a codec-appropriate default VBV buffer size when only max rate is set. */
445 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
446 switch(avctx->codec_id) {
447 case AV_CODEC_ID_MPEG1VIDEO:
448 case AV_CODEC_ID_MPEG2VIDEO:
449 avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
451 case AV_CODEC_ID_MPEG4:
452 case AV_CODEC_ID_MSMPEG4V1:
453 case AV_CODEC_ID_MSMPEG4V2:
454 case AV_CODEC_ID_MSMPEG4V3:
/* Piecewise-linear interpolation between VBV sizes at rate breakpoints. */
455 if (avctx->rc_max_rate >= 15000000) {
456 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
457 } else if(avctx->rc_max_rate >= 2000000) {
458 avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
459 } else if(avctx->rc_max_rate >= 384000) {
460 avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
462 avctx->rc_buffer_size = 40;
463 avctx->rc_buffer_size *= 16384;
466 if (avctx->rc_buffer_size) {
467 av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
/* Sanity checks on rate-control parameter combinations. */
471 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
472 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
476 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
477 av_log(avctx, AV_LOG_INFO,
478 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
481 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
482 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
486 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
487 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
491 if (avctx->rc_max_rate &&
492 avctx->rc_max_rate == avctx->bit_rate &&
493 avctx->rc_max_rate != avctx->rc_min_rate) {
494 av_log(avctx, AV_LOG_INFO,
495 "impossible bitrate constraints, this will fail\n");
/* The VBV buffer must hold at least one frame's worth of bits. */
498 if (avctx->rc_buffer_size &&
499 avctx->bit_rate * (int64_t)avctx->time_base.num >
500 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
501 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
/* Widen an unusably small bitrate tolerance (unless qscale is fixed). */
505 if (!s->fixed_qscale &&
506 avctx->bit_rate * av_q2d(avctx->time_base) >
507 avctx->bit_rate_tolerance) {
508 av_log(avctx, AV_LOG_WARNING,
509 "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, (int64_t)avctx->bit_rate);
510 avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
/* CBR MPEG-1/2 with a VBV too large for the 16-bit vbv_delay field. */
513 if (s->avctx->rc_max_rate &&
514 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
515 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
516 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
517 90000LL * (avctx->rc_buffer_size - 1) >
518 s->avctx->rc_max_rate * 0xFFFFLL) {
519 av_log(avctx, AV_LOG_INFO,
520 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
521 "specified vbv buffer is too large for the given bitrate!\n");
/* Validate feature flags (4MV, OBMC, qpel, B-frames) against the codec. */
524 if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
525 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
526 s->codec_id != AV_CODEC_ID_FLV1) {
527 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
531 if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
532 av_log(avctx, AV_LOG_ERROR,
533 "OBMC is only supported with simple mb decision\n");
537 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
538 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
542 if (s->max_b_frames &&
543 s->codec_id != AV_CODEC_ID_MPEG4 &&
544 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
545 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
546 av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
549 if (s->max_b_frames < 0) {
550 av_log(avctx, AV_LOG_ERROR,
551 "max b frames must be 0 or positive for mpegvideo based encoders\n");
/* Per-codec limits on sample aspect ratio and resolution. */
555 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
556 s->codec_id == AV_CODEC_ID_H263 ||
557 s->codec_id == AV_CODEC_ID_H263P) &&
558 (avctx->sample_aspect_ratio.num > 255 ||
559 avctx->sample_aspect_ratio.den > 255)) {
560 av_log(avctx, AV_LOG_WARNING,
561 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
562 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
563 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
564 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
567 if ((s->codec_id == AV_CODEC_ID_H263 ||
568 s->codec_id == AV_CODEC_ID_H263P) &&
569 (avctx->width > 2048 ||
570 avctx->height > 1152 )) {
571 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
574 if ((s->codec_id == AV_CODEC_ID_H263 ||
575 s->codec_id == AV_CODEC_ID_H263P) &&
576 ((avctx->width &3) ||
577 (avctx->height&3) )) {
578 av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
582 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
583 (avctx->width > 4095 ||
584 avctx->height > 4095 )) {
585 av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
589 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
590 (avctx->width > 16383 ||
591 avctx->height > 16383 )) {
592 av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
596 if (s->codec_id == AV_CODEC_ID_RV10 &&
598 avctx->height&15 )) {
599 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
600 return AVERROR(EINVAL);
603 if (s->codec_id == AV_CODEC_ID_RV20 &&
606 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
607 return AVERROR(EINVAL);
610 if ((s->codec_id == AV_CODEC_ID_WMV1 ||
611 s->codec_id == AV_CODEC_ID_WMV2) &&
613 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
617 if ((s->avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
618 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
619 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
/* Import the deprecated mpeg_quant option. */
623 #if FF_API_PRIVATE_OPT
624 FF_DISABLE_DEPRECATION_WARNINGS
625 if (avctx->mpeg_quant)
626 s->mpeg_quant = avctx->mpeg_quant;
627 FF_ENABLE_DEPRECATION_WARNINGS
630 // FIXME mpeg2 uses that too
631 if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
632 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
633 av_log(avctx, AV_LOG_ERROR,
634 "mpeg2 style quantization not supported by codec\n");
/* RD-based options require their prerequisites. */
638 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
639 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
643 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
644 s->avctx->mb_decision != FF_MB_DECISION_RD) {
645 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
649 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
650 (s->codec_id == AV_CODEC_ID_AMV ||
651 s->codec_id == AV_CODEC_ID_MJPEG)) {
652 // Used to produce garbage with MJPEG.
653 av_log(avctx, AV_LOG_ERROR,
654 "QP RD is no longer compatible with MJPEG or AMV\n");
658 #if FF_API_PRIVATE_OPT
659 FF_DISABLE_DEPRECATION_WARNINGS
660 if (avctx->scenechange_threshold)
661 s->scenechange_threshold = avctx->scenechange_threshold;
662 FF_ENABLE_DEPRECATION_WARNINGS
665 if (s->scenechange_threshold < 1000000000 &&
666 (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
667 av_log(avctx, AV_LOG_ERROR,
668 "closed gop with scene change detection are not supported yet, "
669 "set threshold to 1000000000\n");
673 if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
674 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
675 av_log(avctx, AV_LOG_ERROR,
676 "low delay forcing is only available for mpeg2\n");
679 if (s->max_b_frames != 0) {
680 av_log(avctx, AV_LOG_ERROR,
681 "B-frames cannot be used with low delay\n");
/* MPEG-2 non-linear quantizer: qmax limited to 28 for now. */
686 if (s->q_scale_type == 1) {
687 if (avctx->qmax > 28) {
688 av_log(avctx, AV_LOG_ERROR,
689 "non linear quant only supports qmax <= 28 currently\n");
694 if (avctx->slices > 1 &&
695 (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
696 av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
697 return AVERROR(EINVAL);
/* Threaded (slice) encoding is only supported by a subset of codecs. */
700 if (s->avctx->thread_count > 1 &&
701 s->codec_id != AV_CODEC_ID_MPEG4 &&
702 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
703 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
704 s->codec_id != AV_CODEC_ID_MJPEG &&
705 (s->codec_id != AV_CODEC_ID_H263P)) {
706 av_log(avctx, AV_LOG_ERROR,
707 "multi threaded encoding not supported by codec\n");
711 if (s->avctx->thread_count < 1) {
712 av_log(avctx, AV_LOG_ERROR,
713 "automatic thread number detection not supported by codec, "
718 if (!avctx->time_base.den || !avctx->time_base.num) {
719 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
723 #if FF_API_PRIVATE_OPT
724 FF_DISABLE_DEPRECATION_WARNINGS
725 if (avctx->b_frame_strategy)
726 s->b_frame_strategy = avctx->b_frame_strategy;
727 if (avctx->b_sensitivity != 40)
728 s->b_sensitivity = avctx->b_sensitivity;
729 FF_ENABLE_DEPRECATION_WARNINGS
732 if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
733 av_log(avctx, AV_LOG_INFO,
734 "notice: b_frame_strategy only affects the first pass\n");
735 s->b_frame_strategy = 0;
/* Reduce the time base by its gcd so derived values stay small. */
738 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
740 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
741 avctx->time_base.den /= i;
742 avctx->time_base.num /= i;
/* Default quantizer rounding bias: 3/8 intra for MPEG-style codecs,
 * otherwise bias inter coefficients towards zero. */
746 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) {
747 // (a + x * 3 / 8) / x
748 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
749 s->inter_quant_bias = 0;
751 s->intra_quant_bias = 0;
753 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
756 if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
757 av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
758 return AVERROR(EINVAL);
761 #if FF_API_QUANT_BIAS
762 FF_DISABLE_DEPRECATION_WARNINGS
763 if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
764 s->intra_quant_bias = avctx->intra_quant_bias;
765 if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
766 s->inter_quant_bias = avctx->inter_quant_bias;
767 FF_ENABLE_DEPRECATION_WARNINGS
770 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
/* MPEG-4 stores the timebase denominator in 16 bits. */
772 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
773 s->avctx->time_base.den > (1 << 16) - 1) {
774 av_log(avctx, AV_LOG_ERROR,
775 "timebase %d/%d not supported by MPEG 4 standard, "
776 "the maximum admitted value for the timebase denominator "
777 "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
781 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
/* Per-codec output format and coding-tool configuration. */
783 switch (avctx->codec->id) {
784 case AV_CODEC_ID_MPEG1VIDEO:
785 s->out_format = FMT_MPEG1;
786 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
787 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
789 case AV_CODEC_ID_MPEG2VIDEO:
790 s->out_format = FMT_MPEG1;
791 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
792 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
795 case AV_CODEC_ID_MJPEG:
796 case AV_CODEC_ID_AMV:
797 s->out_format = FMT_MJPEG;
798 s->intra_only = 1; /* force intra only for jpeg */
799 if (!CONFIG_MJPEG_ENCODER ||
800 ff_mjpeg_encode_init(s) < 0)
805 case AV_CODEC_ID_H261:
806 if (!CONFIG_H261_ENCODER)
808 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
809 av_log(avctx, AV_LOG_ERROR,
810 "The specified picture size of %dx%d is not valid for the "
811 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
812 s->width, s->height);
815 s->out_format = FMT_H261;
818 s->rtp_mode = 0; /* Sliced encoding not supported */
820 case AV_CODEC_ID_H263:
821 if (!CONFIG_H263_ENCODER)
823 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
824 s->width, s->height) == 8) {
825 av_log(avctx, AV_LOG_ERROR,
826 "The specified picture size of %dx%d is not valid for "
827 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
828 "352x288, 704x576, and 1408x1152. "
829 "Try H.263+.\n", s->width, s->height);
832 s->out_format = FMT_H263;
836 case AV_CODEC_ID_H263P:
837 s->out_format = FMT_H263;
840 s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
841 s->modified_quant = s->h263_aic;
842 s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
843 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
846 /* These are just to be sure */
850 case AV_CODEC_ID_FLV1:
851 s->out_format = FMT_H263;
852 s->h263_flv = 2; /* format = 1; 11-bit codes */
853 s->unrestricted_mv = 1;
854 s->rtp_mode = 0; /* don't allow GOB */
858 case AV_CODEC_ID_RV10:
859 s->out_format = FMT_H263;
863 case AV_CODEC_ID_RV20:
864 s->out_format = FMT_H263;
867 s->modified_quant = 1;
871 s->unrestricted_mv = 0;
873 case AV_CODEC_ID_MPEG4:
874 s->out_format = FMT_H263;
876 s->unrestricted_mv = 1;
877 s->low_delay = s->max_b_frames ? 0 : 1;
878 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
880 case AV_CODEC_ID_MSMPEG4V2:
881 s->out_format = FMT_H263;
883 s->unrestricted_mv = 1;
884 s->msmpeg4_version = 2;
888 case AV_CODEC_ID_MSMPEG4V3:
889 s->out_format = FMT_H263;
891 s->unrestricted_mv = 1;
892 s->msmpeg4_version = 3;
893 s->flipflop_rounding = 1;
897 case AV_CODEC_ID_WMV1:
898 s->out_format = FMT_H263;
900 s->unrestricted_mv = 1;
901 s->msmpeg4_version = 4;
902 s->flipflop_rounding = 1;
906 case AV_CODEC_ID_WMV2:
907 s->out_format = FMT_H263;
909 s->unrestricted_mv = 1;
910 s->msmpeg4_version = 5;
911 s->flipflop_rounding = 1;
919 #if FF_API_PRIVATE_OPT
920 FF_DISABLE_DEPRECATION_WARNINGS
921 if (avctx->noise_reduction)
922 s->noise_reduction = avctx->noise_reduction;
923 FF_ENABLE_DEPRECATION_WARNINGS
926 avctx->has_b_frames = !s->low_delay;
930 s->progressive_frame =
931 s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
932 AV_CODEC_FLAG_INTERLACED_ME) ||
937 if (ff_mpv_common_init(s) < 0)
/* Initialize the DSP helper contexts used by the encoder. */
940 ff_fdctdsp_init(&s->fdsp, avctx);
941 ff_me_cmp_init(&s->mecc, avctx);
942 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
943 ff_pixblockdsp_init(&s->pdsp, avctx);
944 ff_qpeldsp_init(&s->qdsp);
/* Allocate statistics and quantization tables (goto fail on OOM). */
946 if (s->msmpeg4_version) {
947 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
948 2 * 2 * (MAX_LEVEL + 1) *
949 (MAX_RUN + 1) * 2 * sizeof(int), fail);
951 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
953 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
954 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail);
955 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
956 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
957 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
958 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
959 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
960 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
961 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
962 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
965 if (s->noise_reduction) {
966 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
967 2 * 64 * sizeof(uint16_t), fail);
970 ff_dct_encode_init(s);
972 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
973 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
975 if (s->slice_context_count > 1) {
978 if (avctx->codec_id == AV_CODEC_ID_H263P)
979 s->h263_slice_structured = 1;
982 s->quant_precision = 5;
984 #if FF_API_PRIVATE_OPT
985 FF_DISABLE_DEPRECATION_WARNINGS
986 if (avctx->frame_skip_threshold)
987 s->frame_skip_threshold = avctx->frame_skip_threshold;
988 if (avctx->frame_skip_factor)
989 s->frame_skip_factor = avctx->frame_skip_factor;
990 if (avctx->frame_skip_exp)
991 s->frame_skip_exp = avctx->frame_skip_exp;
992 if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
993 s->frame_skip_cmp = avctx->frame_skip_cmp;
994 FF_ENABLE_DEPRECATION_WARNINGS
997 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, s->avctx->ildct_cmp);
998 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
/* Run the per-format encoder init routines. */
1000 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
1001 ff_h261_encode_init(s);
1002 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
1003 ff_h263_encode_init(s);
1004 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
1005 if ((ret = ff_msmpeg4_encode_init(s)) < 0)
1007 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
1008 && s->out_format == FMT_MPEG1)
1009 ff_mpeg1_encode_init(s);
/* Build the intra/inter matrices (defaults or user supplied),
 * stored in IDCT permutation order. */
1012 for (i = 0; i < 64; i++) {
1013 int j = s->idsp.idct_permutation[i];
1014 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
1016 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
1017 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
1018 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1019 s->intra_matrix[j] =
1020 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1023 s->chroma_intra_matrix[j] =
1024 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1025 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1027 if (s->avctx->intra_matrix)
1028 s->intra_matrix[j] = s->avctx->intra_matrix[i];
1029 if (s->avctx->inter_matrix)
1030 s->inter_matrix[j] = s->avctx->inter_matrix[i];
1033 /* precompute matrix */
1034 /* for mjpeg, we do include qscale in the matrix */
1035 if (s->out_format != FMT_MJPEG) {
1036 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
1037 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1039 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
1040 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1044 #if FF_API_RC_STRATEGY
1045 FF_DISABLE_DEPRECATION_WARNINGS
1046 if (!s->rc_strategy)
1047 s->rc_strategy = s->avctx->rc_strategy;
1048 FF_ENABLE_DEPRECATION_WARNINGS
1051 if (ff_rate_control_init(s) < 0)
1054 #if FF_API_RC_STRATEGY
1055 av_assert0(MPV_RC_STRATEGY_XVID == FF_RC_STRATEGY_XVID);
/* Optional Xvid rate control for two-pass mode (needs Xvid support built in). */
1058 if ((s->avctx->flags & AV_CODEC_FLAG_PASS2) && s->rc_strategy == MPV_RC_STRATEGY_XVID) {
1060 ret = ff_xvid_rate_control_init(s);
1062 ret = AVERROR(ENOSYS);
1063 av_log(s->avctx, AV_LOG_ERROR,
1064 "Xvid ratecontrol requires libavcodec compiled with Xvid support.\n");
/* Import the remaining deprecated rate-control and misc options. */
1070 #if FF_API_ERROR_RATE
1071 FF_DISABLE_DEPRECATION_WARNINGS
1072 if (avctx->error_rate)
1073 s->error_rate = avctx->error_rate;
1074 FF_ENABLE_DEPRECATION_WARNINGS;
1077 #if FF_API_NORMALIZE_AQP
1078 FF_DISABLE_DEPRECATION_WARNINGS
1079 if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP)
1080 s->mpv_flags |= FF_MPV_FLAG_NAQ;
1081 FF_ENABLE_DEPRECATION_WARNINGS;
1085 FF_DISABLE_DEPRECATION_WARNINGS
1086 if (avctx->flags & CODEC_FLAG_MV0)
1087 s->mpv_flags |= FF_MPV_FLAG_MV0;
1088 FF_ENABLE_DEPRECATION_WARNINGS
1092 FF_DISABLE_DEPRECATION_WARNINGS
1093 if (avctx->rc_qsquish != 0.0)
1094 s->rc_qsquish = avctx->rc_qsquish;
1095 if (avctx->rc_qmod_amp != 0.0)
1096 s->rc_qmod_amp = avctx->rc_qmod_amp;
1097 if (avctx->rc_qmod_freq)
1098 s->rc_qmod_freq = avctx->rc_qmod_freq;
1099 if (avctx->rc_buffer_aggressivity != 1.0)
1100 s->rc_buffer_aggressivity = avctx->rc_buffer_aggressivity;
1101 if (avctx->rc_initial_cplx != 0.0)
1102 s->rc_initial_cplx = avctx->rc_initial_cplx;
1104 s->lmin = avctx->lmin;
1106 s->lmax = avctx->lmax;
1109 av_freep(&s->rc_eq);
1110 s->rc_eq = av_strdup(avctx->rc_eq);
1112 return AVERROR(ENOMEM);
1114 FF_ENABLE_DEPRECATION_WARNINGS
1117 #if FF_API_PRIVATE_OPT
1118 FF_DISABLE_DEPRECATION_WARNINGS
1119 if (avctx->brd_scale)
1120 s->brd_scale = avctx->brd_scale;
1122 if (avctx->prediction_method)
1123 s->pred = avctx->prediction_method + 1;
1124 FF_ENABLE_DEPRECATION_WARNINGS
/* b_frame_strategy 2 needs downscaled temporary frames for lookahead. */
1127 if (s->b_frame_strategy == 2) {
1128 for (i = 0; i < s->max_b_frames + 2; i++) {
1129 s->tmp_frames[i] = av_frame_alloc();
1130 if (!s->tmp_frames[i])
1131 return AVERROR(ENOMEM);
1133 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1134 s->tmp_frames[i]->width = s->width >> s->brd_scale;
1135 s->tmp_frames[i]->height = s->height >> s->brd_scale;
1137 ret = av_frame_get_buffer(s->tmp_frames[i], 32);
/* Export the coded picture buffer properties as stream side data. */
1143 cpb_props = ff_add_cpb_side_data(avctx);
1145 return AVERROR(ENOMEM);
1146 cpb_props->max_bitrate = avctx->rc_max_rate;
1147 cpb_props->min_bitrate = avctx->rc_min_rate;
1148 cpb_props->avg_bitrate = avctx->bit_rate;
1149 cpb_props->buffer_size = avctx->rc_buffer_size;
/* NOTE(review): error path (reached via the fail label, elided in this
 * excerpt) — tear down any partially initialized state. */
1153 ff_mpv_encode_end(avctx);
1154 return AVERROR_UNKNOWN;
/**
 * Free all encoder-side resources of the context.  Safe to call on a
 * partially initialized context (it is also used as the failure path of
 * ff_mpv_encode_init(); av_freep/av_frame_free tolerate NULL).
 */
1157 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1159 MpegEncContext *s = avctx->priv_data;
1162 ff_rate_control_uninit(s);
1164 if ((avctx->flags & AV_CODEC_FLAG_PASS2) && s->rc_strategy == MPV_RC_STRATEGY_XVID)
1165 ff_xvid_rate_control_uninit(s);
1168 ff_mpv_common_end(s);
1169 if (CONFIG_MJPEG_ENCODER &&
1170 s->out_format == FMT_MJPEG)
1171 ff_mjpeg_encode_close(s);
1173 av_freep(&avctx->extradata);
1175 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1176 av_frame_free(&s->tmp_frames[i]);
1178 ff_free_picture_tables(&s->new_picture);
1179 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1181 av_freep(&s->avctx->stats_out);
1182 av_freep(&s->ac_stats);
/* The chroma matrices may alias the luma ones; only free them when they
 * are distinct allocations, then clear the pointers either way. */
1184 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1185 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1186 s->q_chroma_intra_matrix= NULL;
1187 s->q_chroma_intra_matrix16= NULL;
1188 av_freep(&s->q_intra_matrix);
1189 av_freep(&s->q_inter_matrix);
1190 av_freep(&s->q_intra_matrix16);
1191 av_freep(&s->q_inter_matrix16);
1192 av_freep(&s->input_picture);
1193 av_freep(&s->reordered_input_picture);
1194 av_freep(&s->dct_offset);
/* Sum of absolute errors of a 16x16 luma block against a constant reference
 * value `ref` (the caller passes the block mean — see get_intra_count). */
1199 static int get_sae(uint8_t *src, int ref, int stride)
1204 for (y = 0; y < 16; y++) {
1205 for (x = 0; x < 16; x++) {
1206 acc += FFABS(src[x + y * stride] - ref);
/* Count 16x16 blocks for which intra coding looks cheaper than inter:
 * a block votes "intra" when its deviation from its own mean (SAE + 500
 * bias) is smaller than its SAD against the reference frame. */
1213 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1214 uint8_t *ref, int stride)
/* Round height down to a multiple of 16 so only whole macroblocks are
 * scanned (w presumably derived from s->width the same way — elided here). */
1220 h = s->height & ~15;
1222 for (y = 0; y < h; y += 16) {
1223 for (x = 0; x < w; x += 16) {
1224 int offset = x + y * stride;
1225 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
/* pix_sum returns the sum of 256 pixels; +128 >> 8 is a rounded mean. */
1227 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1228 int sae = get_sae(src + offset, mean, stride);
1230 acc += sae + 500 < sad;
/* Thin wrapper around ff_alloc_picture() filling in all encoder-side
 * geometry; `shared` indicates the frame data is externally owned. */
1236 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1238 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1239 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1240 s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1241 &s->linesize, &s->uvlinesize);
/* Queue one user-supplied frame into s->input_picture[], validating its
 * pts, deciding whether the frame buffer can be used directly ("direct")
 * or must be copied, and handling the flush transition at EOF.
 * NOTE(review): several lines are elided in this excerpt; code kept verbatim. */
1244 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1246 Picture *pic = NULL;
1248 int i, display_picture_number = 0, ret;
/* Reorder delay: max_b_frames when B-frames are used, else 1 unless
 * low_delay. */
1249 int encoding_delay = s->max_b_frames ? s->max_b_frames
1250 : (s->low_delay ? 0 : 1);
1251 int flush_offset = 1;
1256 display_picture_number = s->input_picture_number++;
/* pts sanity: must be strictly increasing relative to the last user pts. */
1258 if (pts != AV_NOPTS_VALUE) {
1259 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1260 int64_t last = s->user_specified_pts;
1263 av_log(s->avctx, AV_LOG_ERROR,
1264 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1266 return AVERROR(EINVAL);
/* Remember the pts step of the first two frames to derive dts later. */
1269 if (!s->low_delay && display_picture_number == 1)
1270 s->dts_delta = pts - last;
1272 s->user_specified_pts = pts;
/* No pts given: extrapolate from the previous one, or fall back to the
 * display picture number. */
1274 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1275 s->user_specified_pts =
1276 pts = s->user_specified_pts + 1;
1277 av_log(s->avctx, AV_LOG_INFO,
1278 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1281 pts = display_picture_number;
/* Direct (zero-copy) use of the caller's buffer requires matching
 * linesizes, macroblock-aligned dimensions and STRIDE_ALIGN alignment. */
1285 if (!pic_arg->buf[0] ||
1286 pic_arg->linesize[0] != s->linesize ||
1287 pic_arg->linesize[1] != s->uvlinesize ||
1288 pic_arg->linesize[2] != s->uvlinesize)
1290 if ((s->width & 15) || (s->height & 15))
1292 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1294 if (s->linesize & (STRIDE_ALIGN-1))
1297 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1298 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1300 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1304 pic = &s->picture[i];
1308 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1311 ret = alloc_picture(s, pic, direct);
/* If the source already sits exactly at our INPLACE_OFFSET, no copy is
 * needed; otherwise copy plane by plane below. */
1316 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1317 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1318 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1321 int h_chroma_shift, v_chroma_shift;
1322 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1326 for (i = 0; i < 3; i++) {
1327 int src_stride = pic_arg->linesize[i];
1328 int dst_stride = i ? s->uvlinesize : s->linesize;
1329 int h_shift = i ? h_chroma_shift : 0;
1330 int v_shift = i ? v_chroma_shift : 0;
1331 int w = s->width >> h_shift;
1332 int h = s->height >> v_shift;
1333 uint8_t *src = pic_arg->data[i];
1334 uint8_t *dst = pic->f->data[i];
/* Interlaced MPEG-2 with tall alignment padding needs special handling
 * (the affected branch body is elided in this excerpt). */
1337 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1338 && !s->progressive_sequence
1339 && FFALIGN(s->height, 32) - s->height > 16)
1342 if (!s->avctx->rc_buffer_size)
1343 dst += INPLACE_OFFSET;
/* Fast path: identical strides let us copy the whole plane at once. */
1345 if (src_stride == dst_stride)
1346 memcpy(dst, src, src_stride * h);
1349 uint8_t *dst2 = dst;
1351 memcpy(dst2, src, w);
/* Pad out the right/bottom edges when dimensions are not MB-aligned. */
1356 if ((s->width & 15) || (s->height & (vpad-1))) {
1357 s->mpvencdsp.draw_edges(dst, dst_stride,
1367 ret = av_frame_copy_props(pic->f, pic_arg);
1371 pic->f->display_picture_number = display_picture_number;
1372 pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1374 /* Flushing: When we have not received enough input frames,
1375 * ensure s->input_picture[0] contains the first picture */
1376 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1377 if (s->input_picture[flush_offset])
1380 if (flush_offset <= 1)
1383 encoding_delay = encoding_delay - flush_offset + 1;
1386 /* shift buffer entries */
1387 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1388 s->input_picture[i - flush_offset] = s->input_picture[i];
1390 s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether frame p is similar enough to the reference to be skipped
 * entirely. Per-8x8-block differences are accumulated with a norm selected
 * by |frame_skip_exp| (0=max, 1=L1, 2=L2, 3=L3, 4=L4); a negative exponent
 * applies the matching root afterwards. Returns nonzero to skip
 * (truncated here — the final return is elided). */
1395 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1399 int64_t score64 = 0;
1401 for (plane = 0; plane < 3; plane++) {
1402 const int stride = p->f->linesize[plane];
/* Luma is scanned at 2x the macroblock grid of chroma (8x8 units). */
1403 const int bw = plane ? 1 : 2;
1404 for (y = 0; y < s->mb_height * bw; y++) {
1405 for (x = 0; x < s->mb_width * bw; x++) {
/* Non-shared input frames carry the INPLACE_OFFSET of 16 bytes. */
1406 int off = p->shared ? 0 : 16;
1407 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1408 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1409 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1411 switch (FFABS(s->frame_skip_exp)) {
1412 case 0: score = FFMAX(score, v); break;
1413 case 1: score += FFABS(v); break;
1414 case 2: score64 += v * (int64_t)v; break;
1415 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1416 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
/* Negative exponent: normalize by frame area and undo the power. */
1425 if (s->frame_skip_exp < 0)
1426 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1427 -1.0/s->frame_skip_exp);
1429 if (score64 < s->frame_skip_threshold)
/* Lambda-scaled threshold: skip more aggressively at high quantizers. */
1431 if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
/* Encode one frame with an auxiliary AVCodecContext (used by the B-frame
 * count estimator) via the send/receive API; the packets themselves are
 * discarded (presumably only the accumulated size is returned — the tail
 * of this function is elided in this excerpt). */
1436 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1438 AVPacket pkt = { 0 };
1442 av_init_packet(&pkt);
/* frame == NULL signals end-of-stream to drain delayed packets. */
1444 ret = avcodec_send_frame(c, frame);
1449 ret = avcodec_receive_packet(c, &pkt);
1452 av_packet_unref(&pkt);
/* EAGAIN/EOF are the normal loop-exit conditions, not errors. */
1453 } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
/* B-frame strategy 2: brute-force search for the best number of B-frames.
 * Downscaled copies of the queued input frames are re-encoded with a
 * throwaway encoder for each candidate B-count j, and the rate-distortion
 * cost (bits * lambda^2 plus reconstruction error) picks the winner.
 * NOTE(review): several lines are elided in this excerpt; code kept verbatim. */
1460 static int estimate_best_b_count(MpegEncContext *s)
1462 const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1463 const int scale = s->brd_scale;
1464 int width = s->width >> scale;
1465 int height = s->height >> scale;
1466 int i, j, out_size, p_lambda, b_lambda, lambda2;
1467 int64_t best_rd = INT64_MAX;
1468 int best_b_count = -1;
1471 av_assert0(scale >= 0 && scale <= 3);
/* Use the lambdas of the last P/B frames as cost weights; fall back to
 * the P lambda when no B frame has been coded yet. */
1474 //s->next_picture_ptr->quality;
1475 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1476 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1477 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1478 if (!b_lambda) // FIXME we should do this somewhere else
1479 b_lambda = p_lambda;
1480 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* Build downscaled copies: slot 0 is the last reference, slots 1..N+1 the
 * queued input pictures. */
1483 for (i = 0; i < s->max_b_frames + 2; i++) {
1484 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1485 s->next_picture_ptr;
1488 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1489 pre_input = *pre_input_ptr;
1490 memcpy(data, pre_input_ptr->f->data, sizeof(data));
/* Non-shared input frames carry the INPLACE_OFFSET. */
1492 if (!pre_input.shared && i) {
1493 data[0] += INPLACE_OFFSET;
1494 data[1] += INPLACE_OFFSET;
1495 data[2] += INPLACE_OFFSET;
1498 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1499 s->tmp_frames[i]->linesize[0],
1501 pre_input.f->linesize[0],
1503 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1504 s->tmp_frames[i]->linesize[1],
1506 pre_input.f->linesize[1],
1507 width >> 1, height >> 1),
1508 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1509 s->tmp_frames[i]->linesize[2],
1511 pre_input.f->linesize[2],
1512 width >> 1, height >> 1);
/* Try each candidate B-frame count j with a fresh scratch encoder. */
1516 for (j = 0; j < s->max_b_frames + 1; j++) {
1520 if (!s->input_picture[j])
1523 c = avcodec_alloc_context3(NULL);
1525 return AVERROR(ENOMEM);
1529 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1530 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1531 c->mb_decision = s->avctx->mb_decision;
1532 c->me_cmp = s->avctx->me_cmp;
1533 c->mb_cmp = s->avctx->mb_cmp;
1534 c->me_sub_cmp = s->avctx->me_sub_cmp;
1535 c->pix_fmt = AV_PIX_FMT_YUV420P;
1536 c->time_base = s->avctx->time_base;
1537 c->max_b_frames = s->max_b_frames;
1539 ret = avcodec_open2(c, codec, NULL);
/* Frame 0 is forced intra at near-lossless quality so the candidate
 * sequences start from a common reference. */
1543 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1544 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1546 out_size = encode_frame(c, s->tmp_frames[0]);
1552 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
/* Every (j+1)-th frame (and the last) is a P, the rest are Bs. */
1554 for (i = 0; i < s->max_b_frames + 1; i++) {
1555 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1557 s->tmp_frames[i + 1]->pict_type = is_p ?
1558 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1559 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1561 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1567 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1570 /* get the delayed frames */
1571 out_size = encode_frame(c, NULL);
1576 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* Add the PSNR-mode accumulated reconstruction error as distortion. */
1578 rd += c->error[0] + c->error[1] + c->error[2];
1586 avcodec_free_context(&c);
1591 return best_b_count;
/* Choose the next picture to encode: apply frame skipping, decide the
 * picture type (I/P and how many trailing Bs), reorder input_picture[]
 * into coding order, and set up s->new_picture / s->current_picture_ptr.
 * NOTE(review): several lines are elided in this excerpt; code kept verbatim. */
1594 static int select_input_picture(MpegEncContext *s)
/* Shift the reorder queue forward by one slot. */
1598 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1599 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1600 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1602 /* set next picture type & ordering */
1603 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
/* Frame skipping: drop the frame but still advance the VBV model. */
1604 if (s->frame_skip_threshold || s->frame_skip_factor) {
1605 if (s->picture_in_gop_number < s->gop_size &&
1606 s->next_picture_ptr &&
1607 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1608 // FIXME check that the gop check above is +-1 correct
1609 av_frame_unref(s->input_picture[0]->f);
1611 ff_vbv_update(s, 0);
/* No reference yet or intra-only: code the frame as an I picture. */
1617 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1618 !s->next_picture_ptr || s->intra_only) {
1619 s->reordered_input_picture[0] = s->input_picture[0];
1620 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1621 s->reordered_input_picture[0]->f->coded_picture_number =
1622 s->coded_picture_number++;
/* Two-pass mode: picture types come from the first-pass stats. */
1626 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1627 for (i = 0; i < s->max_b_frames + 1; i++) {
1628 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1630 if (pict_num >= s->rc_context.num_entries)
1632 if (!s->input_picture[i]) {
1633 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1637 s->input_picture[i]->f->pict_type =
1638 s->rc_context.entry[pict_num].new_pict_type;
/* Strategy 0: always use the maximum available run of B-frames. */
1642 if (s->b_frame_strategy == 0) {
1643 b_frames = s->max_b_frames;
1644 while (b_frames && !s->input_picture[b_frames])
/* Strategy 1: score candidate frames by intra count; stop the B run at
 * the first frame that is "too intra" relative to b_sensitivity. */
1646 } else if (s->b_frame_strategy == 1) {
1647 for (i = 1; i < s->max_b_frames + 1; i++) {
1648 if (s->input_picture[i] &&
1649 s->input_picture[i]->b_frame_score == 0) {
1650 s->input_picture[i]->b_frame_score =
1652 s->input_picture[i ]->f->data[0],
1653 s->input_picture[i - 1]->f->data[0],
1657 for (i = 0; i < s->max_b_frames + 1; i++) {
1658 if (!s->input_picture[i] ||
1659 s->input_picture[i]->b_frame_score - 1 >
1660 s->mb_num / s->b_sensitivity)
1664 b_frames = FFMAX(0, i - 1);
/* Reset cached scores for the frames we are about to consume. */
1667 for (i = 0; i < b_frames + 1; i++) {
1668 s->input_picture[i]->b_frame_score = 0;
/* Strategy 2: exhaustive RD search (see estimate_best_b_count). */
1670 } else if (s->b_frame_strategy == 2) {
1671 b_frames = estimate_best_b_count(s);
/* User-forced picture types (e.g. forced keyframes) shorten the B run. */
1678 for (i = b_frames - 1; i >= 0; i--) {
1679 int type = s->input_picture[i]->f->pict_type;
1680 if (type && type != AV_PICTURE_TYPE_B)
1683 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1684 b_frames == s->max_b_frames) {
1685 av_log(s->avctx, AV_LOG_ERROR,
1686 "warning, too many B-frames in a row\n");
/* GOP boundary handling; strict-gop truncates the B run, closed-gop
 * forces the anchor to be an I frame. */
1689 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1690 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1691 s->gop_size > s->picture_in_gop_number) {
1692 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1694 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1696 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1700 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1701 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
/* Emit the anchor first (coding order), then the B frames. */
1704 s->reordered_input_picture[0] = s->input_picture[b_frames];
1705 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1706 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1707 s->reordered_input_picture[0]->f->coded_picture_number =
1708 s->coded_picture_number++;
1709 for (i = 0; i < b_frames; i++) {
1710 s->reordered_input_picture[i + 1] = s->input_picture[i];
1711 s->reordered_input_picture[i + 1]->f->pict_type =
1713 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1714 s->coded_picture_number++;
1719 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1721 if (s->reordered_input_picture[0]) {
/* Reference pictures (non-B) get both reference field flags set (3). */
1722 s->reordered_input_picture[0]->reference =
1723 s->reordered_input_picture[0]->f->pict_type !=
1724 AV_PICTURE_TYPE_B ? 3 : 0;
1726 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1729 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1730 // input is a shared pix, so we can't modify it -> allocate a new
1731 // one & ensure that the shared one is reuseable
1734 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1737 pic = &s->picture[i];
1739 pic->reference = s->reordered_input_picture[0]->reference;
1740 if (alloc_picture(s, pic, 0) < 0) {
1744 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1748 /* mark us unused / free shared pic */
1749 av_frame_unref(s->reordered_input_picture[0]->f);
1750 s->reordered_input_picture[0]->shared = 0;
1752 s->current_picture_ptr = pic;
1754 // input is not a shared pix -> reuse buffer for current_pix
1755 s->current_picture_ptr = s->reordered_input_picture[0];
1756 for (i = 0; i < 4; i++) {
1757 s->new_picture.f->data[i] += INPLACE_OFFSET;
1760 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1761 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1762 s->current_picture_ptr)) < 0)
1765 s->picture_number = s->new_picture.f->display_picture_number;
/* Post-encode per-frame bookkeeping: pad reference-frame edges for
 * unrestricted motion vectors, remember last picture type/lambda, and
 * mirror results into the deprecated coded_frame/error fields. */
1770 static void frame_end(MpegEncContext *s)
/* Only reference frames need edge padding (B frames are never referenced). */
1772 if (s->unrestricted_mv &&
1773 s->current_picture.reference &&
1775 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1776 int hshift = desc->log2_chroma_w;
1777 int vshift = desc->log2_chroma_h;
1778 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1779 s->current_picture.f->linesize[0],
1780 s->h_edge_pos, s->v_edge_pos,
1781 EDGE_WIDTH, EDGE_WIDTH,
1782 EDGE_TOP | EDGE_BOTTOM);
/* Chroma planes: dimensions and edge width scaled by the subsampling. */
1783 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1784 s->current_picture.f->linesize[1],
1785 s->h_edge_pos >> hshift,
1786 s->v_edge_pos >> vshift,
1787 EDGE_WIDTH >> hshift,
1788 EDGE_WIDTH >> vshift,
1789 EDGE_TOP | EDGE_BOTTOM);
1790 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1791 s->current_picture.f->linesize[2],
1792 s->h_edge_pos >> hshift,
1793 s->v_edge_pos >> vshift,
1794 EDGE_WIDTH >> hshift,
1795 EDGE_WIDTH >> vshift,
1796 EDGE_TOP | EDGE_BOTTOM);
/* Remember lambda per picture type for future rate decisions. */
1801 s->last_pict_type = s->pict_type;
1802 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1803 if (s->pict_type!= AV_PICTURE_TYPE_B)
1804 s->last_non_b_pict_type = s->pict_type;
/* Legacy API mirrors, compiled only while the deprecated fields exist. */
1806 #if FF_API_CODED_FRAME
1807 FF_DISABLE_DEPRECATION_WARNINGS
1808 av_frame_unref(s->avctx->coded_frame);
1809 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1810 FF_ENABLE_DEPRECATION_WARNINGS
1812 #if FF_API_ERROR_FRAME
1813 FF_DISABLE_DEPRECATION_WARNINGS
1814 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1815 sizeof(s->current_picture.encoding_error));
1816 FF_ENABLE_DEPRECATION_WARNINGS
/* Refresh the per-coefficient DCT noise-reduction offsets from the
 * running error statistics, separately for intra and inter blocks. */
1820 static void update_noise_reduction(MpegEncContext *s)
1824 for (intra = 0; intra < 2; intra++) {
/* Halve the accumulators periodically so the statistics stay adaptive
 * and do not overflow. */
1825 if (s->dct_count[intra] > (1 << 16)) {
1826 for (i = 0; i < 64; i++) {
1827 s->dct_error_sum[intra][i] >>= 1;
1829 s->dct_count[intra] >>= 1;
/* offset = noise_reduction * count / mean_error, with rounding; the +1
 * in the divisor avoids division by zero. */
1832 for (i = 0; i < 64; i++) {
1833 s->dct_offset[intra][i] = (s->noise_reduction *
1834 s->dct_count[intra] +
1835 s->dct_error_sum[intra][i] / 2) /
1836 (s->dct_error_sum[intra][i] + 1);
/* Per-frame setup before encoding: rotate last/next/current reference
 * pictures, adjust plane pointers for field coding, and select the
 * dequantizer functions matching the output format. */
1841 static int frame_start(MpegEncContext *s)
1845 /* mark & release old frames */
1846 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1847 s->last_picture_ptr != s->next_picture_ptr &&
1848 s->last_picture_ptr->f->buf[0]) {
1849 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1852 s->current_picture_ptr->f->pict_type = s->pict_type;
1853 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1855 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1856 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1857 s->current_picture_ptr)) < 0)
/* Non-B frames become the new forward reference chain. */
1860 if (s->pict_type != AV_PICTURE_TYPE_B) {
1861 s->last_picture_ptr = s->next_picture_ptr;
1863 s->next_picture_ptr = s->current_picture_ptr;
1866 if (s->last_picture_ptr) {
1867 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1868 if (s->last_picture_ptr->f->buf[0] &&
1869 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1870 s->last_picture_ptr)) < 0)
1873 if (s->next_picture_ptr) {
1874 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1875 if (s->next_picture_ptr->f->buf[0] &&
1876 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1877 s->next_picture_ptr)) < 0)
/* Field pictures: point into the proper field and double the strides so
 * the encoder sees a half-height frame. */
1881 if (s->picture_structure!= PICT_FRAME) {
1883 for (i = 0; i < 4; i++) {
1884 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1885 s->current_picture.f->data[i] +=
1886 s->current_picture.f->linesize[i];
1888 s->current_picture.f->linesize[i] *= 2;
1889 s->last_picture.f->linesize[i] *= 2;
1890 s->next_picture.f->linesize[i] *= 2;
/* Pick dequantizers: MPEG-2 style, H.263/H.261 style, or MPEG-1 style. */
1894 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1895 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1896 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1897 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1898 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1899 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1901 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1902 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1905 if (s->dct_error_sum) {
1906 av_assert2(s->noise_reduction && s->encoding);
1907 update_noise_reduction(s);
/* Main encode entry point: queue the input frame, pick the next picture in
 * coding order, run the actual encode (possibly re-encoding on VBV
 * overflow), append stuffing, patch vbv_delay for CBR MPEG-1/2, and fill
 * the output packet (pts/dts/flags/side data).
 * NOTE(review): several lines are elided in this excerpt; code kept verbatim. */
1913 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1914 const AVFrame *pic_arg, int *got_packet)
1916 MpegEncContext *s = avctx->priv_data;
1917 int i, stuffing_count, ret;
1918 int context_count = s->slice_context_count;
1920 s->vbv_ignore_qmax = 0;
1922 s->picture_in_gop_number++;
1924 if (load_input_picture(s, pic_arg) < 0)
1927 if (select_input_picture(s) < 0) {
/* Output an encoded frame only if a picture was selected. */
1932 if (s->new_picture.f->data[0]) {
/* Single-slice-context encodes can use the growable internal buffer;
 * otherwise allocate the worst case upfront. */
1933 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1934 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1936 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1937 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
/* Side data for H.263 macroblock info export. */
1940 s->mb_info_ptr = av_packet_new_side_data(pkt,
1941 AV_PKT_DATA_H263_MB_INFO,
1942 s->mb_width*s->mb_height*12);
1943 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* Partition the packet buffer among slice threads proportionally to the
 * macroblock rows each one encodes. */
1946 for (i = 0; i < context_count; i++) {
1947 int start_y = s->thread_context[i]->start_mb_y;
1948 int end_y = s->thread_context[i]-> end_mb_y;
1949 int h = s->mb_height;
1950 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1951 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1953 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1956 s->pict_type = s->new_picture.f->pict_type;
1958 ret = frame_start(s);
1962 ret = encode_picture(s, s->picture_number);
1963 if (growing_buffer) {
1964 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1965 pkt->data = s->pb.buf;
1966 pkt->size = avctx->internal->byte_buffer_size;
/* Legacy statistics fields, kept while the deprecated API exists. */
1971 #if FF_API_STAT_BITS
1972 FF_DISABLE_DEPRECATION_WARNINGS
1973 avctx->header_bits = s->header_bits;
1974 avctx->mv_bits = s->mv_bits;
1975 avctx->misc_bits = s->misc_bits;
1976 avctx->i_tex_bits = s->i_tex_bits;
1977 avctx->p_tex_bits = s->p_tex_bits;
1978 avctx->i_count = s->i_count;
1979 // FIXME f/b_count in avctx
1980 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1981 avctx->skip_count = s->skip_count;
1982 FF_ENABLE_DEPRECATION_WARNINGS
1987 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1988 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
/* VBV overflow check: if the frame is too large, bump lambda and loop
 * back to re-encode the same frame at a coarser quantizer. */
1990 if (avctx->rc_buffer_size) {
1991 RateControlContext *rcc = &s->rc_context;
1992 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1993 int hq = (s->avctx->mb_decision == FF_MB_DECISION_RD || s->avctx->trellis);
1994 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1996 if (put_bits_count(&s->pb) > max_size &&
1997 s->lambda < s->lmax) {
1998 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1999 (s->qscale + 1) / s->qscale);
2000 if (s->adaptive_quant) {
2002 for (i = 0; i < s->mb_height * s->mb_stride; i++)
2003 s->lambda_table[i] =
2004 FFMAX(s->lambda_table[i] + min_step,
2005 s->lambda_table[i] * (s->qscale + 1) /
2008 s->mb_skipped = 0; // done in frame_start()
2009 // done in encode_picture() so we must undo it
2010 if (s->pict_type == AV_PICTURE_TYPE_P) {
2011 if (s->flipflop_rounding ||
2012 s->codec_id == AV_CODEC_ID_H263P ||
2013 s->codec_id == AV_CODEC_ID_MPEG4)
2014 s->no_rounding ^= 1;
2016 if (s->pict_type != AV_PICTURE_TYPE_B) {
2017 s->time_base = s->last_time_base;
2018 s->last_non_b_time = s->time - s->pp_time;
/* Rewind all slice bitstream writers before the re-encode. */
2020 for (i = 0; i < context_count; i++) {
2021 PutBitContext *pb = &s->thread_context[i]->pb;
2022 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
2024 s->vbv_ignore_qmax = 1;
2025 av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
2029 av_assert0(s->avctx->rc_max_rate);
2032 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
2033 ff_write_pass1_stats(s);
/* Propagate per-plane encoding error (PSNR stats) to the context. */
2035 for (i = 0; i < 4; i++) {
2036 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
2037 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
2039 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
2040 s->current_picture_ptr->encoding_error,
2041 (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
2044 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
2045 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
2046 s->misc_bits + s->i_tex_bits +
2048 flush_put_bits(&s->pb);
2049 s->frame_bits = put_bits_count(&s->pb);
/* Stuffing bytes required to keep the VBV buffer from overflowing. */
2051 stuffing_count = ff_vbv_update(s, s->frame_bits);
2052 s->stuffing_bits = 8*stuffing_count;
2053 if (stuffing_count) {
2054 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
2055 stuffing_count + 50) {
2056 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
2060 switch (s->codec_id) {
2061 case AV_CODEC_ID_MPEG1VIDEO:
2062 case AV_CODEC_ID_MPEG2VIDEO:
2063 while (stuffing_count--) {
2064 put_bits(&s->pb, 8, 0);
/* MPEG-4 stuffing: a filler startcode (0x000001C3) followed by 0xFF. */
2067 case AV_CODEC_ID_MPEG4:
2068 put_bits(&s->pb, 16, 0);
2069 put_bits(&s->pb, 16, 0x1C3);
2070 stuffing_count -= 4;
2071 while (stuffing_count--) {
2072 put_bits(&s->pb, 8, 0xFF);
2076 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2078 flush_put_bits(&s->pb);
2079 s->frame_bits = put_bits_count(&s->pb);
2082 /* update MPEG-1/2 vbv_delay for CBR */
2083 if (s->avctx->rc_max_rate &&
2084 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
2085 s->out_format == FMT_MPEG1 &&
2086 90000LL * (avctx->rc_buffer_size - 1) <=
2087 s->avctx->rc_max_rate * 0xFFFFLL) {
2088 AVCPBProperties *props;
2091 int vbv_delay, min_delay;
2092 double inbits = s->avctx->rc_max_rate *
2093 av_q2d(s->avctx->time_base);
2094 int minbits = s->frame_bits - 8 *
2095 (s->vbv_delay_ptr - s->pb.buf - 1);
2096 double bits = s->rc_context.buffer_index + minbits - inbits;
2099 av_log(s->avctx, AV_LOG_ERROR,
2100 "Internal error, negative bits\n");
2102 assert(s->repeat_first_field == 0);
/* vbv_delay is expressed in 90 kHz clock ticks. */
2104 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
2105 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
2106 s->avctx->rc_max_rate;
2108 vbv_delay = FFMAX(vbv_delay, min_delay);
2110 av_assert0(vbv_delay < 0xFFFF);
/* Patch the 16-bit vbv_delay field in-place in the already-written
 * picture header (it straddles three bytes). */
2112 s->vbv_delay_ptr[0] &= 0xF8;
2113 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2114 s->vbv_delay_ptr[1] = vbv_delay >> 5;
2115 s->vbv_delay_ptr[2] &= 0x07;
2116 s->vbv_delay_ptr[2] |= vbv_delay << 3;
2118 props = av_cpb_properties_alloc(&props_size);
2120 return AVERROR(ENOMEM);
/* x300: convert 90 kHz ticks to the 27 MHz system clock. */
2121 props->vbv_delay = vbv_delay * 300;
2123 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
2124 (uint8_t*)props, props_size);
2130 #if FF_API_VBV_DELAY
2131 FF_DISABLE_DEPRECATION_WARNINGS
2132 avctx->vbv_delay = vbv_delay * 300;
2133 FF_ENABLE_DEPRECATION_WARNINGS
2136 s->total_bits += s->frame_bits;
2137 #if FF_API_STAT_BITS
2138 FF_DISABLE_DEPRECATION_WARNINGS
2139 avctx->frame_bits = s->frame_bits;
2140 FF_ENABLE_DEPRECATION_WARNINGS
/* Output timestamps: dts lags pts by one non-B frame when reordering. */
2144 pkt->pts = s->current_picture.f->pts;
2145 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2146 if (!s->current_picture.f->coded_picture_number)
2147 pkt->dts = pkt->pts - s->dts_delta;
2149 pkt->dts = s->reordered_pts;
2150 s->reordered_pts = pkt->pts;
2152 pkt->dts = pkt->pts;
2153 if (s->current_picture.f->key_frame)
2154 pkt->flags |= AV_PKT_FLAG_KEY;
2156 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2161 /* release non-reference frames */
2162 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2163 if (!s->picture[i].reference)
2164 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2167 av_assert1((s->frame_bits & 7) == 0);
2169 pkt->size = s->frame_bits / 8;
2170 *got_packet = !!pkt->size;
/* Zero out block n entirely when it contains only a few small coefficients
 * whose weighted score (per-position weights from tab[], higher weight for
 * low frequencies) stays below `threshold`. A negative threshold also
 * spares the DC coefficient (skip_dc — initialization elided here). */
2174 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2175 int n, int threshold)
2177 static const char tab[64] = {
2178 3, 2, 2, 1, 1, 1, 1, 1,
2179 1, 1, 1, 1, 1, 1, 1, 1,
2180 1, 1, 1, 1, 1, 1, 1, 1,
2181 0, 0, 0, 0, 0, 0, 0, 0,
2182 0, 0, 0, 0, 0, 0, 0, 0,
2183 0, 0, 0, 0, 0, 0, 0, 0,
2184 0, 0, 0, 0, 0, 0, 0, 0,
2185 0, 0, 0, 0, 0, 0, 0, 0
2190 int16_t *block = s->block[n];
2191 const int last_index = s->block_last_index[n];
2194 if (threshold < 0) {
2196 threshold = -threshold;
2200 /* Are all we could set to zero already zero? */
2201 if (last_index <= skip_dc - 1)
/* Score the block; any coefficient with |level| > 1 aborts elimination
 * (the early-return branch is elided in this excerpt). */
2204 for (i = 0; i <= last_index; i++) {
2205 const int j = s->intra_scantable.permutated[i];
2206 const int level = FFABS(block[j]);
2208 if (skip_dc && i == 0)
2212 } else if (level > 1) {
2218 if (score >= threshold)
/* Clear everything after (optionally) the DC coefficient. */
2220 for (i = skip_dc; i <= last_index; i++) {
2221 const int j = s->intra_scantable.permutated[i];
/* last_index: 0 if the DC coefficient was kept, -1 if the block is empty. */
2225 s->block_last_index[n] = 0;
2227 s->block_last_index[n] = -1;
/* Clamp quantized coefficients into the codec's representable range
 * [min_qcoeff, max_qcoeff], counting how many had to be clipped and
 * warning once per block in simple MB-decision mode. */
2230 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2234 const int maxlevel = s->max_qcoeff;
2235 const int minlevel = s->min_qcoeff;
2239 i = 1; // skip clipping of intra dc
2243 for (; i <= last_index; i++) {
2244 const int j = s->intra_scantable.permutated[i];
2245 int level = block[j];
2247 if (level > maxlevel) {
2250 } else if (level < minlevel) {
2258 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2259 av_log(s->avctx, AV_LOG_INFO,
2260 "warning, clipping %d dct coefficients to %d..%d\n",
2261 overflow, minlevel, maxlevel);
/* Compute a perceptual weight for each pixel of an 8x8 block from the
 * local (3x3 neighbourhood, clamped at block edges) variance: flat areas
 * get low weight so quantization noise there is penalized more. */
2264 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2268 for (y = 0; y < 8; y++) {
2269 for (x = 0; x < 8; x++) {
2275 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2276 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2277 int v = ptr[x2 + y2 * stride];
/* 36*sqrt(count*sum(v^2) - sum(v)^2)/count ~ scaled local std deviation. */
2283 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2288 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2289 int motion_x, int motion_y,
2290 int mb_block_height,
2294 int16_t weight[12][64];
2295 int16_t orig[12][64];
2296 const int mb_x = s->mb_x;
2297 const int mb_y = s->mb_y;
2300 int dct_offset = s->linesize * 8; // default for progressive frames
2301 int uv_dct_offset = s->uvlinesize * 8;
2302 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2303 ptrdiff_t wrap_y, wrap_c;
2305 for (i = 0; i < mb_block_count; i++)
2306 skip_dct[i] = s->skipdct;
2308 if (s->adaptive_quant) {
2309 const int last_qp = s->qscale;
2310 const int mb_xy = mb_x + mb_y * s->mb_stride;
2312 s->lambda = s->lambda_table[mb_xy];
2315 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2316 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2317 s->dquant = s->qscale - last_qp;
2319 if (s->out_format == FMT_H263) {
2320 s->dquant = av_clip(s->dquant, -2, 2);
2322 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2324 if (s->pict_type == AV_PICTURE_TYPE_B) {
2325 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2328 if (s->mv_type == MV_TYPE_8X8)
2334 ff_set_qscale(s, last_qp + s->dquant);
2335 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2336 ff_set_qscale(s, s->qscale + s->dquant);
2338 wrap_y = s->linesize;
2339 wrap_c = s->uvlinesize;
2340 ptr_y = s->new_picture.f->data[0] +
2341 (mb_y * 16 * wrap_y) + mb_x * 16;
2342 ptr_cb = s->new_picture.f->data[1] +
2343 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2344 ptr_cr = s->new_picture.f->data[2] +
2345 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2347 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2348 uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2349 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2350 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2351 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2353 16, 16, mb_x * 16, mb_y * 16,
2354 s->width, s->height);
2356 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2358 mb_block_width, mb_block_height,
2359 mb_x * mb_block_width, mb_y * mb_block_height,
2361 ptr_cb = ebuf + 16 * wrap_y;
2362 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2364 mb_block_width, mb_block_height,
2365 mb_x * mb_block_width, mb_y * mb_block_height,
2367 ptr_cr = ebuf + 16 * wrap_y + 16;
2371 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2372 int progressive_score, interlaced_score;
2374 s->interlaced_dct = 0;
2375 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2376 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2377 NULL, wrap_y, 8) - 400;
2379 if (progressive_score > 0) {
2380 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2381 NULL, wrap_y * 2, 8) +
2382 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2383 NULL, wrap_y * 2, 8);
2384 if (progressive_score > interlaced_score) {
2385 s->interlaced_dct = 1;
2387 dct_offset = wrap_y;
2388 uv_dct_offset = wrap_c;
2390 if (s->chroma_format == CHROMA_422 ||
2391 s->chroma_format == CHROMA_444)
2397 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2398 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2399 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2400 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2402 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2406 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2407 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2408 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2409 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2410 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2411 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2412 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2413 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2414 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2415 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2416 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2417 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2421 op_pixels_func (*op_pix)[4];
2422 qpel_mc_func (*op_qpix)[16];
2423 uint8_t *dest_y, *dest_cb, *dest_cr;
2425 dest_y = s->dest[0];
2426 dest_cb = s->dest[1];
2427 dest_cr = s->dest[2];
2429 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2430 op_pix = s->hdsp.put_pixels_tab;
2431 op_qpix = s->qdsp.put_qpel_pixels_tab;
2433 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2434 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2437 if (s->mv_dir & MV_DIR_FORWARD) {
2438 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2439 s->last_picture.f->data,
2441 op_pix = s->hdsp.avg_pixels_tab;
2442 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2444 if (s->mv_dir & MV_DIR_BACKWARD) {
2445 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2446 s->next_picture.f->data,
2450 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2451 int progressive_score, interlaced_score;
2453 s->interlaced_dct = 0;
2454 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2455 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2459 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2460 progressive_score -= 400;
2462 if (progressive_score > 0) {
2463 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2465 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2469 if (progressive_score > interlaced_score) {
2470 s->interlaced_dct = 1;
2472 dct_offset = wrap_y;
2473 uv_dct_offset = wrap_c;
2475 if (s->chroma_format == CHROMA_422)
2481 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2482 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2483 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2484 dest_y + dct_offset, wrap_y);
2485 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2486 dest_y + dct_offset + 8, wrap_y);
2488 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2492 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2493 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2494 if (!s->chroma_y_shift) { /* 422 */
2495 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2496 dest_cb + uv_dct_offset, wrap_c);
2497 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2498 dest_cr + uv_dct_offset, wrap_c);
2501 /* pre quantization */
2502 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2503 2 * s->qscale * s->qscale) {
2505 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2507 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2509 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2510 wrap_y, 8) < 20 * s->qscale)
2512 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2513 wrap_y, 8) < 20 * s->qscale)
2515 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2517 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2519 if (!s->chroma_y_shift) { /* 422 */
2520 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2521 dest_cb + uv_dct_offset,
2522 wrap_c, 8) < 20 * s->qscale)
2524 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2525 dest_cr + uv_dct_offset,
2526 wrap_c, 8) < 20 * s->qscale)
2532 if (s->quantizer_noise_shaping) {
2534 get_visual_weight(weight[0], ptr_y , wrap_y);
2536 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2538 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2540 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2542 get_visual_weight(weight[4], ptr_cb , wrap_c);
2544 get_visual_weight(weight[5], ptr_cr , wrap_c);
2545 if (!s->chroma_y_shift) { /* 422 */
2547 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2550 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2553 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2556 /* DCT & quantize */
2557 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2559 for (i = 0; i < mb_block_count; i++) {
2562 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2563 // FIXME we could decide to change to quantizer instead of
2565 // JS: I don't think that would be a good idea it could lower
2566 // quality instead of improve it. Just INTRADC clipping
2567 // deserves changes in quantizer
2569 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2571 s->block_last_index[i] = -1;
2573 if (s->quantizer_noise_shaping) {
2574 for (i = 0; i < mb_block_count; i++) {
2576 s->block_last_index[i] =
2577 dct_quantize_refine(s, s->block[i], weight[i],
2578 orig[i], i, s->qscale);
2583 if (s->luma_elim_threshold && !s->mb_intra)
2584 for (i = 0; i < 4; i++)
2585 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2586 if (s->chroma_elim_threshold && !s->mb_intra)
2587 for (i = 4; i < mb_block_count; i++)
2588 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2590 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2591 for (i = 0; i < mb_block_count; i++) {
2592 if (s->block_last_index[i] == -1)
2593 s->coded_score[i] = INT_MAX / 256;
2598 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2599 s->block_last_index[4] =
2600 s->block_last_index[5] = 0;
2602 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2603 if (!s->chroma_y_shift) { /* 422 / 444 */
2604 for (i=6; i<12; i++) {
2605 s->block_last_index[i] = 0;
2606 s->block[i][0] = s->block[4][0];
2611 // non c quantize code returns incorrect block_last_index FIXME
2612 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2613 for (i = 0; i < mb_block_count; i++) {
2615 if (s->block_last_index[i] > 0) {
2616 for (j = 63; j > 0; j--) {
2617 if (s->block[i][s->intra_scantable.permutated[j]])
2620 s->block_last_index[i] = j;
2625 /* huffman encode */
2626 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2627 case AV_CODEC_ID_MPEG1VIDEO:
2628 case AV_CODEC_ID_MPEG2VIDEO:
2629 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2630 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2632 case AV_CODEC_ID_MPEG4:
2633 if (CONFIG_MPEG4_ENCODER)
2634 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2636 case AV_CODEC_ID_MSMPEG4V2:
2637 case AV_CODEC_ID_MSMPEG4V3:
2638 case AV_CODEC_ID_WMV1:
2639 if (CONFIG_MSMPEG4_ENCODER)
2640 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2642 case AV_CODEC_ID_WMV2:
2643 if (CONFIG_WMV2_ENCODER)
2644 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2646 case AV_CODEC_ID_H261:
2647 if (CONFIG_H261_ENCODER)
2648 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2650 case AV_CODEC_ID_H263:
2651 case AV_CODEC_ID_H263P:
2652 case AV_CODEC_ID_FLV1:
2653 case AV_CODEC_ID_RV10:
2654 case AV_CODEC_ID_RV20:
2655 if (CONFIG_H263_ENCODER)
2656 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2658 case AV_CODEC_ID_MJPEG:
2659 case AV_CODEC_ID_AMV:
2660 if (CONFIG_MJPEG_ENCODER)
2661 ff_mjpeg_encode_mb(s, s->block);
/*
 * Encode one macroblock: select the chroma block geometry for the active
 * chroma format and forward to encode_mb_internal().
 * NOTE(review): the three trailing integer arguments are presumably
 * (mb_block_height, mb_block_width, mb_block_count) — confirm against the
 * encode_mb_internal() definition, which is not fully visible in this view.
 */
2668 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2670 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2671 else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2672 else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/*
 * Snapshot the parts of the encoder context that a trial macroblock encode
 * may modify, copying them from s into d.  Used together with
 * copy_context_after_encode() by the macroblock mode decision loop
 * (see encode_mb_hq()).
 * NOTE(review): this extraction elides some source lines (loop headers and
 * possibly further fields); comments below cover only what is visible.
 */
2675 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* motion-vector prediction state */
2678 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
/* MPEG-1 style skip run and DC predictors */
2681 d->mb_skip_run= s->mb_skip_run;
2683 d->last_dc[i] = s->last_dc[i];
/* bit-usage statistics */
2686 d->mv_bits= s->mv_bits;
2687 d->i_tex_bits= s->i_tex_bits;
2688 d->p_tex_bits= s->p_tex_bits;
2689 d->i_count= s->i_count;
2690 d->f_count= s->f_count;
2691 d->b_count= s->b_count;
2692 d->skip_count= s->skip_count;
2693 d->misc_bits= s->misc_bits;
/* quantizer state */
2697 d->qscale= s->qscale;
2698 d->dquant= s->dquant;
/* VLC escape state (presumably MSMPEG4 escape-3 — confirm) */
2700 d->esc3_level_length= s->esc3_level_length;
/*
 * Copy back the encoder state after a trial macroblock encode, from s into
 * d.  Counterpart of copy_context_before_encode(); additionally transfers
 * the results of the encode (motion vectors, intra/skip decision,
 * block_last_index, interlaced_dct) so the winning candidate's state can be
 * kept.
 * NOTE(review): several source lines are elided in this view (loop headers,
 * likely a d->pb2 copy inside the data_partitioning branch); comments cover
 * only what is visible.
 */
2703 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* motion vectors chosen by the trial encode + MV prediction state */
2706 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2707 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
/* MPEG-1 style skip run and DC predictors */
2710 d->mb_skip_run= s->mb_skip_run;
2712 d->last_dc[i] = s->last_dc[i];
/* bit-usage statistics */
2715 d->mv_bits= s->mv_bits;
2716 d->i_tex_bits= s->i_tex_bits;
2717 d->p_tex_bits= s->p_tex_bits;
2718 d->i_count= s->i_count;
2719 d->f_count= s->f_count;
2720 d->b_count= s->b_count;
2721 d->skip_count= s->skip_count;
2722 d->misc_bits= s->misc_bits;
/* macroblock coding decisions made during the trial encode */
2724 d->mb_intra= s->mb_intra;
2725 d->mb_skipped= s->mb_skipped;
2726 d->mv_type= s->mv_type;
2727 d->mv_dir= s->mv_dir;
/* partitioned-bitstream contexts (MPEG-4 data partitioning) */
2729 if(s->data_partitioning){
2731 d->tex_pb= s->tex_pb;
2735 d->block_last_index[i]= s->block_last_index[i];
2736 d->interlaced_dct= s->interlaced_dct;
2737 d->qscale= s->qscale;
2739 d->esc3_level_length= s->esc3_level_length;
/*
 * Trial-encode the current macroblock as candidate mode 'type' and keep it
 * if it scores better than the best candidate so far.
 *
 * The context is first restored from 'backup', the MB is encoded into the
 * double-buffered bit writers pb/pb2/tex_pb slot selected by *next_block,
 * and a score is computed: raw bit count, or a rate-distortion cost
 * (bits * lambda2 + SSE) when mb_decision == FF_MB_DECISION_RD.  For RD
 * the reconstruction is redirected into the rd_scratchpad so the real
 * destination frame is untouched.  When the score beats *dmin the state is
 * saved into 'best' via copy_context_after_encode().
 * NOTE(review): the score/dmin comparison and *next_block flip are on
 * source lines elided from this view — described here from the visible
 * copy_context_after_encode() call and caller usage; confirm in full source.
 */
2742 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2743 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2744 int *dmin, int *next_block, int motion_x, int motion_y)
2747 uint8_t *dest_backup[3];
2749 copy_context_before_encode(s, backup, type);
/* select the trial-side block buffer and bit writers */
2751 s->block= s->blocks[*next_block];
2752 s->pb= pb[*next_block];
2753 if(s->data_partitioning){
2754 s->pb2 = pb2 [*next_block];
2755 s->tex_pb= tex_pb[*next_block];
/* redirect reconstruction into the scratchpad for RD scoring */
2759 memcpy(dest_backup, s->dest, sizeof(s->dest));
2760 s->dest[0] = s->sc.rd_scratchpad;
2761 s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2762 s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2763 av_assert0(s->linesize >= 32); //FIXME
2766 encode_mb(s, motion_x, motion_y);
/* score = bits used across all active bit writers */
2768 score= put_bits_count(&s->pb);
2769 if(s->data_partitioning){
2770 score+= put_bits_count(&s->pb2);
2771 score+= put_bits_count(&s->tex_pb);
/* full RD: decode the trial MB and add the distortion term */
2774 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2775 ff_mpv_decode_mb(s, s->block);
2777 score *= s->lambda2;
2778 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2782 memcpy(s->dest, dest_backup, sizeof(s->dest));
2789 copy_context_after_encode(best, s, type);
/*
 * Sum of squared errors between two w x h pixel blocks.
 * Fast DSP paths handle the common 16x16 (condition line elided in this
 * view) and 8x8 sizes; the generic fallback walks the pixels and looks the
 * squared difference up in ff_square_tab, offset by 256 so that negative
 * differences index correctly.
 */
2793 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2794 uint32_t *sq = ff_square_tab + 256;
2799 return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2800 else if(w==8 && h==8)
2801 return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2805 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/*
 * Distortion (SSE) of the current macroblock: compares the reconstruction
 * in s->dest[] against the source frame s->new_picture across luma and both
 * chroma planes.  w/h are clamped for partial macroblocks at the right and
 * bottom picture edges; full 16x16 MBs use the DSP sse (or nsse when
 * mb_cmp == FF_CMP_NSSE), edge MBs fall back to the generic sse() helper
 * with half-size chroma dimensions.
 * NOTE(review): the w/h declarations and the "w==16 && h==16" guard are on
 * elided lines; clamping shown below implies they start at 16.
 */
2814 static int sse_mb(MpegEncContext *s){
2818 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2819 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2822 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2823 return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2824 s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2825 s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2827 return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2828 s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2829 s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
/* partial (edge) macroblock: generic path, chroma at half resolution */
2832 return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2833 +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2834 +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/*
 * Slice-thread worker for the motion-estimation pre-pass: scans the slice's
 * macroblocks in reverse raster order (bottom-right to top-left) and runs
 * ff_pre_estimate_p_frame_motion() on each, using the pre-pass diamond size
 * (avctx->pre_dia_size).  'arg' is a pointer to this thread's
 * MpegEncContext slice context.
 */
2837 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2838 MpegEncContext *s= *(void**)arg;
2842 s->me.dia_size= s->avctx->pre_dia_size;
2843 s->first_slice_line=1;
/* reverse scan: rows end_mb_y-1 .. start_mb_y, columns right to left */
2844 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2845 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2846 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2848 s->first_slice_line=0;
/*
 * Slice-thread worker for the main motion-estimation pass: iterates the
 * slice's macroblocks in raster order and runs B- or P-frame motion
 * estimation per MB, storing motion vectors and mb_type in the context.
 * 'arg' is a pointer to this thread's MpegEncContext slice context.
 */
2856 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2857 MpegEncContext *s= *(void**)arg;
2859 ff_check_alignment();
2861 s->me.dia_size= s->avctx->dia_size;
2862 s->first_slice_line=1;
2863 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2864 s->mb_x=0; //for block init below
2865 ff_init_block_index(s);
2866 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
/* advance the per-block indices by one MB (2 luma block columns) */
2867 s->block_index[0]+=2;
2868 s->block_index[1]+=2;
2869 s->block_index[2]+=2;
2870 s->block_index[3]+=2;
2872 /* compute motion vector & mb_type and store in context */
2873 if(s->pict_type==AV_PICTURE_TYPE_B)
2874 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2876 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2878 s->first_slice_line=0;
/*
 * Slice-thread worker computing per-macroblock luma statistics used by rate
 * control / scene analysis: for every MB of the source frame it stores the
 * variance (mb_var) and mean (mb_mean) and accumulates the slice's variance
 * sum into me.mb_var_sum_temp.
 * NOTE(review): the xx/yy pixel-coordinate computations are on elided
 * lines; presumably xx = mb_x*16, yy = mb_y*16 — confirm in full source.
 */
2883 static int mb_var_thread(AVCodecContext *c, void *arg){
2884 MpegEncContext *s= *(void**)arg;
2887 ff_check_alignment();
2889 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2890 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2893 uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2895 int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
/* variance = E[x^2] - E[x]^2 over the 16x16 block, with rounding bias */
2897 varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2898 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2900 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2901 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2902 s->me.mb_var_sum_temp += varc;
/*
 * Terminate the current slice in the output bitstream: codec-specific
 * stuffing (MPEG-4 partition merge + stuffing, MJPEG stuffing), then
 * byte-align and flush the PutBitContext.  With two-pass rate control
 * (PASS1) the alignment bits are accounted as misc_bits.
 */
2908 static void write_slice_end(MpegEncContext *s){
2909 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2910 if(s->partitioned_frame){
2911 ff_mpeg4_merge_partitions(s);
2914 ff_mpeg4_stuffing(&s->pb);
2915 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2916 ff_mjpeg_encode_stuffing(s);
/* byte-align and flush the slice */
2919 avpriv_align_put_bits(&s->pb);
2920 flush_put_bits(&s->pb);
2922 if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2923 s->misc_bits+= get_bits_diff(s);
/*
 * Fill the most recently reserved 12-byte macroblock-info record (used for
 * RFC 2190-style H.263 packetization side data): bit offset of the MB in
 * the stream, quantizer, GOB number, MB address within the GOB, and the
 * predicted motion vector (hmv1/vmv1).  The second MV pair is written as
 * zero since 4MV is not implemented here.
 */
2926 static void write_mb_info(MpegEncContext *s)
2928 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2929 int offset = put_bits_count(&s->pb);
2930 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2931 int gobn = s->mb_y / s->gob_index;
2933 if (CONFIG_H263_ENCODER)
2934 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2935 bytestream_put_le32(&ptr, offset);
2936 bytestream_put_byte(&ptr, s->qscale);
2937 bytestream_put_byte(&ptr, gobn);
2938 bytestream_put_le16(&ptr, mba);
2939 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2940 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2941 /* 4MV not implemented */
2942 bytestream_put_byte(&ptr, 0); /* hmv2 */
2943 bytestream_put_byte(&ptr, 0); /* vmv2 */
/*
 * Maintain the macroblock-info side-data records.  Called once per MB with
 * startcode==0, and again right after a resync/start code is written with
 * startcode==1.  When enough bits have accumulated since the last record
 * (mb_info interval), a new 12-byte slot is reserved; the slot is actually
 * filled by write_mb_info().
 * NOTE(review): the early-return guard and the write_mb_info() calls are
 * on elided lines in this view.
 */
2946 static void update_mb_info(MpegEncContext *s, int startcode)
2950 if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2951 s->mb_info_size += 12;
2952 s->prev_mb_info = s->last_mb_info;
2955 s->prev_mb_info = put_bits_count(&s->pb)/8;
2956 /* This might have incremented mb_info_size above, and we return without
2957 * actually writing any info into that slot yet. But in that case,
2958 * this will be called again at the start of the after writing the
2959 * start code, actually writing the mb info. */
/* startcode path: remember where this MB starts and reserve a first slot */
2963 s->last_mb_info = put_bits_count(&s->pb)/8;
2964 if (!s->mb_info_size)
2965 s->mb_info_size += 12;
/*
 * Grow the shared bitstream buffer when fewer than 'threshold' bytes remain
 * in the PutBitContext.  Only possible with a single slice context writing
 * into avctx->internal->byte_buffer: the buffer is enlarged by
 * 'size_increase', existing bits are copied over, and the PutBitContext
 * plus the dependent raw pointers (ptr_lastgob, vbv_delay_ptr) are rebased
 * onto the new allocation.
 *
 * Returns 0 on success (implied; elided line), AVERROR(ENOMEM) if the new
 * size would overflow or allocation fails, AVERROR(EINVAL) if after the
 * attempt there is still less than 'threshold' space (e.g. multi-slice or
 * user-supplied buffer where reallocation is not possible).
 */
2969 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2971 if ( s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2972 && s->slice_context_count == 1
2973 && s->pb.buf == s->avctx->internal->byte_buffer) {
/* remember offsets of raw pointers into the old buffer for rebasing */
2974 int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2975 int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2977 uint8_t *new_buffer = NULL;
2978 int new_buffer_size = 0;
2980 if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2981 av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2982 return AVERROR(ENOMEM);
2987 av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2988 s->avctx->internal->byte_buffer_size + size_increase);
2990 return AVERROR(ENOMEM);
2992 memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2993 av_free(s->avctx->internal->byte_buffer);
2994 s->avctx->internal->byte_buffer = new_buffer;
2995 s->avctx->internal->byte_buffer_size = new_buffer_size;
2996 rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2997 s->ptr_lastgob = s->pb.buf + lastgob_pos;
2998 s->vbv_delay_ptr = s->pb.buf + vbv_pos;
3000 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
3001 return AVERROR(EINVAL);
3005 static int encode_thread(AVCodecContext *c, void *arg){
3006 MpegEncContext *s= *(void**)arg;
3008 int chr_h= 16>>s->chroma_y_shift;
3010 MpegEncContext best_s = { 0 }, backup_s;
3011 uint8_t bit_buf[2][MAX_MB_BYTES];
3012 uint8_t bit_buf2[2][MAX_MB_BYTES];
3013 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
3014 PutBitContext pb[2], pb2[2], tex_pb[2];
3016 ff_check_alignment();
3019 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
3020 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
3021 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
3024 s->last_bits= put_bits_count(&s->pb);
3035 /* init last dc values */
3036 /* note: quant matrix value (8) is implied here */
3037 s->last_dc[i] = 128 << s->intra_dc_precision;
3039 s->current_picture.encoding_error[i] = 0;
3041 if(s->codec_id==AV_CODEC_ID_AMV){
3042 s->last_dc[0] = 128*8/13;
3043 s->last_dc[1] = 128*8/14;
3044 s->last_dc[2] = 128*8/14;
3047 memset(s->last_mv, 0, sizeof(s->last_mv));
3051 switch(s->codec_id){
3052 case AV_CODEC_ID_H263:
3053 case AV_CODEC_ID_H263P:
3054 case AV_CODEC_ID_FLV1:
3055 if (CONFIG_H263_ENCODER)
3056 s->gob_index = H263_GOB_HEIGHT(s->height);
3058 case AV_CODEC_ID_MPEG4:
3059 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
3060 ff_mpeg4_init_partitions(s);
3066 s->first_slice_line = 1;
3067 s->ptr_lastgob = s->pb.buf;
3068 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
3072 ff_set_qscale(s, s->qscale);
3073 ff_init_block_index(s);
3075 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3076 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
3077 int mb_type= s->mb_type[xy];
3081 int size_increase = s->avctx->internal->byte_buffer_size/4
3082 + s->mb_width*MAX_MB_BYTES;
3084 ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
3085 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
3086 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
3089 if(s->data_partitioning){
3090 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
3091 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
3092 av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3098 s->mb_y = mb_y; // moved into loop, can get changed by H.261
3099 ff_update_block_index(s);
3101 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3102 ff_h261_reorder_mb_index(s);
3103 xy= s->mb_y*s->mb_stride + s->mb_x;
3104 mb_type= s->mb_type[xy];
3107 /* write gob / video packet header */
3109 int current_packet_size, is_gob_start;
3111 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
3113 is_gob_start = s->rtp_payload_size &&
3114 current_packet_size >= s->rtp_payload_size &&
3117 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3119 switch(s->codec_id){
3120 case AV_CODEC_ID_H263:
3121 case AV_CODEC_ID_H263P:
3122 if(!s->h263_slice_structured)
3123 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3125 case AV_CODEC_ID_MPEG2VIDEO:
3126 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3127 case AV_CODEC_ID_MPEG1VIDEO:
3128 if(s->mb_skip_run) is_gob_start=0;
3130 case AV_CODEC_ID_MJPEG:
3131 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3136 if(s->start_mb_y != mb_y || mb_x!=0){
3139 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3140 ff_mpeg4_init_partitions(s);
3144 av_assert2((put_bits_count(&s->pb)&7) == 0);
3145 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3147 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3148 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3149 int d = 100 / s->error_rate;
3151 current_packet_size=0;
3152 s->pb.buf_ptr= s->ptr_lastgob;
3153 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3157 #if FF_API_RTP_CALLBACK
3158 FF_DISABLE_DEPRECATION_WARNINGS
3159 if (s->avctx->rtp_callback){
3160 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3161 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3163 FF_ENABLE_DEPRECATION_WARNINGS
3165 update_mb_info(s, 1);
3167 switch(s->codec_id){
3168 case AV_CODEC_ID_MPEG4:
3169 if (CONFIG_MPEG4_ENCODER) {
3170 ff_mpeg4_encode_video_packet_header(s);
3171 ff_mpeg4_clean_buffers(s);
3174 case AV_CODEC_ID_MPEG1VIDEO:
3175 case AV_CODEC_ID_MPEG2VIDEO:
3176 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3177 ff_mpeg1_encode_slice_header(s);
3178 ff_mpeg1_clean_buffers(s);
3181 case AV_CODEC_ID_H263:
3182 case AV_CODEC_ID_H263P:
3183 if (CONFIG_H263_ENCODER)
3184 ff_h263_encode_gob_header(s, mb_y);
3188 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3189 int bits= put_bits_count(&s->pb);
3190 s->misc_bits+= bits - s->last_bits;
3194 s->ptr_lastgob += current_packet_size;
3195 s->first_slice_line=1;
3196 s->resync_mb_x=mb_x;
3197 s->resync_mb_y=mb_y;
3201 if( (s->resync_mb_x == s->mb_x)
3202 && s->resync_mb_y+1 == s->mb_y){
3203 s->first_slice_line=0;
3207 s->dquant=0; //only for QP_RD
3209 update_mb_info(s, 0);
3211 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3213 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3215 copy_context_before_encode(&backup_s, s, -1);
3217 best_s.data_partitioning= s->data_partitioning;
3218 best_s.partitioned_frame= s->partitioned_frame;
3219 if(s->data_partitioning){
3220 backup_s.pb2= s->pb2;
3221 backup_s.tex_pb= s->tex_pb;
3224 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3225 s->mv_dir = MV_DIR_FORWARD;
3226 s->mv_type = MV_TYPE_16X16;
3228 s->mv[0][0][0] = s->p_mv_table[xy][0];
3229 s->mv[0][0][1] = s->p_mv_table[xy][1];
3230 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3231 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3233 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3234 s->mv_dir = MV_DIR_FORWARD;
3235 s->mv_type = MV_TYPE_FIELD;
3238 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3239 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3240 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3242 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3243 &dmin, &next_block, 0, 0);
3245 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3246 s->mv_dir = MV_DIR_FORWARD;
3247 s->mv_type = MV_TYPE_16X16;
3251 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3252 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3254 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3255 s->mv_dir = MV_DIR_FORWARD;
3256 s->mv_type = MV_TYPE_8X8;
3259 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3260 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3262 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3263 &dmin, &next_block, 0, 0);
3265 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3266 s->mv_dir = MV_DIR_FORWARD;
3267 s->mv_type = MV_TYPE_16X16;
3269 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3270 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3271 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3272 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3274 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3275 s->mv_dir = MV_DIR_BACKWARD;
3276 s->mv_type = MV_TYPE_16X16;
3278 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3279 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3280 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3281 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3283 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3284 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3285 s->mv_type = MV_TYPE_16X16;
3287 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3288 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3289 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3290 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3291 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3292 &dmin, &next_block, 0, 0);
3294 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3295 s->mv_dir = MV_DIR_FORWARD;
3296 s->mv_type = MV_TYPE_FIELD;
3299 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3300 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3301 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3303 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3304 &dmin, &next_block, 0, 0);
3306 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3307 s->mv_dir = MV_DIR_BACKWARD;
3308 s->mv_type = MV_TYPE_FIELD;
3311 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3312 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3313 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3315 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3316 &dmin, &next_block, 0, 0);
3318 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3319 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3320 s->mv_type = MV_TYPE_FIELD;
3322 for(dir=0; dir<2; dir++){
3324 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3325 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3326 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3329 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3330 &dmin, &next_block, 0, 0);
3332 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3334 s->mv_type = MV_TYPE_16X16;
3338 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3339 &dmin, &next_block, 0, 0);
3340 if(s->h263_pred || s->h263_aic){
3342 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3344 ff_clean_intra_table_entries(s); //old mode?
3348 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3349 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3350 const int last_qp= backup_s.qscale;
3353 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3354 static const int dquant_tab[4]={-1,1,-2,2};
3355 int storecoefs = s->mb_intra && s->dc_val[0];
3357 av_assert2(backup_s.dquant == 0);
3360 s->mv_dir= best_s.mv_dir;
3361 s->mv_type = MV_TYPE_16X16;
3362 s->mb_intra= best_s.mb_intra;
3363 s->mv[0][0][0] = best_s.mv[0][0][0];
3364 s->mv[0][0][1] = best_s.mv[0][0][1];
3365 s->mv[1][0][0] = best_s.mv[1][0][0];
3366 s->mv[1][0][1] = best_s.mv[1][0][1];
3368 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3369 for(; qpi<4; qpi++){
3370 int dquant= dquant_tab[qpi];
3371 qp= last_qp + dquant;
3372 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3374 backup_s.dquant= dquant;
3377 dc[i]= s->dc_val[0][ s->block_index[i] ];
3378 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3382 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3383 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3384 if(best_s.qscale != qp){
3387 s->dc_val[0][ s->block_index[i] ]= dc[i];
3388 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3395 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3396 int mx= s->b_direct_mv_table[xy][0];
3397 int my= s->b_direct_mv_table[xy][1];
3399 backup_s.dquant = 0;
3400 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3402 ff_mpeg4_set_direct_mv(s, mx, my);
3403 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3404 &dmin, &next_block, mx, my);
3406 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3407 backup_s.dquant = 0;
3408 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3410 ff_mpeg4_set_direct_mv(s, 0, 0);
3411 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3412 &dmin, &next_block, 0, 0);
3414 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3417 coded |= s->block_last_index[i];
3420 memcpy(s->mv, best_s.mv, sizeof(s->mv));
3421 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3422 mx=my=0; //FIXME find the one we actually used
3423 ff_mpeg4_set_direct_mv(s, mx, my);
3424 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3432 s->mv_dir= best_s.mv_dir;
3433 s->mv_type = best_s.mv_type;
3435 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3436 s->mv[0][0][1] = best_s.mv[0][0][1];
3437 s->mv[1][0][0] = best_s.mv[1][0][0];
3438 s->mv[1][0][1] = best_s.mv[1][0][1];*/
3441 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3442 &dmin, &next_block, mx, my);
3447 s->current_picture.qscale_table[xy] = best_s.qscale;
3449 copy_context_after_encode(s, &best_s, -1);
3451 pb_bits_count= put_bits_count(&s->pb);
3452 flush_put_bits(&s->pb);
3453 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3456 if(s->data_partitioning){
3457 pb2_bits_count= put_bits_count(&s->pb2);
3458 flush_put_bits(&s->pb2);
3459 avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3460 s->pb2= backup_s.pb2;
3462 tex_pb_bits_count= put_bits_count(&s->tex_pb);
3463 flush_put_bits(&s->tex_pb);
3464 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3465 s->tex_pb= backup_s.tex_pb;
3467 s->last_bits= put_bits_count(&s->pb);
3469 if (CONFIG_H263_ENCODER &&
3470 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3471 ff_h263_update_motion_val(s);
3473 if(next_block==0){ //FIXME 16 vs linesize16
3474 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3475 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3476 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3479 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3480 ff_mpv_decode_mb(s, s->block);
3482 int motion_x = 0, motion_y = 0;
3483 s->mv_type=MV_TYPE_16X16;
3484 // only one MB-Type possible
3487 case CANDIDATE_MB_TYPE_INTRA:
3490 motion_x= s->mv[0][0][0] = 0;
3491 motion_y= s->mv[0][0][1] = 0;
3493 case CANDIDATE_MB_TYPE_INTER:
3494 s->mv_dir = MV_DIR_FORWARD;
3496 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3497 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3499 case CANDIDATE_MB_TYPE_INTER_I:
3500 s->mv_dir = MV_DIR_FORWARD;
3501 s->mv_type = MV_TYPE_FIELD;
3504 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3505 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3506 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3509 case CANDIDATE_MB_TYPE_INTER4V:
3510 s->mv_dir = MV_DIR_FORWARD;
3511 s->mv_type = MV_TYPE_8X8;
3514 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3515 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3518 case CANDIDATE_MB_TYPE_DIRECT:
3519 if (CONFIG_MPEG4_ENCODER) {
3520 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3522 motion_x=s->b_direct_mv_table[xy][0];
3523 motion_y=s->b_direct_mv_table[xy][1];
3524 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3527 case CANDIDATE_MB_TYPE_DIRECT0:
3528 if (CONFIG_MPEG4_ENCODER) {
3529 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3531 ff_mpeg4_set_direct_mv(s, 0, 0);
3534 case CANDIDATE_MB_TYPE_BIDIR:
3535 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3537 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3538 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3539 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3540 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3542 case CANDIDATE_MB_TYPE_BACKWARD:
3543 s->mv_dir = MV_DIR_BACKWARD;
3545 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3546 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3548 case CANDIDATE_MB_TYPE_FORWARD:
3549 s->mv_dir = MV_DIR_FORWARD;
3551 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3552 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3554 case CANDIDATE_MB_TYPE_FORWARD_I:
3555 s->mv_dir = MV_DIR_FORWARD;
3556 s->mv_type = MV_TYPE_FIELD;
3559 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3560 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3561 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3564 case CANDIDATE_MB_TYPE_BACKWARD_I:
3565 s->mv_dir = MV_DIR_BACKWARD;
3566 s->mv_type = MV_TYPE_FIELD;
3569 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3570 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3571 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3574 case CANDIDATE_MB_TYPE_BIDIR_I:
3575 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3576 s->mv_type = MV_TYPE_FIELD;
3578 for(dir=0; dir<2; dir++){
3580 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3581 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3582 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3587 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3590 encode_mb(s, motion_x, motion_y);
3592 // RAL: Update last macroblock type
3593 s->last_mv_dir = s->mv_dir;
3595 if (CONFIG_H263_ENCODER &&
3596 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3597 ff_h263_update_motion_val(s);
3599 ff_mpv_decode_mb(s, s->block);
3602 /* clean the MV table in IPS frames for direct mode in B-frames */
3603 if(s->mb_intra /* && I,P,S_TYPE */){
3604 s->p_mv_table[xy][0]=0;
3605 s->p_mv_table[xy][1]=0;
3608 if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3612 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3613 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3615 s->current_picture.encoding_error[0] += sse(
3616 s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3617 s->dest[0], w, h, s->linesize);
3618 s->current_picture.encoding_error[1] += sse(
3619 s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3620 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3621 s->current_picture.encoding_error[2] += sse(
3622 s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3623 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3626 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3627 ff_h263_loop_filter(s);
3629 ff_dlog(s->avctx, "MB %d %d bits\n",
3630 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3634 //not beautiful here but we must write it before flushing so it has to be here
3635 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3636 ff_msmpeg4_encode_ext_header(s);
3640 #if FF_API_RTP_CALLBACK
3641 FF_DISABLE_DEPRECATION_WARNINGS
3642 /* Send the last GOB if RTP */
3643 if (s->avctx->rtp_callback) {
3644 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3645 int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3646 /* Call the RTP callback to send the last GOB */
3648 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3650 FF_ENABLE_DEPRECATION_WARNINGS
/* Accumulate a per-slice statistic from a worker context into the main
 * context and zero the source, so a field is never counted twice.
 * NOTE: expands to TWO statements — must not be used as an unbraced
 * if/else body. */
3656 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold the motion-estimation statistics gathered by a slice thread
 * context back into the main encoder context after the ME pass. */
3657 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3658 MERGE(me.scene_change_score);
3659 MERGE(me.mc_mb_var_sum_temp);
3660 MERGE(me.mb_var_sum_temp);
/* Merge the per-slice encoding results of a worker context into the main
 * context: bit/statistics counters, PSNR error sums, optional noise-
 * reduction accumulators, and finally the slice's bitstream itself, which
 * is appended to the main PutBitContext. Both bitstreams must be byte
 * aligned at this point (asserted below) for the raw copy to be valid. */
3663 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3666 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3667 MERGE(dct_count[1]);
3676 MERGE(er.error_count);
3677 MERGE(padding_bug_score);
3678 MERGE(current_picture.encoding_error[0]);
3679 MERGE(current_picture.encoding_error[1]);
3680 MERGE(current_picture.encoding_error[2]);
/* Denoise accumulators are only maintained when noise reduction is on. */
3682 if (dst->noise_reduction){
3683 for(i=0; i<64; i++){
3684 MERGE(dct_error_sum[0][i]);
3685 MERGE(dct_error_sum[1][i]);
/* Append the slice's bits to the main stream; alignment is required
 * because avpriv_copy_bits() copies from the byte buffer. */
3689 assert(put_bits_count(&src->pb) % 8 ==0);
3690 assert(put_bits_count(&dst->pb) % 8 ==0);
3691 avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3692 flush_put_bits(&dst->pb);
/* Pick the quality (lambda/qscale) for the current picture.
 * Priority: a pending s->next_lambda wins; otherwise, when not using a
 * fixed qscale, ask the rate controller (XviD-style or native, depending
 * on rc_strategy). With adaptive quantization the per-MB qscale tables
 * are cleaned up per codec before use.
 * @param dry_run nonzero = probe only; next_lambda is not consumed.
 * @return negative on rate-control failure (callers check `< 0`),
 *         otherwise non-negative. */
3695 static int estimate_qp(MpegEncContext *s, int dry_run){
3696 if (s->next_lambda){
3697 s->current_picture_ptr->f->quality =
3698 s->current_picture.f->quality = s->next_lambda;
3699 if(!dry_run) s->next_lambda= 0;
3700 } else if (!s->fixed_qscale) {
3703 if ((s->avctx->flags & AV_CODEC_FLAG_PASS2) && s->rc_strategy == MPV_RC_STRATEGY_XVID)
3704 quality = ff_xvid_rate_estimate_qscale(s, dry_run);
3707 quality = ff_rate_estimate_qscale(s, dry_run);
3708 s->current_picture_ptr->f->quality =
3709 s->current_picture.f->quality = quality;
/* A negative quality from the rate controller signals an error. */
3710 if (s->current_picture.f->quality < 0)
3714 if(s->adaptive_quant){
3715 switch(s->codec_id){
3716 case AV_CODEC_ID_MPEG4:
3717 if (CONFIG_MPEG4_ENCODER)
3718 ff_clean_mpeg4_qscales(s);
3720 case AV_CODEC_ID_H263:
3721 case AV_CODEC_ID_H263P:
3722 case AV_CODEC_ID_FLV1:
3723 if (CONFIG_H263_ENCODER)
3724 ff_clean_h263_qscales(s);
3727 ff_init_qscale_tab(s);
3730 s->lambda= s->lambda_table[0];
3733 s->lambda = s->current_picture.f->quality;
3738 /* must be called before writing the header */
/* Update the temporal distance bookkeeping (pp_time, pb_time,
 * last_non_b_time) from the current picture's pts. B-frames derive
 * pb_time from the surrounding reference distance; non-B frames advance
 * last_non_b_time. Requires a valid pts (asserted). */
3739 static void set_frame_distances(MpegEncContext * s){
3740 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3741 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3743 if(s->pict_type==AV_PICTURE_TYPE_B){
/* distance from previous reference to this B-frame */
3744 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3745 assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3747 s->pp_time= s->time - s->last_non_b_time;
3748 s->last_non_b_time= s->time;
3749 assert(s->picture_number==0 || s->pp_time > 0);
/* Encode one picture: set up timing and rounding state, run (threaded)
 * motion estimation, possibly promote a P-frame to I on scene change,
 * derive f_code/b_code and clip long MVs, run rate control, build the
 * quantization matrices (MJPEG/AMV), write the codec-specific picture
 * header, then dispatch encode_thread over all slice contexts and merge
 * the results. Returns 0 on success, negative on error (per the visible
 * `< 0` checks; some error paths lie in omitted lines). */
3753 static int encode_picture(MpegEncContext *s, int picture_number)
3757 int context_count = s->slice_context_count;
3759 s->picture_number = picture_number;
3761 /* Reset the average MB variance */
3762 s->me.mb_var_sum_temp =
3763 s->me.mc_mb_var_sum_temp = 0;
3765 /* we need to initialize some time vars before we can encode B-frames */
3766 // RAL: Condition added for MPEG1VIDEO
3767 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3768 set_frame_distances(s);
3769 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3770 ff_set_mpeg4_time(s);
3772 s->me.scene_change_score=0;
3774 //    s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* Rounding control: reset on I-frames, toggle on P-frames for codecs
 * with flip-flop rounding. */
3776 if(s->pict_type==AV_PICTURE_TYPE_I){
3777 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3778 else s->no_rounding=0;
3779 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3780 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3781 s->no_rounding ^= 1;
3784 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3785 if (estimate_qp(s,1) < 0)
3787 ff_get_2pass_fcode(s);
3788 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3789 if(s->pict_type==AV_PICTURE_TYPE_B)
3790 s->lambda= s->last_lambda_for[s->pict_type];
3792 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* Most codecs share one intra matrix for luma and chroma; only
 * AMV/MJPEG keep separate chroma matrices (freed/aliased here). */
3796 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3797 if(s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
3798 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3799 s->q_chroma_intra_matrix   = s->q_intra_matrix;
3800 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3803 s->mb_intra=0; //for the rate distortion & bit compare functions
3804 for(i=1; i<context_count; i++){
3805 ret = ff_update_duplicate_context(s->thread_context[i], s);
3813 /* Estimate motion for every MB */
3814 if(s->pict_type != AV_PICTURE_TYPE_I){
/* Scale lambda by the ME penalty compensation (rounded, Q8). */
3815 s->lambda  = (s->lambda  * s->me_penalty_compensation + 128) >> 8;
3816 s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3817 if (s->pict_type != AV_PICTURE_TYPE_B) {
3818 if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3820 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3824 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3825 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
/* I-frame: mark every MB intra; omit ME entirely. */
3827 for(i=0; i<s->mb_stride*s->mb_height; i++)
3828 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3830 if(!s->fixed_qscale){
3831 /* finding spatial complexity for I-frame rate control */
3832 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3835 for(i=1; i<context_count; i++){
3836 merge_context_after_me(s, s->thread_context[i]);
3838 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3839 s->current_picture.   mb_var_sum= s->current_picture_ptr->   mb_var_sum= s->me.   mb_var_sum_temp;
/* Scene-change promotion: re-encode the P-frame as intra. */
3842 if (s->me.scene_change_score > s->scenechange_threshold &&
3843 s->pict_type == AV_PICTURE_TYPE_P) {
3844 s->pict_type= AV_PICTURE_TYPE_I;
3845 for(i=0; i<s->mb_stride*s->mb_height; i++)
3846 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3847 if(s->msmpeg4_version >= 3)
3849 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3850 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* P/S frames: choose the f_code covering the MV range, then clamp
 * any MVs that still exceed it. */
3854 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3855 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3857 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3859 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3860 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3861 s->f_code= FFMAX3(s->f_code, a, b);
3864 ff_fix_long_p_mvs(s);
3865 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3866 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3870 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3871 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
/* B-frames: f_code from forward tables, b_code from backward tables. */
3876 if(s->pict_type==AV_PICTURE_TYPE_B){
3879 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3880 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3881 s->f_code = FFMAX(a, b);
3883 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3884 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3885 s->b_code = FFMAX(a, b);
3887 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3888 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3889 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3890 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3891 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3893 for(dir=0; dir<2; dir++){
3896 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3897 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3898 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3899 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* Final (non-dry-run) rate-control pass. */
3907 if (estimate_qp(s, 0) < 0)
3910 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3911 s->pict_type == AV_PICTURE_TYPE_I &&
3912 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3913 s->qscale= 3; //reduce clipping problems
3915 if (s->out_format == FMT_MJPEG) {
3916 const uint16_t *  luma_matrix = ff_mpeg1_default_intra_matrix;
3917 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3919 if (s->avctx->intra_matrix) {
3921 luma_matrix = s->avctx->intra_matrix;
3923 if (s->avctx->chroma_intra_matrix)
3924 chroma_matrix = s->avctx->chroma_intra_matrix;
3926 /* for mjpeg, we do include qscale in the matrix */
3928 int j = s->idsp.idct_permutation[i];
3930 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3931 s->       intra_matrix[j] = av_clip_uint8((  luma_matrix[i] * s->qscale) >> 3);
3933 s->y_dc_scale_table=
3934 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3935 s->chroma_intra_matrix[0] =
3936 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3937 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3938 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3939 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3940 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV uses fixed sp5x quantization tables and DC scales of 13/14. */
3943 if(s->codec_id == AV_CODEC_ID_AMV){
3944 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3945 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3947 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3949 s->intra_matrix[j]        = sp5x_quant_table[5*2+0][i];
3950 s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3952 s->y_dc_scale_table= y;
3953 s->c_dc_scale_table= c;
3954 s->intra_matrix[0] = 13;
3955 s->chroma_intra_matrix[0] = 14;
3956 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3957 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3958 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3959 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3963 //FIXME var duplication
3964 s->current_picture_ptr->f->key_frame =
3965 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3966 s->current_picture_ptr->f->pict_type =
3967 s->current_picture.f->pict_type = s->pict_type;
3969 if (s->current_picture.f->key_frame)
3970 s->picture_in_gop_number=0;
3972 s->mb_x = s->mb_y = 0;
3973 s->last_bits= put_bits_count(&s->pb);
/* Write the codec-family-specific picture header. */
3974 switch(s->out_format) {
3976 if (CONFIG_MJPEG_ENCODER && s->huffman != HUFFMAN_TABLE_OPTIMAL)
3977 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3978 s->pred, s->intra_matrix, s->chroma_intra_matrix);
3981 if (CONFIG_H261_ENCODER)
3982 ff_h261_encode_picture_header(s, picture_number);
3985 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3986 ff_wmv2_encode_picture_header(s, picture_number);
3987 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3988 ff_msmpeg4_encode_picture_header(s, picture_number);
3989 else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3990 ret = ff_mpeg4_encode_picture_header(s, picture_number);
3993 } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3994 ret = ff_rv10_encode_picture_header(s, picture_number);
3998 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3999 ff_rv20_encode_picture_header(s, picture_number);
4000 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
4001 ff_flv_encode_picture_header(s, picture_number);
4002 else if (CONFIG_H263_ENCODER)
4003 ff_h263_encode_picture_header(s, picture_number);
4006 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
4007 ff_mpeg1_encode_picture_header(s, picture_number);
4012 bits= put_bits_count(&s->pb);
4013 s->header_bits= bits - s->last_bits;
4015 for(i=1; i<context_count; i++){
4016 update_duplicate_context_after_me(s->thread_context[i], s);
/* Encode all slices in parallel, then merge each worker's bits and
 * statistics into the main context in order. */
4018 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
4019 for(i=1; i<context_count; i++){
4020 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
4021 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
4022 merge_context_after_encode(s, s->thread_context[i]);
/* Noise-reduction pre-quantization: shrink each DCT coefficient toward
 * zero by the learned per-coefficient offset (dct_offset), while
 * accumulating the coefficient magnitudes into dct_error_sum so the
 * offsets can adapt. Separate statistics are kept for intra and inter
 * blocks (indexed by s->mb_intra). Coefficients are clamped at zero so
 * the offset never flips a coefficient's sign. */
4028 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
4029 const int intra= s->mb_intra;
4032 s->dct_count[intra]++;
4034 for(i=0; i<64; i++){
4035 int level= block[i];
/* positive coefficient: record magnitude, subtract offset, clamp at 0 */
4039 s->dct_error_sum[intra][i] += level;
4040 level -= s->dct_offset[intra][i];
4041 if(level<0) level=0;
/* negative coefficient: mirror of the above */
4043 s->dct_error_sum[intra][i] -= level;
4044 level += s->dct_offset[intra][i];
4045 if(level>0) level=0;
/* Rate-distortion optimal ("trellis") quantization of one 8x8 block.
 * Forward-DCTs the block, computes up to two candidate quantized levels
 * per coefficient, then runs a Viterbi-style survivor search over
 * (run, level) paths minimizing distortion + lambda * bits, using the
 * codec's VLC length tables. The winning path is written back into
 * `block` in coefficient order.
 * @param n        block index; n < 4 selects luma matrices, else chroma.
 * @param overflow set to nonzero if a level exceeded s->max_qcoeff.
 * @return index of the last nonzero coefficient (scan order), or a
 *         negative value when the block quantizes to nothing. */
4052 static int dct_quantize_trellis_c(MpegEncContext *s,
4053 int16_t *block, int n,
4054 int qscale, int *overflow){
4056 const uint16_t *matrix;
4057 const uint8_t *scantable= s->intra_scantable.scantable;
4058 const uint8_t *perm_scantable= s->intra_scantable.permutated;
4060 unsigned int threshold1, threshold2;
4072 int coeff_count[64];
4073 int qmul, qadd, start_i, last_non_zero, i, dc;
4074 const int esc_length= s->ac_esc_length;
4076 uint8_t * last_length;
/* lambda in the same fixed-point scale as the squared-error distortion */
4077 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
4080 s->fdsp.fdct(block);
4082 if(s->dct_error_sum)
4083 s->denoise_dct(s, block);
4085 qadd= ((qscale-1)|1)*8;
/* MPEG-2 may use the non-linear qscale table */
4087 if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
4088 else                 mpeg2_qscale = qscale << 1;
4099 /* For AIC we skip quant/dequant of INTRADC */
4104 /* note: block[0] is assumed to be positive */
4105 block[0] = (block[0] + (q >> 1)) / q;
4108 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4109 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4110 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4111 bias= 1<<(QMAT_SHIFT-1);
4113 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4114 length     = s->intra_chroma_ac_vlc_length;
4115 last_length= s->intra_chroma_ac_vlc_last_length;
4117 length     = s->intra_ac_vlc_length;
4118 last_length= s->intra_ac_vlc_last_length;
4123 qmat = s->q_inter_matrix[qscale];
4124 matrix = s->inter_matrix;
4125 length     = s->inter_ac_vlc_length;
4126 last_length= s->inter_ac_vlc_last_length;
4130 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4131 threshold2= (threshold1<<1);
/* Scan backwards to find the last coefficient that survives
 * quantization at all. */
4133 for(i=63; i>=start_i; i--) {
4134 const int j = scantable[i];
4135 int level = block[j] * qmat[j];
4137 if(((unsigned)(level+threshold1))>threshold2){
/* Build up to two candidate levels (level and level-1, with sign)
 * per surviving coefficient. */
4143 for(i=start_i; i<=last_non_zero; i++) {
4144 const int j = scantable[i];
4145 int level = block[j] * qmat[j];
4147 //        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
4148 //           || bias-level >= (1<<(QMAT_SHIFT - 3))){
4149 if(((unsigned)(level+threshold1))>threshold2){
4151 level= (bias + level)>>QMAT_SHIFT;
4153 coeff[1][i]= level-1;
4154 //                coeff[2][k]= level-2;
4156 level= (bias - level)>>QMAT_SHIFT;
4157 coeff[0][i]= -level;
4158 coeff[1][i]= -level+1;
4159 //                coeff[2][k]= -level+2;
4161 coeff_count[i]= FFMIN(level, 2);
4162 av_assert2(coeff_count[i]);
4165 coeff[0][i]= (level>>31)|1;
4170 *overflow= s->max_qcoeff < max; //overflow might have happened
4172 if(last_non_zero < start_i){
4173 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4174 return last_non_zero;
4177 score_tab[start_i]= 0;
4178 survivor[0]= start_i;
/* Dynamic-programming sweep: for each position try every candidate
 * level against every survivor start, scoring distortion + bits. */
4181 for(i=start_i; i<=last_non_zero; i++){
4182 int level_index, j, zero_distortion;
4183 int dct_coeff= FFABS(block[ scantable[i] ]);
4184 int best_score=256*256*256*120;
/* ifast fdct output is AAN-scaled; undo for the distortion measure */
4186 if (s->fdsp.fdct == ff_fdct_ifast)
4187 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4188 zero_distortion= dct_coeff*dct_coeff;
4190 for(level_index=0; level_index < coeff_count[i]; level_index++){
4192 int level= coeff[level_index][i];
4193 const int alevel= FFABS(level);
/* Reconstruct the dequantized value per codec family. */
4198 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4199 unquant_coeff= alevel*qmul + qadd;
4200 } else if(s->out_format == FMT_MJPEG) {
4201 j = s->idsp.idct_permutation[scantable[i]];
4202 unquant_coeff = alevel * matrix[j] * 8;
4204 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4206 unquant_coeff = (int)(  alevel  * mpeg2_qscale * matrix[j]) >> 4;
4207 unquant_coeff =   (unquant_coeff - 1) | 1;
4209 unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4210 unquant_coeff =   (unquant_coeff - 1) | 1;
4215 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* in-range level: use the VLC length table */
4217 if((level&(~127)) == 0){
4218 for(j=survivor_count-1; j>=0; j--){
4219 int run= i - survivor[j];
4220 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4221 score += score_tab[i-run];
4223 if(score < best_score){
4226 level_tab[i+1]= level-64;
4230 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4231 for(j=survivor_count-1; j>=0; j--){
4232 int run= i - survivor[j];
4233 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4234 score += score_tab[i-run];
4235 if(score < last_score){
4238 last_level= level-64;
/* out-of-range level: must use the escape code */
4244 distortion += esc_length*lambda;
4245 for(j=survivor_count-1; j>=0; j--){
4246 int run= i - survivor[j];
4247 int score= distortion + score_tab[i-run];
4249 if(score < best_score){
4252 level_tab[i+1]= level-64;
4256 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4257 for(j=survivor_count-1; j>=0; j--){
4258 int run= i - survivor[j];
4259 int score= distortion + score_tab[i-run];
4260 if(score < last_score){
4263 last_level= level-64;
4271 score_tab[i+1]= best_score;
4273 // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4274 if(last_non_zero <= 27){
4275 for(; survivor_count; survivor_count--){
4276 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4280 for(; survivor_count; survivor_count--){
4281 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4286 survivor[ survivor_count++ ]= i+1;
/* Non-H.263/H.261: pick the best stopping position (EOB cost ~2 bits). */
4289 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4290 last_score= 256*256*256*120;
4291 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4292 int score= score_tab[i];
4294 score += lambda * 2; // FIXME more exact?
4296 if(score < last_score){
4299 last_level= level_tab[i];
4300 last_run= run_tab[i];
4305 s->coded_score[n] = last_score;
4307 dc= FFABS(block[0]);
4308 last_non_zero= last_i - 1;
4309 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4311 if(last_non_zero < start_i)
4312 return last_non_zero;
/* Special case: only a DC-position coefficient remains — pick the
 * best level for it directly. */
4314 if(last_non_zero == 0 && start_i == 0){
4316 int best_score= dc * dc;
4318 for(i=0; i<coeff_count[0]; i++){
4319 int level= coeff[i][0];
4320 int alevel= FFABS(level);
4321 int unquant_coeff, score, distortion;
4323 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4324 unquant_coeff= (alevel*qmul + qadd)>>3;
4326 unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4327 unquant_coeff =   (unquant_coeff - 1) | 1;
4329 unquant_coeff = (unquant_coeff + 4) >> 3;
4330 unquant_coeff<<= 3 + 3;
4332 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4334 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4335 else                    score= distortion + esc_length*lambda;
4337 if(score < best_score){
4339 best_level= level - 64;
4342 block[0]= best_level;
4343 s->coded_score[n] = best_score - dc*dc;
4344 if(best_level == 0) return -1;
4345 else                return last_non_zero;
/* Walk the winning path backwards and write levels into the block. */
4349 av_assert2(last_level);
4351 block[ perm_scantable[last_non_zero] ]= last_level;
4354 for(; i>start_i; i -= run_tab[i] + 1){
4355 block[ perm_scantable[i-1] ]= level_tab[i];
4358 return last_non_zero;
4361 //#define REFINE_STATS 1
/* 8x8 IDCT basis images in fixed point (BASIS_SHIFT), indexed by
 * permuted coefficient position; filled lazily by build_basis(). */
4362 static int16_t basis[64][64];
/* Precompute the 2-D DCT basis functions used by dct_quantize_refine():
 * basis[perm[coef]][pixel] = scaled cos(...)cos(...), with the usual
 * 1/sqrt(2) normalization on the first row/column. */
4364 static void build_basis(uint8_t *perm){
4371 double s= 0.25*(1<<BASIS_SHIFT);
4373 int perm_index= perm[index];
4374 if(i==0) s*= sqrt(0.5);
4375 if(j==0) s*= sqrt(0.5);
4376 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Iterative quantization refinement with noise shaping (QNS).
 * Starting from the already-quantized `block`, repeatedly tries +/-1
 * changes on each coefficient, scoring the reconstruction error in the
 * pixel domain against `orig` (via the precomputed DCT basis and the
 * perceptual weights in `weight`) plus the VLC bit-cost delta, and
 * applies the best change until no improvement remains.
 * @return index of the last nonzero coefficient after refinement. */
4383 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4384 int16_t *block, int16_t *weight, int16_t *orig,
4387 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4388 const uint8_t *scantable= s->intra_scantable.scantable;
4389 const uint8_t *perm_scantable= s->intra_scantable.permutated;
4390 //    unsigned int threshold1, threshold2;
4395 int qmul, qadd, start_i, last_non_zero, i, dc;
4397 uint8_t * last_length;
4399 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* REFINE_STATS counters (debug only) */
4402 static int after_last=0;
4403 static int to_zero=0;
4404 static int from_zero=0;
4407 static int messed_sign=0;
/* Lazily build the shared DCT basis table on first use. */
4410 if(basis[0][0] == 0)
4411 build_basis(s->idsp.idct_permutation);
4422 /* For AIC we skip quant/dequant of INTRADC */
4426 q <<= RECON_SHIFT-3;
4427 /* note: block[0] is assumed to be positive */
4429 //            block[0] = (block[0] + (q >> 1)) / q;
4431 //    if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4432 //        bias= 1<<(QMAT_SHIFT-1);
4433 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4434 length     = s->intra_chroma_ac_vlc_length;
4435 last_length= s->intra_chroma_ac_vlc_last_length;
4437 length     = s->intra_ac_vlc_length;
4438 last_length= s->intra_ac_vlc_last_length;
4443 length     = s->inter_ac_vlc_length;
4444 last_length= s->inter_ac_vlc_last_length;
4446 last_non_zero = s->block_last_index[n];
/* rem[] holds the pixel-domain reconstruction error (RECON_SHIFT fixed
 * point) relative to the original block. */
4451 dc += (1<<(RECON_SHIFT-1));
4452 for(i=0; i<64; i++){
4453 rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME  use orig directly instead of copying to rem[]
4456 STOP_TIMER("memset rem[]")}
/* Map raw weights into the 16..63 range used for error weighting. */
4459 for(i=0; i<64; i++){
4464 w= FFABS(weight[i]) + qns*one;
4465 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4468 //            w=weight[i] = (63*qns + (w/2)) / w;
4471 av_assert2(w<(1<<6));
4474 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* Subtract the contribution of every currently coded coefficient from
 * rem[] and record the run-length structure in run_tab[]. */
4480 for(i=start_i; i<=last_non_zero; i++){
4481 int j= perm_scantable[i];
4482 const int level= block[j];
4486 if(level<0) coeff= qmul*level - qadd;
4487 else        coeff= qmul*level + qadd;
4488 run_tab[rle_index++]=run;
4491 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
4497 if(last_non_zero>0){
4498 STOP_TIMER("init rem[]")
/* Main refinement loop: evaluate candidate +/-1 changes, keep the best. */
4505 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4508 int run2, best_unquant_change=0, analyze_gradient;
4512 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
/* d1[] is a cheap gradient estimate used to reject hopeless changes
 * before the expensive try_8x8basis call. */
4514 if(analyze_gradient){
4518 for(i=0; i<64; i++){
4521 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
4524 STOP_TIMER("rem*w*w")}
/* DC coefficient (intra only): bit cost is constant, try +/-1 on
 * distortion alone. */
4534 const int level= block[0];
4535 int change, old_coeff;
4537 av_assert2(s->mb_intra);
4541 for(change=-1; change<=1; change+=2){
4542 int new_level= level + change;
4543 int score, new_coeff;
4545 new_coeff= q*new_level;
4546 if(new_coeff >= 2048 || new_coeff < 0)
4549 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4550 new_coeff - old_coeff);
4551 if(score<best_score){
4554 best_change= change;
4555 best_unquant_change= new_coeff - old_coeff;
4562 run2= run_tab[rle_index++];
/* AC coefficients: score each +/-1 change as pixel-domain distortion
 * delta plus the VLC bit-cost delta (including run merge/split when a
 * coefficient appears or disappears). */
4566 for(i=start_i; i<64; i++){
4567 int j= perm_scantable[i];
4568 const int level= block[j];
4569 int change, old_coeff;
4571 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4575 if(level<0) old_coeff= qmul*level - qadd;
4576 else        old_coeff= qmul*level + qadd;
4577 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4581 av_assert2(run2>=0 || i >= last_non_zero );
4584 for(change=-1; change<=1; change+=2){
4585 int new_level= level + change;
4586 int score, new_coeff, unquant_change;
/* level-1 noise shaping only ever shrinks magnitudes */
4589 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4593 if(new_level<0) new_coeff= qmul*new_level - qadd;
4594 else            new_coeff= qmul*new_level + qadd;
4595 if(new_coeff >= 2048 || new_coeff <= -2048)
4597 //FIXME check for overflow
4600 if(level < 63 && level > -63){
4601 if(i < last_non_zero)
4602 score +=  length[UNI_AC_ENC_INDEX(run, new_level+64)]
4603 - length[UNI_AC_ENC_INDEX(run, level+64)];
4605 score +=  last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4606 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* zero -> nonzero: a coefficient appears, splitting a run */
4609 av_assert2(FFABS(new_level)==1);
4611 if(analyze_gradient){
4612 int g= d1[ scantable[i] ];
4613 if(g && (g^new_level) >= 0)
4617 if(i < last_non_zero){
4618 int next_i= i + run2 + 1;
4619 int next_level= block[ perm_scantable[next_i] ] + 64;
4621 if(next_level&(~127))
4624 if(next_i < last_non_zero)
4625 score +=   length[UNI_AC_ENC_INDEX(run, 65)]
4626 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4627 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4629 score +=  length[UNI_AC_ENC_INDEX(run, 65)]
4630 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4631 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4633 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4635 score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4636 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* nonzero -> zero: the coefficient vanishes, merging two runs */
4642 av_assert2(FFABS(level)==1);
4644 if(i < last_non_zero){
4645 int next_i= i + run2 + 1;
4646 int next_level= block[ perm_scantable[next_i] ] + 64;
4648 if(next_level&(~127))
4651 if(next_i < last_non_zero)
4652 score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4653 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4654 - length[UNI_AC_ENC_INDEX(run, 65)];
4656 score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4657 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4658 - length[UNI_AC_ENC_INDEX(run, 65)];
4660 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4662 score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4663 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4670 unquant_change= new_coeff - old_coeff;
4671 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4673 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4675 if(score<best_score){
4678 best_change= change;
4679 best_unquant_change= unquant_change;
4683 prev_level= level + 64;
4684 if(prev_level&(~127))
4693 STOP_TIMER("iterative step")}
/* Apply the winning change and update last_non_zero / rem[]. */
4697 int j= perm_scantable[ best_coeff ];
4699 block[j] += best_change;
4701 if(best_coeff > last_non_zero){
4702 last_non_zero= best_coeff;
4703 av_assert2(block[j]);
4710 if(block[j] - best_change){
4711 if(FFABS(block[j]) > FFABS(block[j] - best_change)){
4723 for(; last_non_zero>=start_i; last_non_zero--){
4724 if(block[perm_scantable[last_non_zero]])
4730 if(256*256*256*64 % count == 0){
4731 av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
/* Rebuild run_tab[] to match the modified block for the next pass. */
4736 for(i=start_i; i<=last_non_zero; i++){
4737 int j= perm_scantable[i];
4738 const int level= block[j];
4741 run_tab[rle_index++]=run;
4748 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4754 if(last_non_zero>0){
4755 STOP_TIMER("iterative search")
4760 return last_non_zero;
4764  * Permute an 8x8 block according to permutation.
4765  * @param block the block which will be permuted according to
4766  * the given permutation vector
4767  * @param permutation the permutation vector
4768  * @param last the last non zero coefficient in scantable order, used to
4769  * speed the permutation up
4770  * @param scantable the used scantable, this is only used to speed the
4771  * permutation up, the block is not (inverse) permutated
4772  * to scantable order!
4774 void ff_block_permute(int16_t *block, uint8_t *permutation,
4775 const uint8_t *scantable, int last)
4782 //FIXME it is ok but not clean and might fail for some permutations
4783 // if (permutation[1] == 1)
/* Copy the nonzero (scan-order) coefficients aside first, since the
 * permutation cannot be done safely in place. */
4786 for (i = 0; i <= last; i++) {
4787 const int j = scantable[i];
/* Scatter them back to their permuted positions. */
4792 for (i = 0; i <= last; i++) {
4793 const int j = scantable[i];
4794 const int perm_j = permutation[j];
4795 block[perm_j] = temp[j];
/* Plain (non-trellis) quantization of one 8x8 block: forward DCT,
 * optional denoising, intra DC handled separately, then each AC
 * coefficient scaled by the precomputed qmat with rounding bias and
 * dead-zone thresholding. Finally the nonzero coefficients are permuted
 * to the IDCT's coefficient order.
 * @param n        block index; n < 4 selects luma, else chroma intra qmat.
 * @param overflow set to nonzero if a level exceeded s->max_qcoeff.
 * @return index of the last nonzero coefficient in scan order. */
4799 int ff_dct_quantize_c(MpegEncContext *s,
4800 int16_t *block, int n,
4801 int qscale, int *overflow)
4803 int i, j, level, last_non_zero, q, start_i;
4805 const uint8_t *scantable= s->intra_scantable.scantable;
4808 unsigned int threshold1, threshold2;
4810 s->fdsp.fdct(block);
4812 if(s->dct_error_sum)
4813 s->denoise_dct(s, block);
4823 /* For AIC we skip quant/dequant of INTRADC */
4826 /* note: block[0] is assumed to be positive */
4827 block[0] = (block[0] + (q >> 1)) / q;
4830 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4831 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4835 qmat = s->q_inter_matrix[qscale];
4836 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4838 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4839 threshold2= (threshold1<<1);
/* Backwards scan: find the last coefficient surviving the dead zone. */
4840 for(i=63;i>=start_i;i--) {
4842 level = block[j] * qmat[j];
4844 if(((unsigned)(level+threshold1))>threshold2){
/* Forward pass: quantize everything up to last_non_zero. */
4851 for(i=start_i; i<=last_non_zero; i++) {
4853 level = block[j] * qmat[j];
4855 //        if(   bias+level >= (1<<QMAT_SHIFT)
4856 //           || bias-level >= (1<<QMAT_SHIFT)){
4857 if(((unsigned)(level+threshold1))>threshold2){
4859 level= (bias + level)>>QMAT_SHIFT;
4862 level= (bias - level)>>QMAT_SHIFT;
4870 *overflow= s->max_qcoeff < max; //overflow might have happened
4872 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4873 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4874 ff_block_permute(block, s->idsp.idct_permutation,
4875 scantable, last_non_zero);
4877 return last_non_zero;
4880 #define OFFSET(x) offsetof(MpegEncContext, x)
4881 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
4882 static const AVOption h263_options[] = {
4883 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4884 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4889 static const AVClass h263_class = {
4890 .class_name = "H.263 encoder",
4891 .item_name = av_default_item_name,
4892 .option = h263_options,
4893 .version = LIBAVUTIL_VERSION_INT,
4896 AVCodec ff_h263_encoder = {
4898 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4899 .type = AVMEDIA_TYPE_VIDEO,
4900 .id = AV_CODEC_ID_H263,
4901 .priv_data_size = sizeof(MpegEncContext),
4902 .init = ff_mpv_encode_init,
4903 .encode2 = ff_mpv_encode_picture,
4904 .close = ff_mpv_encode_end,
4905 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4906 .priv_class = &h263_class,
4909 static const AVOption h263p_options[] = {
4910 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4911 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4912 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4913 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
4917 static const AVClass h263p_class = {
4918 .class_name = "H.263p encoder",
4919 .item_name = av_default_item_name,
4920 .option = h263p_options,
4921 .version = LIBAVUTIL_VERSION_INT,
4924 AVCodec ff_h263p_encoder = {
4926 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4927 .type = AVMEDIA_TYPE_VIDEO,
4928 .id = AV_CODEC_ID_H263P,
4929 .priv_data_size = sizeof(MpegEncContext),
4930 .init = ff_mpv_encode_init,
4931 .encode2 = ff_mpv_encode_picture,
4932 .close = ff_mpv_encode_end,
4933 .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4934 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4935 .priv_class = &h263p_class,
4938 static const AVClass msmpeg4v2_class = {
4939 .class_name = "msmpeg4v2 encoder",
4940 .item_name = av_default_item_name,
4941 .option = ff_mpv_generic_options,
4942 .version = LIBAVUTIL_VERSION_INT,
4945 AVCodec ff_msmpeg4v2_encoder = {
4946 .name = "msmpeg4v2",
4947 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4948 .type = AVMEDIA_TYPE_VIDEO,
4949 .id = AV_CODEC_ID_MSMPEG4V2,
4950 .priv_data_size = sizeof(MpegEncContext),
4951 .init = ff_mpv_encode_init,
4952 .encode2 = ff_mpv_encode_picture,
4953 .close = ff_mpv_encode_end,
4954 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4955 .priv_class = &msmpeg4v2_class,
4958 static const AVClass msmpeg4v3_class = {
4959 .class_name = "msmpeg4v3 encoder",
4960 .item_name = av_default_item_name,
4961 .option = ff_mpv_generic_options,
4962 .version = LIBAVUTIL_VERSION_INT,
4965 AVCodec ff_msmpeg4v3_encoder = {
4967 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4968 .type = AVMEDIA_TYPE_VIDEO,
4969 .id = AV_CODEC_ID_MSMPEG4V3,
4970 .priv_data_size = sizeof(MpegEncContext),
4971 .init = ff_mpv_encode_init,
4972 .encode2 = ff_mpv_encode_picture,
4973 .close = ff_mpv_encode_end,
4974 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4975 .priv_class = &msmpeg4v3_class,
4978 static const AVClass wmv1_class = {
4979 .class_name = "wmv1 encoder",
4980 .item_name = av_default_item_name,
4981 .option = ff_mpv_generic_options,
4982 .version = LIBAVUTIL_VERSION_INT,
4985 AVCodec ff_wmv1_encoder = {
4987 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4988 .type = AVMEDIA_TYPE_VIDEO,
4989 .id = AV_CODEC_ID_WMV1,
4990 .priv_data_size = sizeof(MpegEncContext),
4991 .init = ff_mpv_encode_init,
4992 .encode2 = ff_mpv_encode_picture,
4993 .close = ff_mpv_encode_end,
4994 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4995 .priv_class = &wmv1_class,