2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/pixdesc.h"
40 #include "libavutil/opt.h"
41 #include "libavutil/timer.h"
46 #include "mpegvideo.h"
47 #include "mpegvideodata.h"
51 #include "mjpegenc_common.h"
53 #include "mpegutils.h"
56 #include "pixblockdsp.h"
60 #include "aandcttab.h"
62 #include "mpeg4video.h"
64 #include "bytestream.h"
/* Fixed-point precision constants used when building the quantizer tables
 * in ff_convert_matrix() below. */
71 #define QUANT_BIAS_SHIFT 8
73 #define QMAT_SHIFT_MMX 16
/* Forward declarations for encoder-internal helpers defined later in this
 * translation unit. */
76 static int encode_picture(MpegEncContext *s, int picture_number);
77 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
78 static int sse_mb(MpegEncContext *s);
79 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
80 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
/* Shared default tables installed into the context by mpv_encode_defaults().
 * default_fcode_tab is filled there; default_mv_penalty is presumably filled
 * in lines not visible in this listing — TODO confirm against full source. */
82 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
83 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
/* Table of generic mpegvideo encoder AVOptions shared by the encoders in
 * this file. NOTE(review): the initializer body is not visible in this
 * (incomplete) listing. */
85 const AVOption ff_mpv_generic_options[] = {
/*
 * Precompute the per-qscale fixed-point quantization tables used by the
 * DCT quantizers.
 *
 * qmat:         32-bit reciprocal table, indexed [qscale][coefficient].
 * qmat16:       16-bit table pair ([0] = multiplier, [1] = rounding bias)
 *               for the fast fixed-point (MMX-style) quantizer.
 * quant_matrix: base quantization matrix; entries are remapped through
 *               idsp.idct_permutation before use.
 * bias:         quantizer rounding bias (QUANT_BIAS_SHIFT fixed point).
 * qmin, qmax:   inclusive qscale range to fill.
 * intra:        starting coefficient for the overflow check (skips DC for
 *               intra matrices).
 *
 * NOTE(review): this listing is incomplete — some original source lines are
 * missing between the statements shown below.
 */
90 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
91 uint16_t (*qmat16)[2][64],
92 const uint16_t *quant_matrix,
93 int bias, int qmin, int qmax, int intra)
95 FDCTDSPContext *fdsp = &s->fdsp;
99 for (qscale = qmin; qscale <= qmax; qscale++) {
/* MPEG-2 non-linear quant maps qscale through a lookup table; the linear
 * case simply doubles it. */
103 if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
104 else qscale2 = qscale << 1;
/* Branch on which forward-DCT implementation is in use: the "islow"/faan
 * DCTs need no extra scale factors, while ff_fdct_ifast bakes the AAN
 * scale factors (ff_aanscales) into the reciprocal. */
106 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
108 fdsp->fdct == ff_faandct ||
109 #endif /* CONFIG_FAANDCT */
110 fdsp->fdct == ff_jpeg_fdct_islow_10) {
111 for (i = 0; i < 64; i++) {
112 const int j = s->idsp.idct_permutation[i];
113 int64_t den = (int64_t) qscale2 * quant_matrix[j];
114 /* 16 <= qscale * quant_matrix[i] <= 7905
115 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
116 * 19952 <= x <= 249205026
117 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
118 * 3444240 >= (1 << 36) / (x) >= 275 */
120 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
122 } else if (fdsp->fdct == ff_fdct_ifast) {
123 for (i = 0; i < 64; i++) {
124 const int j = s->idsp.idct_permutation[i];
125 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
126 /* 16 <= qscale * quant_matrix[i] <= 7905
127 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
128 * 19952 <= x <= 249205026
129 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
130 * 3444240 >= (1 << 36) / (x) >= 275 */
/* Extra +14 shift compensates for the 14-bit ff_aanscales factor in den. */
132 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
/* Fallback path: also fills the 16-bit multiplier/bias tables. */
135 for (i = 0; i < 64; i++) {
136 const int j = s->idsp.idct_permutation[i];
137 int64_t den = (int64_t) qscale2 * quant_matrix[j];
138 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
139 * Assume x = qscale * quant_matrix[i]
141 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
142 * so 32768 >= (1 << 19) / (x) >= 67 */
143 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
144 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
145 // (qscale * quant_matrix[i]);
146 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
/* Clamp away 0 and the 16-bit boundary value so the fixed-point
 * multiplier stays usable. */
148 if (qmat16[qscale][0][i] == 0 ||
149 qmat16[qscale][0][i] == 128 * 256)
150 qmat16[qscale][0][i] = 128 * 256 - 1;
151 qmat16[qscale][1][i] =
152 ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
153 qmat16[qscale][0][i]);
/* Overflow check: warn if max DCT output times the reciprocal could
 * exceed INT_MAX at the chosen shift. */
157 for (i = intra; i < 64; i++) {
159 if (fdsp->fdct == ff_fdct_ifast) {
160 max = (8191LL * ff_aanscales[i]) >> 14;
162 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
168 av_log(NULL, AV_LOG_INFO,
169 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/*
 * Derive s->qscale (and s->lambda2) from the current rate-control lambda.
 * The linear path maps lambda -> qscale with a fixed-point factor of 139
 * and clips to [qmin, qmax] (qmax relaxed to 31 when vbv_ignore_qmax).
 */
174 static inline void update_qscale(MpegEncContext *s)
/* NOTE: the "&& 0" makes this non-linear-quant branch deliberately dead
 * code — the table search below is currently disabled. */
176 if (s->q_scale_type == 1 && 0) {
178 int bestdiff=INT_MAX;
/* Search the MPEG-2 non-linear qscale table for the entry whose scaled
 * value is closest to lambda * 139, respecting qmin/qmax. */
181 for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
182 int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
183 if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
184 (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
186 if (diff < bestdiff) {
/* Linear mapping with rounding: qscale = (lambda*139 + 64*SCALE) >> (SHIFT+7). */
193 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
194 (FF_LAMBDA_SHIFT + 7);
195 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
/* Keep lambda2 (used by RD decisions) consistent with lambda. */
198 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/*
 * Write a 64-entry quantization matrix to the bitstream, one byte per
 * coefficient, in zigzag scan order.
 */
202 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
208 for (i = 0; i < 64; i++) {
209 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
216 * init s->current_picture.qscale_table from s->lambda_table
218 void ff_init_qscale_tab(MpegEncContext *s)
220 int8_t * const qscale_table = s->current_picture.qscale_table;
/* For each macroblock, convert its lambda to a qscale (same lambda*139
 * fixed-point mapping as update_qscale()) and clip to the qmin bound. */
223 for (i = 0; i < s->mb_num; i++) {
224 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
225 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
226 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/*
 * Copy the per-picture fields that motion estimation may have changed from
 * the slice-thread duplicate context `src` back into `dst`.
 */
231 static void update_duplicate_context_after_me(MpegEncContext *dst,
234 #define COPY(a) dst->a= src->a
236 COPY(current_picture);
242 COPY(picture_in_gop_number);
243 COPY(gop_picture_number);
244 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
245 COPY(progressive_frame); // FIXME don't set in encode_header
246 COPY(partitioned_frame); // FIXME don't set in encode_header
251 * Set the given MpegEncContext to defaults for encoding.
252 * the changed fields will not depend upon the prior state of the MpegEncContext.
254 static void mpv_encode_defaults(MpegEncContext *s)
257 ff_mpv_common_defaults(s);
/* Initialize the shared static fcode table (all small MVs get fcode 1)
 * and point the context at the file-level default tables. */
259 for (i = -16; i < 16; i++) {
260 default_fcode_tab[i + MAX_MV] = 1;
262 s->me.mv_penalty = default_mv_penalty;
263 s->fcode_tab = default_fcode_tab;
/* Reset picture counters for a fresh encoding session. */
265 s->input_picture_number = 0;
266 s->picture_in_gop_number = 0;
/*
 * Install the DCT quantization function pointers: arch-specific override
 * first, then C fallbacks; the trellis quantizer replaces the default when
 * avctx->trellis is set (fast_dct_quantize keeps the non-trellis one).
 */
269 av_cold int ff_dct_encode_init(MpegEncContext *s)
272 ff_dct_encode_init_x86(s);
274 if (CONFIG_H263_ENCODER)
275 ff_h263dsp_init(&s->h263dsp);
276 if (!s->dct_quantize)
277 s->dct_quantize = ff_dct_quantize_c;
279 s->denoise_dct = denoise_dct_c;
280 s->fast_dct_quantize = s->dct_quantize;
281 if (s->avctx->trellis)
282 s->dct_quantize = dct_quantize_trellis_c;
287 /* init video encoder */
/*
 * Initialize an mpegvideo-based encoder: validate user options against the
 * selected codec, fill the context from AVCodecContext, allocate the
 * quantizer tables and picture queues, and initialize rate control.
 * Returns 0 on success or a negative AVERROR code.
 *
 * NOTE(review): this listing is incomplete — many original source lines are
 * missing between the statements shown, so several if/switch bodies appear
 * truncated here.
 */
288 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
290 MpegEncContext *s = avctx->priv_data;
291 AVCPBProperties *cpb_props;
292 int i, ret, format_supported;
294 mpv_encode_defaults(s);
/* --- Pixel-format validation, per codec --- */
296 switch (avctx->codec_id) {
297 case AV_CODEC_ID_MPEG2VIDEO:
298 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
299 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
300 av_log(avctx, AV_LOG_ERROR,
301 "only YUV420 and YUV422 are supported\n");
305 case AV_CODEC_ID_MJPEG:
306 case AV_CODEC_ID_AMV:
307 format_supported = 0;
308 /* JPEG color space */
309 if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
310 avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
311 avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
312 (avctx->color_range == AVCOL_RANGE_JPEG &&
313 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
314 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
315 avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
316 format_supported = 1;
317 /* MPEG color space */
318 else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
319 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
320 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
321 avctx->pix_fmt == AV_PIX_FMT_YUV444P))
322 format_supported = 1;
324 if (!format_supported) {
325 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
330 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
331 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
/* Map the pixel format to the internal chroma subsampling enum. */
336 switch (avctx->pix_fmt) {
337 case AV_PIX_FMT_YUVJ444P:
338 case AV_PIX_FMT_YUV444P:
339 s->chroma_format = CHROMA_444;
341 case AV_PIX_FMT_YUVJ422P:
342 case AV_PIX_FMT_YUV422P:
343 s->chroma_format = CHROMA_422;
345 case AV_PIX_FMT_YUVJ420P:
346 case AV_PIX_FMT_YUV420P:
348 s->chroma_format = CHROMA_420;
352 avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
/* --- Import deprecated public options into the private context --- */
354 #if FF_API_PRIVATE_OPT
355 FF_DISABLE_DEPRECATION_WARNINGS
356 if (avctx->rtp_payload_size)
357 s->rtp_payload_size = avctx->rtp_payload_size;
358 if (avctx->me_penalty_compensation)
359 s->me_penalty_compensation = avctx->me_penalty_compensation;
361 s->me_pre = avctx->pre_me;
362 FF_ENABLE_DEPRECATION_WARNINGS
/* --- Copy basic stream parameters from the AVCodecContext --- */
365 s->bit_rate = avctx->bit_rate;
366 s->width = avctx->width;
367 s->height = avctx->height;
368 if (avctx->gop_size > 600 &&
369 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
370 av_log(avctx, AV_LOG_WARNING,
371 "keyframe interval too large!, reducing it from %d to %d\n",
372 avctx->gop_size, 600);
373 avctx->gop_size = 600;
375 s->gop_size = avctx->gop_size;
377 if (avctx->max_b_frames > MAX_B_FRAMES) {
378 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
379 "is %d.\n", MAX_B_FRAMES);
380 avctx->max_b_frames = MAX_B_FRAMES;
382 s->max_b_frames = avctx->max_b_frames;
383 s->codec_id = avctx->codec->id;
384 s->strict_std_compliance = avctx->strict_std_compliance;
385 s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
386 s->rtp_mode = !!s->rtp_payload_size;
387 s->intra_dc_precision = avctx->intra_dc_precision;
389 // workaround some differences between how applications specify dc precision
390 if (s->intra_dc_precision < 0) {
391 s->intra_dc_precision += 8;
392 } else if (s->intra_dc_precision >= 8)
393 s->intra_dc_precision -= 8;
395 if (s->intra_dc_precision < 0) {
396 av_log(avctx, AV_LOG_ERROR,
397 "intra dc precision must be positive, note some applications use"
398 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
399 return AVERROR(EINVAL);
402 if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))
/* Only MPEG-2 supports non-zero intra DC precision (up to 3). */
405 if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
406 av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
407 return AVERROR(EINVAL);
409 s->user_specified_pts = AV_NOPTS_VALUE;
411 if (s->gop_size <= 1) {
418 #if FF_API_MOTION_EST
419 FF_DISABLE_DEPRECATION_WARNINGS
420 s->me_method = avctx->me_method;
421 FF_ENABLE_DEPRECATION_WARNINGS
425 s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
428 FF_DISABLE_DEPRECATION_WARNINGS
429 if (avctx->border_masking != 0.0)
430 s->border_masking = avctx->border_masking;
431 FF_ENABLE_DEPRECATION_WARNINGS
/* Adaptive quant is enabled if any masking option or QP-RD is requested. */
434 s->adaptive_quant = (s->avctx->lumi_masking ||
435 s->avctx->dark_masking ||
436 s->avctx->temporal_cplx_masking ||
437 s->avctx->spatial_cplx_masking ||
438 s->avctx->p_masking ||
440 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
443 s->loop_filter = !!(s->avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
/* --- Pick a default VBV buffer size when only max rate is given --- */
445 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
446 switch(avctx->codec_id) {
447 case AV_CODEC_ID_MPEG1VIDEO:
448 case AV_CODEC_ID_MPEG2VIDEO:
449 avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
451 case AV_CODEC_ID_MPEG4:
452 case AV_CODEC_ID_MSMPEG4V1:
453 case AV_CODEC_ID_MSMPEG4V2:
454 case AV_CODEC_ID_MSMPEG4V3:
/* Piecewise-linear interpolation of the buffer size between the
 * profile/level break points of the max rate. */
455 if (avctx->rc_max_rate >= 15000000) {
456 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
457 } else if(avctx->rc_max_rate >= 2000000) {
458 avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
459 } else if(avctx->rc_max_rate >= 384000) {
460 avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
462 avctx->rc_buffer_size = 40;
463 avctx->rc_buffer_size *= 16384;
466 if (avctx->rc_buffer_size) {
467 av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
/* --- Rate-control sanity checks --- */
471 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
472 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
476 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
477 av_log(avctx, AV_LOG_INFO,
478 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
481 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
482 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
486 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
487 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
491 if (avctx->rc_max_rate &&
492 avctx->rc_max_rate == avctx->bit_rate &&
493 avctx->rc_max_rate != avctx->rc_min_rate) {
494 av_log(avctx, AV_LOG_INFO,
495 "impossible bitrate constraints, this will fail\n");
498 if (avctx->rc_buffer_size &&
499 avctx->bit_rate * (int64_t)avctx->time_base.num >
500 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
501 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
505 if (!s->fixed_qscale &&
506 avctx->bit_rate * av_q2d(avctx->time_base) >
507 avctx->bit_rate_tolerance) {
508 av_log(avctx, AV_LOG_WARNING,
509 "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
510 avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
/* CBR MPEG-1/2 with an over-large VBV buffer cannot express vbv_delay. */
513 if (s->avctx->rc_max_rate &&
514 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
515 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
516 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
517 90000LL * (avctx->rc_buffer_size - 1) >
518 s->avctx->rc_max_rate * 0xFFFFLL) {
519 av_log(avctx, AV_LOG_INFO,
520 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
521 "specified vbv buffer is too large for the given bitrate!\n");
/* --- Feature/codec compatibility checks --- */
524 if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
525 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
526 s->codec_id != AV_CODEC_ID_FLV1) {
527 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
531 if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
532 av_log(avctx, AV_LOG_ERROR,
533 "OBMC is only supported with simple mb decision\n");
537 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
538 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
542 if (s->max_b_frames &&
543 s->codec_id != AV_CODEC_ID_MPEG4 &&
544 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
545 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
546 av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
549 if (s->max_b_frames < 0) {
550 av_log(avctx, AV_LOG_ERROR,
551 "max b frames must be 0 or positive for mpegvideo based encoders\n");
/* SAR is stored in 8 bits for these codecs; reduce out-of-range ratios. */
555 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
556 s->codec_id == AV_CODEC_ID_H263 ||
557 s->codec_id == AV_CODEC_ID_H263P) &&
558 (avctx->sample_aspect_ratio.num > 255 ||
559 avctx->sample_aspect_ratio.den > 255)) {
560 av_log(avctx, AV_LOG_WARNING,
561 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
562 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
563 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
564 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
/* --- Resolution limits, per codec --- */
567 if ((s->codec_id == AV_CODEC_ID_H263 ||
568 s->codec_id == AV_CODEC_ID_H263P) &&
569 (avctx->width > 2048 ||
570 avctx->height > 1152 )) {
571 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
574 if ((s->codec_id == AV_CODEC_ID_H263 ||
575 s->codec_id == AV_CODEC_ID_H263P) &&
576 ((avctx->width &3) ||
577 (avctx->height&3) )) {
578 av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
582 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
583 (avctx->width > 4095 ||
584 avctx->height > 4095 )) {
585 av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
589 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
590 (avctx->width > 16383 ||
591 avctx->height > 16383 )) {
592 av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
596 if (s->codec_id == AV_CODEC_ID_RV10 &&
598 avctx->height&15 )) {
599 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
600 return AVERROR(EINVAL);
603 if (s->codec_id == AV_CODEC_ID_RV20 &&
606 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
607 return AVERROR(EINVAL);
610 if ((s->codec_id == AV_CODEC_ID_WMV1 ||
611 s->codec_id == AV_CODEC_ID_WMV2) &&
613 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
617 if ((s->avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
618 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
619 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
623 #if FF_API_PRIVATE_OPT
624 FF_DISABLE_DEPRECATION_WARNINGS
625 if (avctx->mpeg_quant)
626 s->mpeg_quant = avctx->mpeg_quant;
627 FF_ENABLE_DEPRECATION_WARNINGS
630 // FIXME mpeg2 uses that too
631 if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
632 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
633 av_log(avctx, AV_LOG_ERROR,
634 "mpeg2 style quantization not supported by codec\n");
638 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
639 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
643 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
644 s->avctx->mb_decision != FF_MB_DECISION_RD) {
645 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
649 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
650 (s->codec_id == AV_CODEC_ID_AMV ||
651 s->codec_id == AV_CODEC_ID_MJPEG)) {
652 // Used to produce garbage with MJPEG.
653 av_log(avctx, AV_LOG_ERROR,
654 "QP RD is no longer compatible with MJPEG or AMV\n");
658 #if FF_API_PRIVATE_OPT
659 FF_DISABLE_DEPRECATION_WARNINGS
660 if (avctx->scenechange_threshold)
661 s->scenechange_threshold = avctx->scenechange_threshold;
662 FF_ENABLE_DEPRECATION_WARNINGS
665 if (s->scenechange_threshold < 1000000000 &&
666 (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
667 av_log(avctx, AV_LOG_ERROR,
668 "closed gop with scene change detection are not supported yet, "
669 "set threshold to 1000000000\n");
673 if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
674 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
675 s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
676 av_log(avctx, AV_LOG_ERROR,
677 "low delay forcing is only available for mpeg2, "
678 "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
681 if (s->max_b_frames != 0) {
682 av_log(avctx, AV_LOG_ERROR,
683 "B-frames cannot be used with low delay\n");
688 if (s->q_scale_type == 1) {
689 if (avctx->qmax > 28) {
690 av_log(avctx, AV_LOG_ERROR,
691 "non linear quant only supports qmax <= 28 currently\n");
696 if (avctx->slices > 1 &&
697 (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
698 av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
699 return AVERROR(EINVAL);
702 if (s->avctx->thread_count > 1 &&
703 s->codec_id != AV_CODEC_ID_MPEG4 &&
704 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
705 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
706 s->codec_id != AV_CODEC_ID_MJPEG &&
707 (s->codec_id != AV_CODEC_ID_H263P)) {
708 av_log(avctx, AV_LOG_ERROR,
709 "multi threaded encoding not supported by codec\n");
713 if (s->avctx->thread_count < 1) {
714 av_log(avctx, AV_LOG_ERROR,
715 "automatic thread number detection not supported by codec, "
720 if (!avctx->time_base.den || !avctx->time_base.num) {
721 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
725 #if FF_API_PRIVATE_OPT
726 FF_DISABLE_DEPRECATION_WARNINGS
727 if (avctx->b_frame_strategy)
728 s->b_frame_strategy = avctx->b_frame_strategy;
729 if (avctx->b_sensitivity != 40)
730 s->b_sensitivity = avctx->b_sensitivity;
731 FF_ENABLE_DEPRECATION_WARNINGS
734 if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
735 av_log(avctx, AV_LOG_INFO,
736 "notice: b_frame_strategy only affects the first pass\n");
737 s->b_frame_strategy = 0;
/* Reduce the timebase to lowest terms before deriving stream headers. */
740 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
742 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
743 avctx->time_base.den /= i;
744 avctx->time_base.num /= i;
/* --- Default quantizer biases, codec dependent --- */
748 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) {
749 // (a + x * 3 / 8) / x
750 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
751 s->inter_quant_bias = 0;
753 s->intra_quant_bias = 0;
755 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
758 if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
759 av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
760 return AVERROR(EINVAL);
763 #if FF_API_QUANT_BIAS
764 FF_DISABLE_DEPRECATION_WARNINGS
765 if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
766 s->intra_quant_bias = avctx->intra_quant_bias;
767 if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
768 s->inter_quant_bias = avctx->inter_quant_bias;
769 FF_ENABLE_DEPRECATION_WARNINGS
772 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
/* MPEG-4 stores the timebase denominator in 16 bits. */
774 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
775 s->avctx->time_base.den > (1 << 16) - 1) {
776 av_log(avctx, AV_LOG_ERROR,
777 "timebase %d/%d not supported by MPEG 4 standard, "
778 "the maximum admitted value for the timebase denominator "
779 "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
783 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
/* --- Per-codec output format / feature setup --- */
785 switch (avctx->codec->id) {
786 case AV_CODEC_ID_MPEG1VIDEO:
787 s->out_format = FMT_MPEG1;
788 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
789 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
791 case AV_CODEC_ID_MPEG2VIDEO:
792 s->out_format = FMT_MPEG1;
793 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
794 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
797 case AV_CODEC_ID_MJPEG:
798 case AV_CODEC_ID_AMV:
799 s->out_format = FMT_MJPEG;
800 s->intra_only = 1; /* force intra only for jpeg */
801 if (!CONFIG_MJPEG_ENCODER ||
802 ff_mjpeg_encode_init(s) < 0)
807 case AV_CODEC_ID_H261:
808 if (!CONFIG_H261_ENCODER)
810 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
811 av_log(avctx, AV_LOG_ERROR,
812 "The specified picture size of %dx%d is not valid for the "
813 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
814 s->width, s->height);
817 s->out_format = FMT_H261;
820 s->rtp_mode = 0; /* Sliced encoding not supported */
822 case AV_CODEC_ID_H263:
823 if (!CONFIG_H263_ENCODER)
825 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
826 s->width, s->height) == 8) {
827 av_log(avctx, AV_LOG_ERROR,
828 "The specified picture size of %dx%d is not valid for "
829 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
830 "352x288, 704x576, and 1408x1152. "
831 "Try H.263+.\n", s->width, s->height);
834 s->out_format = FMT_H263;
838 case AV_CODEC_ID_H263P:
839 s->out_format = FMT_H263;
842 s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
843 s->modified_quant = s->h263_aic;
844 s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
845 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
848 /* These are just to be sure */
852 case AV_CODEC_ID_FLV1:
853 s->out_format = FMT_H263;
854 s->h263_flv = 2; /* format = 1; 11-bit codes */
855 s->unrestricted_mv = 1;
856 s->rtp_mode = 0; /* don't allow GOB */
860 case AV_CODEC_ID_RV10:
861 s->out_format = FMT_H263;
865 case AV_CODEC_ID_RV20:
866 s->out_format = FMT_H263;
869 s->modified_quant = 1;
873 s->unrestricted_mv = 0;
875 case AV_CODEC_ID_MPEG4:
876 s->out_format = FMT_H263;
878 s->unrestricted_mv = 1;
879 s->low_delay = s->max_b_frames ? 0 : 1;
880 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
882 case AV_CODEC_ID_MSMPEG4V2:
883 s->out_format = FMT_H263;
885 s->unrestricted_mv = 1;
886 s->msmpeg4_version = 2;
890 case AV_CODEC_ID_MSMPEG4V3:
891 s->out_format = FMT_H263;
893 s->unrestricted_mv = 1;
894 s->msmpeg4_version = 3;
895 s->flipflop_rounding = 1;
899 case AV_CODEC_ID_WMV1:
900 s->out_format = FMT_H263;
902 s->unrestricted_mv = 1;
903 s->msmpeg4_version = 4;
904 s->flipflop_rounding = 1;
908 case AV_CODEC_ID_WMV2:
909 s->out_format = FMT_H263;
911 s->unrestricted_mv = 1;
912 s->msmpeg4_version = 5;
913 s->flipflop_rounding = 1;
921 #if FF_API_PRIVATE_OPT
922 FF_DISABLE_DEPRECATION_WARNINGS
923 if (avctx->noise_reduction)
924 s->noise_reduction = avctx->noise_reduction;
925 FF_ENABLE_DEPRECATION_WARNINGS
928 avctx->has_b_frames = !s->low_delay;
932 s->progressive_frame =
933 s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
934 AV_CODEC_FLAG_INTERLACED_ME) ||
/* --- Common context + DSP initialization and table allocation --- */
939 if (ff_mpv_common_init(s) < 0)
942 ff_fdctdsp_init(&s->fdsp, avctx);
943 ff_me_cmp_init(&s->mecc, avctx);
944 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
945 ff_pixblockdsp_init(&s->pdsp, avctx);
946 ff_qpeldsp_init(&s->qdsp);
948 if (s->msmpeg4_version) {
949 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
950 2 * 2 * (MAX_LEVEL + 1) *
951 (MAX_RUN + 1) * 2 * sizeof(int), fail);
953 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
/* Quantizer tables: 64 coefficients x 32 qscale values each. */
955 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
956 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail);
957 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
958 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
959 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
960 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
961 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
962 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
963 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
964 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
967 if (s->noise_reduction) {
968 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
969 2 * 64 * sizeof(uint16_t), fail);
972 ff_dct_encode_init(s);
974 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
975 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
977 if (s->slice_context_count > 1) {
980 if (avctx->codec_id == AV_CODEC_ID_H263P)
981 s->h263_slice_structured = 1;
984 s->quant_precision = 5;
986 #if FF_API_PRIVATE_OPT
987 FF_DISABLE_DEPRECATION_WARNINGS
988 if (avctx->frame_skip_threshold)
989 s->frame_skip_threshold = avctx->frame_skip_threshold;
990 if (avctx->frame_skip_factor)
991 s->frame_skip_factor = avctx->frame_skip_factor;
992 if (avctx->frame_skip_exp)
993 s->frame_skip_exp = avctx->frame_skip_exp;
994 if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
995 s->frame_skip_cmp = avctx->frame_skip_cmp;
996 FF_ENABLE_DEPRECATION_WARNINGS
999 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, s->avctx->ildct_cmp);
1000 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
/* Per-format sub-encoder initialization. */
1002 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
1003 ff_h261_encode_init(s);
1004 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
1005 ff_h263_encode_init(s);
1006 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
1007 if ((ret = ff_msmpeg4_encode_init(s)) < 0)
1009 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
1010 && s->out_format == FMT_MPEG1)
1011 ff_mpeg1_encode_init(s);
/* --- Select intra/inter base matrices (user-supplied ones override) --- */
1014 for (i = 0; i < 64; i++) {
1015 int j = s->idsp.idct_permutation[i];
1016 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
1018 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
1019 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
1020 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1021 s->intra_matrix[j] =
1022 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1025 s->chroma_intra_matrix[j] =
1026 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1027 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1029 if (s->avctx->intra_matrix)
1030 s->intra_matrix[j] = s->avctx->intra_matrix[i];
1031 if (s->avctx->inter_matrix)
1032 s->inter_matrix[j] = s->avctx->inter_matrix[i];
1035 /* precompute matrix */
1036 /* for mjpeg, we do include qscale in the matrix */
1037 if (s->out_format != FMT_MJPEG) {
1038 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
1039 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1041 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
1042 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
/* --- Rate control (optionally Xvid-style for 2nd pass) --- */
1046 #if FF_API_RC_STRATEGY
1047 FF_DISABLE_DEPRECATION_WARNINGS
1048 if (!s->rc_strategy)
1049 s->rc_strategy = s->avctx->rc_strategy;
1050 FF_ENABLE_DEPRECATION_WARNINGS
1053 if (ff_rate_control_init(s) < 0)
1056 #if FF_API_RC_STRATEGY
1057 av_assert0(MPV_RC_STRATEGY_XVID == FF_RC_STRATEGY_XVID);
1060 if ((s->avctx->flags & AV_CODEC_FLAG_PASS2) && s->rc_strategy == MPV_RC_STRATEGY_XVID) {
1062 ret = ff_xvid_rate_control_init(s);
1064 ret = AVERROR(ENOSYS);
1065 av_log(s->avctx, AV_LOG_ERROR,
1066 "Xvid ratecontrol requires libavcodec compiled with Xvid support.\n");
/* Import deprecated rate-control tuning options. */
1073 FF_DISABLE_DEPRECATION_WARNINGS
1074 if (avctx->rc_qsquish != 0.0)
1075 s->rc_qsquish = avctx->rc_qsquish;
1076 if (avctx->rc_qmod_amp != 0.0)
1077 s->rc_qmod_amp = avctx->rc_qmod_amp;
1078 if (avctx->rc_qmod_freq)
1079 s->rc_qmod_freq = avctx->rc_qmod_freq;
1080 if (avctx->rc_buffer_aggressivity != 1.0)
1081 s->rc_buffer_aggressivity = avctx->rc_buffer_aggressivity;
1082 if (avctx->rc_initial_cplx != 0.0)
1083 s->rc_initial_cplx = avctx->rc_initial_cplx;
1085 s->lmin = avctx->lmin;
1087 s->lmax = avctx->lmax;
1090 av_freep(&s->rc_eq);
1091 s->rc_eq = av_strdup(avctx->rc_eq);
1093 return AVERROR(ENOMEM);
1095 FF_ENABLE_DEPRECATION_WARNINGS
1098 #if FF_API_PRIVATE_OPT
1099 FF_DISABLE_DEPRECATION_WARNINGS
1100 if (avctx->brd_scale)
1101 s->brd_scale = avctx->brd_scale;
1103 if (avctx->prediction_method)
1104 s->pred = avctx->prediction_method + 1;
1105 FF_ENABLE_DEPRECATION_WARNINGS
/* B-frame strategy 2 needs downscaled temp frames for lookahead. */
1108 if (s->b_frame_strategy == 2) {
1109 for (i = 0; i < s->max_b_frames + 2; i++) {
1110 s->tmp_frames[i] = av_frame_alloc();
1111 if (!s->tmp_frames[i])
1112 return AVERROR(ENOMEM);
1114 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1115 s->tmp_frames[i]->width = s->width >> s->brd_scale;
1116 s->tmp_frames[i]->height = s->height >> s->brd_scale;
1118 ret = av_frame_get_buffer(s->tmp_frames[i], 32);
/* Export CPB (coded picture buffer) properties as stream side data. */
1124 cpb_props = ff_add_cpb_side_data(avctx);
1126 return AVERROR(ENOMEM);
1127 cpb_props->max_bitrate = avctx->rc_max_rate;
1128 cpb_props->min_bitrate = avctx->rc_min_rate;
1129 cpb_props->avg_bitrate = avctx->bit_rate;
1130 cpb_props->buffer_size = avctx->rc_buffer_size;
/* fail: free everything allocated so far and report failure. */
1134 ff_mpv_encode_end(avctx);
1135 return AVERROR_UNKNOWN;
/*
 * Free everything allocated by ff_mpv_encode_init(): rate control state,
 * common context, sub-encoder state, stats buffers, quantizer tables and
 * picture queues. Safe to call from the init failure path.
 */
1138 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1140 MpegEncContext *s = avctx->priv_data;
1143 ff_rate_control_uninit(s);
1145 if ((avctx->flags & AV_CODEC_FLAG_PASS2) && s->rc_strategy == MPV_RC_STRATEGY_XVID)
1146 ff_xvid_rate_control_uninit(s);
1149 ff_mpv_common_end(s);
1150 if (CONFIG_MJPEG_ENCODER &&
1151 s->out_format == FMT_MJPEG)
1152 ff_mjpeg_encode_close(s);
1154 av_freep(&avctx->extradata);
1156 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1157 av_frame_free(&s->tmp_frames[i]);
1159 ff_free_picture_tables(&s->new_picture);
1160 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1162 av_freep(&s->avctx->stats_out);
1163 av_freep(&s->ac_stats);
/* The chroma tables may alias the luma ones; only free when distinct to
 * avoid a double free, then clear the pointers. */
1165 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1166 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1167 s->q_chroma_intra_matrix= NULL;
1168 s->q_chroma_intra_matrix16= NULL;
1169 av_freep(&s->q_intra_matrix);
1170 av_freep(&s->q_inter_matrix);
1171 av_freep(&s->q_intra_matrix16);
1172 av_freep(&s->q_inter_matrix16);
1173 av_freep(&s->input_picture);
1174 av_freep(&s->reordered_input_picture);
1175 av_freep(&s->dct_offset);
/* Sum of absolute errors of a 16x16 luma block against the constant value
 * ref (typically the block mean). Used to judge how "flat" a block is. */
1180 static int get_sae(uint8_t *src, int ref, int stride)
1185 for (y = 0; y < 16; y++) {
1186 for (x = 0; x < 16; x++) {
1187 acc += FFABS(src[x + y * stride] - ref);
/* Count 16x16 macroblocks for which intra coding looks cheaper than inter:
 * a block counts when its flatness (SAE vs. its own mean, plus a 500 bias)
 * is below the SAD against the reference frame. Height is rounded down to
 * a multiple of 16. */
1194 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1195 uint8_t *ref, int stride)
1201 h = s->height & ~15;
1203 for (y = 0; y < h; y += 16) {
1204 for (x = 0; x < w; x += 16) {
1205 int offset = x + y * stride;
1206 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
/* pix_sum over 256 pixels, +128 for rounding, >>8 gives the block mean. */
1208 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1209 int sae = get_sae(src + offset, mean, stride);
1211 acc += sae + 500 < sad;
/* Thin wrapper: allocate (or wrap, when shared) the buffers of a Picture
 * with this encoder's geometry; also fills in s->linesize/uvlinesize. */
1217 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1219 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1220 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1221 s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1222 &s->linesize, &s->uvlinesize);
/* Take one user-supplied frame into the encoder's input queue:
 * validate/guess its pts, either reference the frame directly ("direct",
 * when strides and alignment match) or copy it into an internal Picture
 * with edge padding, then append it at position encoding_delay in
 * s->input_picture[]. NOTE(review): several original lines (declarations,
 * braces, error paths) are elided in this excerpt. */
1225 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1227 Picture *pic = NULL;
1229 int i, display_picture_number = 0, ret;
/* Reorder delay: max_b_frames when B-frames are used, otherwise one frame
 * unless low_delay is set. */
1230 int encoding_delay = s->max_b_frames ? s->max_b_frames
1231 : (s->low_delay ? 0 : 1);
1232 int flush_offset = 1;
1237 display_picture_number = s->input_picture_number++;
1239 if (pts != AV_NOPTS_VALUE) {
1240 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1241 int64_t last = s->user_specified_pts;
/* Timestamps must be strictly increasing. */
1244 av_log(s->avctx, AV_LOG_ERROR,
1245 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1247 return AVERROR(EINVAL);
1250 if (!s->low_delay && display_picture_number == 1)
1251 s->dts_delta = pts - last;
1253 s->user_specified_pts = pts;
/* No pts on the input frame: continue from the last one, or fall back to
 * the display picture number. */
1255 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1256 s->user_specified_pts =
1257 pts = s->user_specified_pts + 1;
1258 av_log(s->avctx, AV_LOG_INFO,
1259 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1262 pts = display_picture_number;
/* Direct (zero-copy) use of the input frame is only possible when the
 * buffer exists, strides match ours, dimensions are multiples of 16 and
 * data/linesize meet STRIDE_ALIGN. */
1266 if (!pic_arg->buf[0] ||
1267 pic_arg->linesize[0] != s->linesize ||
1268 pic_arg->linesize[1] != s->uvlinesize ||
1269 pic_arg->linesize[2] != s->uvlinesize)
1271 if ((s->width & 15) || (s->height & 15))
1273 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1275 if (s->linesize & (STRIDE_ALIGN-1))
1278 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1279 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1281 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1285 pic = &s->picture[i];
1289 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1292 ret = alloc_picture(s, pic, direct);
1297 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1298 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1299 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1302 int h_chroma_shift, v_chroma_shift;
1303 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
/* Copy each of the three planes into the internal buffer. */
1307 for (i = 0; i < 3; i++) {
1308 int src_stride = pic_arg->linesize[i];
1309 int dst_stride = i ? s->uvlinesize : s->linesize;
1310 int h_shift = i ? h_chroma_shift : 0;
1311 int v_shift = i ? v_chroma_shift : 0;
1312 int w = s->width >> h_shift;
1313 int h = s->height >> v_shift;
1314 uint8_t *src = pic_arg->data[i];
1315 uint8_t *dst = pic->f->data[i];
/* Interlaced MPEG-2 with tall alignment needs extra bottom padding. */
1318 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1319 && !s->progressive_sequence
1320 && FFALIGN(s->height, 32) - s->height > 16)
1323 if (!s->avctx->rc_buffer_size)
1324 dst += INPLACE_OFFSET;
1326 if (src_stride == dst_stride)
1327 memcpy(dst, src, src_stride * h)
1330 uint8_t *dst2 = dst;
1332 memcpy(dst2, src, w);
/* Pad the borders when dimensions are not aligned. */
1337 if ((s->width & 15) || (s->height & (vpad-1))) {
1338 s->mpvencdsp.draw_edges(dst, dst_stride,
1348 ret = av_frame_copy_props(pic->f, pic_arg);
1352 pic->f->display_picture_number = display_picture_number;
1353 pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1355 /* Flushing: When we have not received enough input frames,
1356 * ensure s->input_picture[0] contains the first picture */
1357 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1358 if (s->input_picture[flush_offset])
1361 if (flush_offset <= 1)
1364 encoding_delay = encoding_delay - flush_offset + 1;
1367 /* shift buffer entries */
1368 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1369 s->input_picture[i - flush_offset] = s->input_picture[i];
1371 s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether frame p is similar enough to reference ref to be skipped
 * (frame-skip optimization). Accumulates a per-8x8-block comparison score
 * over all three planes using the metric selected by frame_skip_exp:
 * 0=max, 1=sum of abs, 2=sum of squares, 3=|v^3|, 4=v^4. A negative
 * frame_skip_exp applies the inverse power to normalize per macroblock. */
1376 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1380 int64_t score64 = 0;
1382 for (plane = 0; plane < 3; plane++) {
1383 const int stride = p->f->linesize[plane];
/* Luma plane covers 2x the macroblock grid of the (4:2:0) chroma planes. */
1384 const int bw = plane ? 1 : 2;
1385 for (y = 0; y < s->mb_height * bw; y++) {
1386 for (x = 0; x < s->mb_width * bw; x++) {
/* Non-shared pictures carry a 16-byte in-place offset. */
1387 int off = p->shared ? 0 : 16;
1388 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1389 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1390 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1392 switch (FFABS(s->frame_skip_exp)) {
1393 case 0: score = FFMAX(score, v); break;
1394 case 1: score += FFABS(v); break;
1395 case 2: score64 += v * (int64_t)v; break;
1396 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1397 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1406 if (s->frame_skip_exp < 0)
1407 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1408 -1.0/s->frame_skip_exp);
/* Skip when below the absolute threshold or the lambda-scaled factor. */
1410 if (score64 < s->frame_skip_threshold)
1412 if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
/* Encode a single frame with a scratch codec context (send/receive API)
 * and report the resulting packet size; used by estimate_best_b_count()
 * to probe different B-frame arrangements. */
1417 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1419 AVPacket pkt = { 0 };
1423 av_init_packet(&pkt);
1425 ret = avcodec_send_frame(c, frame);
1430 ret = avcodec_receive_packet(c, &pkt);
1433 av_packet_unref(&pkt);
/* EAGAIN/EOF are expected flow-control results, not errors. */
1434 } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
/* b_frame_strategy==2: brute-force search for the best number of B-frames.
 * Downscales the queued input pictures by brd_scale, then for each
 * candidate B-count j encodes the mini-sequence with a scratch encoder and
 * picks the arrangement with the lowest rate-distortion cost
 * (bits*lambda2 plus reconstruction error). NOTE(review): some original
 * lines are elided in this excerpt. */
1441 static int estimate_best_b_count(MpegEncContext *s)
1443 const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1444 const int scale = s->brd_scale;
1445 int width = s->width >> scale;
1446 int height = s->height >> scale;
1447 int i, j, out_size, p_lambda, b_lambda, lambda2;
1448 int64_t best_rd = INT64_MAX;
1449 int best_b_count = -1;
1452 av_assert0(scale >= 0 && scale <= 3);
1455 //s->next_picture_ptr->quality;
1456 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1457 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1458 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1459 if (!b_lambda) // FIXME we should do this somewhere else
1460 b_lambda = p_lambda;
1461 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* Shrink the reference frame (i==0) and the queued inputs into tmp_frames. */
1464 for (i = 0; i < s->max_b_frames + 2; i++) {
1465 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1466 s->next_picture_ptr;
1469 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1470 pre_input = *pre_input_ptr;
1471 memcpy(data, pre_input_ptr->f->data, sizeof(data));
/* Non-shared input pictures carry the in-place offset. */
1473 if (!pre_input.shared && i) {
1474 data[0] += INPLACE_OFFSET;
1475 data[1] += INPLACE_OFFSET;
1476 data[2] += INPLACE_OFFSET;
1479 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1480 s->tmp_frames[i]->linesize[0],
1482 pre_input.f->linesize[0],
1484 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1485 s->tmp_frames[i]->linesize[1],
1487 pre_input.f->linesize[1],
1488 width >> 1, height >> 1);
1489 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1490 s->tmp_frames[i]->linesize[2],
1492 pre_input.f->linesize[2],
1493 width >> 1, height >> 1);
/* Try every candidate number of consecutive B-frames j. */
1497 for (j = 0; j < s->max_b_frames + 1; j++) {
1501 if (!s->input_picture[j])
1504 c = avcodec_alloc_context3(NULL);
1506 return AVERROR(ENOMEM);
1510 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1511 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1512 c->mb_decision = s->avctx->mb_decision;
1513 c->me_cmp = s->avctx->me_cmp;
1514 c->mb_cmp = s->avctx->mb_cmp;
1515 c->me_sub_cmp = s->avctx->me_sub_cmp;
1516 c->pix_fmt = AV_PIX_FMT_YUV420P;
1517 c->time_base = s->avctx->time_base;
1518 c->max_b_frames = s->max_b_frames;
1520 ret = avcodec_open2(c, codec, NULL);
/* The reference frame is coded as I; its cost is excluded from rd below. */
1524 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1525 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1527 out_size = encode_frame(c, s->tmp_frames[0]);
1533 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1535 for (i = 0; i < s->max_b_frames + 1; i++) {
/* Every (j+1)-th frame (and the last) is a P-frame, the rest are B. */
1536 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1538 s->tmp_frames[i + 1]->pict_type = is_p ?
1539 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1540 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1542 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1548 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1551 /* get the delayed frames */
1552 out_size = encode_frame(c, NULL);
1557 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* Add distortion (PSNR error accumulated by the scratch encoder). */
1559 rd += c->error[0] + c->error[1] + c->error[2];
1567 avcodec_free_context(&c);
1572 return best_b_count;
/* Pick the next picture to encode and assign coding types/order: handles
 * frame skipping, decides the number of B-frames before the next P (per
 * b_frame_strategy), reorders input pictures into coded order, and sets
 * up s->new_picture / s->current_picture_ptr. NOTE(review): some original
 * lines are elided in this excerpt. */
1575 static int select_input_picture(MpegEncContext *s)
1579 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1580 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1581 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1583 /* set next picture type & ordering */
1584 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1585 if (s->frame_skip_threshold || s->frame_skip_factor) {
1586 if (s->picture_in_gop_number < s->gop_size &&
1587 s->next_picture_ptr &&
1588 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1589 // FIXME check that the gop check above is +-1 correct
1590 av_frame_unref(s->input_picture[0]->f);
/* Tell the VBV model a zero-size frame was emitted. */
1592 ff_vbv_update(s, 0);
/* No reference yet or intra-only stream: code an I-frame immediately. */
1598 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1599 !s->next_picture_ptr || s->intra_only) {
1600 s->reordered_input_picture[0] = s->input_picture[0];
1601 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1602 s->reordered_input_picture[0]->f->coded_picture_number =
1603 s->coded_picture_number++;
/* Two-pass mode: take picture types from the first-pass stats. */
1607 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1608 for (i = 0; i < s->max_b_frames + 1; i++) {
1609 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1611 if (pict_num >= s->rc_context.num_entries)
1613 if (!s->input_picture[i]) {
1614 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1618 s->input_picture[i]->f->pict_type =
1619 s->rc_context.entry[pict_num].new_pict_type;
/* Strategy 0: always use the configured maximum number of B-frames. */
1623 if (s->b_frame_strategy == 0) {
1624 b_frames = s->max_b_frames;
1625 while (b_frames && !s->input_picture[b_frames])
/* Strategy 1: use the intra-count heuristic to bound the B run. */
1627 } else if (s->b_frame_strategy == 1) {
1628 for (i = 1; i < s->max_b_frames + 1; i++) {
1629 if (s->input_picture[i] &&
1630 s->input_picture[i]->b_frame_score == 0) {
1631 s->input_picture[i]->b_frame_score =
1633 s->input_picture[i ]->f->data[0],
1634 s->input_picture[i - 1]->f->data[0],
1638 for (i = 0; i < s->max_b_frames + 1; i++) {
1639 if (!s->input_picture[i] ||
1640 s->input_picture[i]->b_frame_score - 1 >
1641 s->mb_num / s->b_sensitivity)
1645 b_frames = FFMAX(0, i - 1);
/* Reset cached scores for the pictures we are about to consume. */
1648 for (i = 0; i < b_frames + 1; i++) {
1649 s->input_picture[i]->b_frame_score = 0;
/* Strategy 2: exhaustive rate-distortion search. */
1651 } else if (s->b_frame_strategy == 2) {
1652 b_frames = estimate_best_b_count(s);
/* A user-forced non-B type inside the run truncates it. */
1659 for (i = b_frames - 1; i >= 0; i--) {
1660 int type = s->input_picture[i]->f->pict_type;
1661 if (type && type != AV_PICTURE_TYPE_B)
1664 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1665 b_frames == s->max_b_frames) {
1666 av_log(s->avctx, AV_LOG_ERROR,
1667 "warning, too many B-frames in a row\n");
/* GOP boundary handling: force an I-frame at the configured interval. */
1670 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1671 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1672 s->gop_size > s->picture_in_gop_number) {
1673 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1675 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1677 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1681 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1682 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
/* Emit the anchor (P or I) first, then the B-frames, in coded order. */
1685 s->reordered_input_picture[0] = s->input_picture[b_frames];
1686 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1687 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1688 s->reordered_input_picture[0]->f->coded_picture_number =
1689 s->coded_picture_number++;
1690 for (i = 0; i < b_frames; i++) {
1691 s->reordered_input_picture[i + 1] = s->input_picture[i];
1692 s->reordered_input_picture[i + 1]->f->pict_type =
1694 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1695 s->coded_picture_number++;
1700 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1702 if (s->reordered_input_picture[0]) {
/* reference=3 marks anchor frames; B-frames are never referenced. */
1703 s->reordered_input_picture[0]->reference =
1704 s->reordered_input_picture[0]->f->pict_type !=
1705 AV_PICTURE_TYPE_B ? 3 : 0;
1707 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1710 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1711 // input is a shared pix, so we can't modify it -> allocate a new
1712 // one & ensure that the shared one is reuseable
1715 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1718 pic = &s->picture[i];
1720 pic->reference = s->reordered_input_picture[0]->reference;
1721 if (alloc_picture(s, pic, 0) < 0) {
1725 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1729 /* mark us unused / free shared pic */
1730 av_frame_unref(s->reordered_input_picture[0]->f);
1731 s->reordered_input_picture[0]->shared = 0;
1733 s->current_picture_ptr = pic;
1735 // input is not a shared pix -> reuse buffer for current_pix
1736 s->current_picture_ptr = s->reordered_input_picture[0];
1737 for (i = 0; i < 4; i++) {
1738 s->new_picture.f->data[i] += INPLACE_OFFSET;
1741 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1742 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1743 s->current_picture_ptr)) < 0)
1746 s->picture_number = s->new_picture.f->display_picture_number;
/* Post-encode bookkeeping: pad reference-frame borders for unrestricted
 * motion vectors, remember per-type lambdas for the next frame, and fill
 * the deprecated coded_frame / error fields. */
1751 static void frame_end(MpegEncContext *s)
1753 if (s->unrestricted_mv &&
1754 s->current_picture.reference &&
1756 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1757 int hshift = desc->log2_chroma_w;
1758 int vshift = desc->log2_chroma_h;
/* Replicate edge pixels around all three planes (chroma scaled by the
 * subsampling shifts) so MC can read outside the picture. */
1759 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1760 s->current_picture.f->linesize[0],
1761 s->h_edge_pos, s->v_edge_pos,
1762 EDGE_WIDTH, EDGE_WIDTH,
1763 EDGE_TOP | EDGE_BOTTOM);
1764 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1765 s->current_picture.f->linesize[1],
1766 s->h_edge_pos >> hshift,
1767 s->v_edge_pos >> vshift,
1768 EDGE_WIDTH >> hshift,
1769 EDGE_WIDTH >> vshift,
1770 EDGE_TOP | EDGE_BOTTOM);
1771 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1772 s->current_picture.f->linesize[2],
1773 s->h_edge_pos >> hshift,
1774 s->v_edge_pos >> vshift,
1775 EDGE_WIDTH >> hshift,
1776 EDGE_WIDTH >> vshift,
1777 EDGE_TOP | EDGE_BOTTOM);
1782 s->last_pict_type = s->pict_type;
1783 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1784 if (s->pict_type!= AV_PICTURE_TYPE_B)
1785 s->last_non_b_pict_type = s->pict_type;
1787 #if FF_API_CODED_FRAME
1788 FF_DISABLE_DEPRECATION_WARNINGS
1789 av_frame_unref(s->avctx->coded_frame)
1790 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1791 FF_ENABLE_DEPRECATION_WARNINGS
1793 #if FF_API_ERROR_FRAME
1794 FF_DISABLE_DEPRECATION_WARNINGS
1795 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1796 sizeof(s->current_picture.encoding_error));
1797 FF_ENABLE_DEPRECATION_WARNINGS
/* Refresh the DCT denoising offsets from the accumulated error statistics,
 * separately for intra and inter blocks. Counters are halved once the
 * sample count exceeds 2^16 so the average tracks recent frames. */
1801 static void update_noise_reduction(MpegEncContext *s)
1805 for (intra = 0; intra < 2; intra++) {
1806 if (s->dct_count[intra] > (1 << 16)) {
1807 for (i = 0; i < 64; i++) {
1808 s->dct_error_sum[intra][i] >>= 1;
1810 s->dct_count[intra] >>= 1;
/* offset = noise_reduction * count / error_sum, rounded. */
1813 for (i = 0; i < 64; i++) {
1814 s->dct_offset[intra][i] = (s->noise_reduction *
1815 s->dct_count[intra] +
1816 s->dct_error_sum[intra][i] / 2) /
1817 (s->dct_error_sum[intra][i] + 1);
/* Per-frame setup before encoding: rotate last/next/current reference
 * pictures, adjust data pointers for field pictures, select the
 * unquantizer functions for the codec, and refresh noise reduction.
 * NOTE(review): some original lines (returns, braces) are elided. */
1822 static int frame_start(MpegEncContext *s)
1826 /* mark & release old frames */
1827 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1828 s->last_picture_ptr != s->next_picture_ptr &&
1829 s->last_picture_ptr->f->buf[0]) {
1830 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1833 s->current_picture_ptr->f->pict_type = s->pict_type;
1834 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1836 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1837 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1838 s->current_picture_ptr)) < 0)
/* Non-B frames become the new forward reference. */
1841 if (s->pict_type != AV_PICTURE_TYPE_B) {
1842 s->last_picture_ptr = s->next_picture_ptr;
1844 s->next_picture_ptr = s->current_picture_ptr;
1847 if (s->last_picture_ptr) {
1848 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1849 if (s->last_picture_ptr->f->buf[0] &&
1850 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1851 s->last_picture_ptr)) < 0)
1854 if (s->next_picture_ptr) {
1855 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1856 if (s->next_picture_ptr->f->buf[0] &&
1857 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1858 s->next_picture_ptr)) < 0)
/* Field pictures: double strides; bottom field starts one line down. */
1862 if (s->picture_structure!= PICT_FRAME) {
1864 for (i = 0; i < 4; i++) {
1865 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1866 s->current_picture.f->data[i] +=
1867 s->current_picture.f->linesize[i];
1869 s->current_picture.f->linesize[i] *= 2;
1870 s->last_picture.f->linesize[i] *= 2;
1871 s->next_picture.f->linesize[i] *= 2;
/* Pick unquantize implementations matching the output bitstream format. */
1875 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1876 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1877 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1878 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1879 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1880 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1882 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1883 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1886 if (s->dct_error_sum) {
1887 av_assert2(s->noise_reduction && s->encoding);
1888 update_noise_reduction(s);
/* Main per-frame entry point: queue the input frame, pick the next
 * picture, allocate the output packet, run the (possibly multi-slice)
 * encode, enforce VBV constraints (re-encoding at higher lambda when the
 * frame overflows the buffer), append stuffing, patch vbv_delay for CBR
 * MPEG-1/2, and fill packet pts/dts/flags. NOTE(review): several original
 * lines are elided in this excerpt. */
1894 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1895 const AVFrame *pic_arg, int *got_packet)
1897 MpegEncContext *s = avctx->priv_data;
1898 int i, stuffing_count, ret;
1899 int context_count = s->slice_context_count;
1901 s->vbv_ignore_qmax = 0;
1903 s->picture_in_gop_number++;
1905 if (load_input_picture(s, pic_arg) < 0)
1908 if (select_input_picture(s) < 0) {
/* A picture was selected: encode it. */
1913 if (s->new_picture.f->data[0]) {
/* Single-slice encodes into the context's growing buffer; otherwise a
 * worst-case-sized packet is allocated up front. */
1914 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1915 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1917 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1918 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1921 s->mb_info_ptr = av_packet_new_side_data(pkt,
1922 AV_PKT_DATA_H263_MB_INFO,
1923 s->mb_width*s->mb_height*12);
1924 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* Give each slice thread a proportional chunk of the output buffer. */
1927 for (i = 0; i < context_count; i++) {
1928 int start_y = s->thread_context[i]->start_mb_y;
1929 int end_y = s->thread_context[i]-> end_mb_y;
1930 int h = s->mb_height;
1931 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1932 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1934 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1937 s->pict_type = s->new_picture.f->pict_type;
1939 ret = frame_start(s);
1943 ret = encode_picture(s, s->picture_number);
1944 if (growing_buffer) {
1945 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1946 pkt->data = s->pb.buf;
1947 pkt->size = avctx->internal->byte_buffer_size;
/* Mirror bit-accounting stats into the deprecated avctx fields. */
1952 #if FF_API_STAT_BITS
1953 FF_DISABLE_DEPRECATION_WARNINGS
1954 avctx->header_bits = s->header_bits;
1955 avctx->mv_bits = s->mv_bits;
1956 avctx->misc_bits = s->misc_bits;
1957 avctx->i_tex_bits = s->i_tex_bits;
1958 avctx->p_tex_bits = s->p_tex_bits;
1959 avctx->i_count = s->i_count;
1960 // FIXME f/b_count in avctx
1961 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1962 avctx->skip_count = s->skip_count;
1963 FF_ENABLE_DEPRECATION_WARNINGS
1968 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1969 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
/* VBV check: if the frame is too large, raise lambda and re-encode. */
1971 if (avctx->rc_buffer_size) {
1972 RateControlContext *rcc = &s->rc_context;
1973 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1974 int hq = (s->avctx->mb_decision == FF_MB_DECISION_RD || s->avctx->trellis);
1975 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1977 if (put_bits_count(&s->pb) > max_size &&
1978 s->lambda < s->lmax) {
1979 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1980 (s->qscale + 1) / s->qscale);
1981 if (s->adaptive_quant) {
1983 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1984 s->lambda_table[i] =
1985 FFMAX(s->lambda_table[i] + min_step,
1986 s->lambda_table[i] * (s->qscale + 1) /
1989 s->mb_skipped = 0; // done in frame_start()
1990 // done in encode_picture() so we must undo it
1991 if (s->pict_type == AV_PICTURE_TYPE_P) {
1992 if (s->flipflop_rounding ||
1993 s->codec_id == AV_CODEC_ID_H263P ||
1994 s->codec_id == AV_CODEC_ID_MPEG4)
1995 s->no_rounding ^= 1;
1997 if (s->pict_type != AV_PICTURE_TYPE_B) {
1998 s->time_base = s->last_time_base;
1999 s->last_non_b_time = s->time - s->pp_time;
2001 for (i = 0; i < context_count; i++) {
2002 PutBitContext *pb = &s->thread_context[i]->pb;
2003 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
2005 s->vbv_ignore_qmax = 1;
2006 av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
2010 av_assert0(s->avctx->rc_max_rate);
2013 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
2014 ff_write_pass1_stats(s);
2016 for (i = 0; i < 4; i++) {
2017 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
2018 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
2020 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
2021 s->current_picture_ptr->encoding_error,
2022 (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
2025 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
2026 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
2027 s->misc_bits + s->i_tex_bits +
2029 flush_put_bits(&s->pb);
2030 s->frame_bits = put_bits_count(&s->pb);
2032 stuffing_count = ff_vbv_update(s, s->frame_bits);
2033 s->stuffing_bits = 8*stuffing_count;
/* Append codec-specific stuffing so the VBV buffer does not overflow. */
2034 if (stuffing_count) {
2035 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
2036 stuffing_count + 50) {
2037 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
2041 switch (s->codec_id) {
2042 case AV_CODEC_ID_MPEG1VIDEO:
2043 case AV_CODEC_ID_MPEG2VIDEO:
2044 while (stuffing_count--) {
2045 put_bits(&s->pb, 8, 0);
2048 case AV_CODEC_ID_MPEG4:
/* MPEG-4 stuffing: a 0x1C3 start code followed by 0xFF bytes. */
2049 put_bits(&s->pb, 16, 0);
2050 put_bits(&s->pb, 16, 0x1C3);
2051 stuffing_count -= 4;
2052 while (stuffing_count--) {
2053 put_bits(&s->pb, 8, 0xFF);
2057 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2059 flush_put_bits(&s->pb);
2060 s->frame_bits = put_bits_count(&s->pb);
2063 /* update MPEG-1/2 vbv_delay for CBR */
2064 if (s->avctx->rc_max_rate &&
2065 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
2066 s->out_format == FMT_MPEG1 &&
2067 90000LL * (avctx->rc_buffer_size - 1) <=
2068 s->avctx->rc_max_rate * 0xFFFFLL) {
2069 AVCPBProperties *props;
2072 int vbv_delay, min_delay;
2073 double inbits = s->avctx->rc_max_rate *
2074 av_q2d(s->avctx->time_base);
2075 int minbits = s->frame_bits - 8 *
2076 (s->vbv_delay_ptr - s->pb.buf - 1);
2077 double bits = s->rc_context.buffer_index + minbits - inbits;
2080 av_log(s->avctx, AV_LOG_ERROR,
2081 "Internal error, negative bits\n");
2083 assert(s->repeat_first_field == 0);
/* vbv_delay is expressed in 90 kHz clock ticks. */
2085 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
2086 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
2087 s->avctx->rc_max_rate;
2089 vbv_delay = FFMAX(vbv_delay, min_delay);
2091 av_assert0(vbv_delay < 0xFFFF);
/* Patch the 16-bit vbv_delay field in the already-written header bytes. */
2093 s->vbv_delay_ptr[0] &= 0xF8;
2094 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2095 s->vbv_delay_ptr[1] = vbv_delay >> 5;
2096 s->vbv_delay_ptr[2] &= 0x07;
2097 s->vbv_delay_ptr[2] |= vbv_delay << 3;
2099 props = av_cpb_properties_alloc(&props_size);
2101 return AVERROR(ENOMEM);
2102 props->vbv_delay = vbv_delay * 300;
2104 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
2105 (uint8_t*)props, props_size);
2111 #if FF_API_VBV_DELAY
2112 FF_DISABLE_DEPRECATION_WARNINGS
2113 avctx->vbv_delay = vbv_delay * 300;
2114 FF_ENABLE_DEPRECATION_WARNINGS
2117 s->total_bits += s->frame_bits;
2118 #if FF_API_STAT_BITS
2119 FF_DISABLE_DEPRECATION_WARNINGS
2120 avctx->frame_bits = s->frame_bits;
2121 FF_ENABLE_DEPRECATION_WARNINGS
/* Set pts/dts: with B-frame reordering dts lags pts by dts_delta. */
2125 pkt->pts = s->current_picture.f->pts;
2126 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2127 if (!s->current_picture.f->coded_picture_number)
2128 pkt->dts = pkt->pts - s->dts_delta;
2130 pkt->dts = s->reordered_pts;
2131 s->reordered_pts = pkt->pts;
2133 pkt->dts = pkt->pts;
2134 if (s->current_picture.f->key_frame)
2135 pkt->flags |= AV_PKT_FLAG_KEY;
2137 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2142 /* release non-reference frames */
2143 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2144 if (!s->picture[i].reference)
2145 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2148 av_assert1((s->frame_bits & 7) == 0);
2150 pkt->size = s->frame_bits / 8;
2151 *got_packet = !!pkt->size;
/* Zero out a block whose coefficients are so few and small that coding it
 * costs more than it is worth. Each coefficient position has a weight
 * (tab[], higher for low frequencies); if the weighted score of all
 * |level|==1 coefficients stays below the threshold and no |level|>1
 * coefficient exists, the block (minus an optionally kept DC) is cleared. */
2155 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2156 int n, int threshold)
2158 static const char tab[64] = {
2159 3, 2, 2, 1, 1, 1, 1, 1,
2160 1, 1, 1, 1, 1, 1, 1, 1,
2161 1, 1, 1, 1, 1, 1, 1, 1,
2162 0, 0, 0, 0, 0, 0, 0, 0,
2163 0, 0, 0, 0, 0, 0, 0, 0,
2164 0, 0, 0, 0, 0, 0, 0, 0,
2165 0, 0, 0, 0, 0, 0, 0, 0,
2166 0, 0, 0, 0, 0, 0, 0, 0
2171 int16_t *block = s->block[n];
2172 const int last_index = s->block_last_index[n];
/* Negative threshold means: keep the DC coefficient. */
2175 if (threshold < 0) {
2177 threshold = -threshold;
2181 /* Are all we could set to zero already zero? */
2182 if (last_index <= skip_dc - 1)
2185 for (i = 0; i <= last_index; i++) {
2186 const int j = s->intra_scantable.permutated[i];
2187 const int level = FFABS(block[j]);
2189 if (skip_dc && i == 0)
2193 } else if (level > 1) {
/* Any coefficient larger than 1 makes the block worth keeping. */
2199 if (score >= threshold)
2201 for (i = skip_dc; i <= last_index; i++) {
2202 const int j = s->intra_scantable.permutated[i];
2206 s->block_last_index[n] = 0;
2208 s->block_last_index[n] = -1;
/* Clamp quantized coefficients into the codec's representable range
 * [min_qcoeff, max_qcoeff], skipping the intra DC coefficient, and warn
 * once per block when clipping occurred in simple MB-decision mode. */
2211 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2215 const int maxlevel = s->max_qcoeff;
2216 const int minlevel = s->min_qcoeff;
2220 i = 1; // skip clipping of intra dc
2224 for (; i <= last_index; i++) {
2225 const int j = s->intra_scantable.permutated[i];
2226 int level = block[j];
2228 if (level > maxlevel) {
2231 } else if (level < minlevel) {
2239 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2240 av_log(s->avctx, AV_LOG_INFO,
2241 "warning, clipping %d dct coefficients to %d..%d\n",
2242 overflow, minlevel, maxlevel);
/* Compute a per-pixel perceptual weight for an 8x8 block: for each pixel,
 * look at its 3x3 neighbourhood (clamped at block edges) and derive a
 * weight from the local standard deviation (36*sqrt(variance*count^2)/count
 * form). Used by the trellis refinement to favour errors in busy areas. */
2245 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2249 for (y = 0; y < 8; y++) {
2250 for (x = 0; x < 8; x++) {
2256 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2257 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2258 int v = ptr[x2 + y2 * stride];
2264 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2269 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2270 int motion_x, int motion_y,
2271 int mb_block_height,
2275 int16_t weight[12][64];
2276 int16_t orig[12][64];
2277 const int mb_x = s->mb_x;
2278 const int mb_y = s->mb_y;
2281 int dct_offset = s->linesize * 8; // default for progressive frames
2282 int uv_dct_offset = s->uvlinesize * 8;
2283 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2284 ptrdiff_t wrap_y, wrap_c;
2286 for (i = 0; i < mb_block_count; i++)
2287 skip_dct[i] = s->skipdct;
2289 if (s->adaptive_quant) {
2290 const int last_qp = s->qscale;
2291 const int mb_xy = mb_x + mb_y * s->mb_stride;
2293 s->lambda = s->lambda_table[mb_xy];
2296 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2297 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2298 s->dquant = s->qscale - last_qp;
2300 if (s->out_format == FMT_H263) {
2301 s->dquant = av_clip(s->dquant, -2, 2);
2303 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2305 if (s->pict_type == AV_PICTURE_TYPE_B) {
2306 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2309 if (s->mv_type == MV_TYPE_8X8)
2315 ff_set_qscale(s, last_qp + s->dquant);
2316 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2317 ff_set_qscale(s, s->qscale + s->dquant);
2319 wrap_y = s->linesize;
2320 wrap_c = s->uvlinesize;
2321 ptr_y = s->new_picture.f->data[0] +
2322 (mb_y * 16 * wrap_y) + mb_x * 16;
2323 ptr_cb = s->new_picture.f->data[1] +
2324 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2325 ptr_cr = s->new_picture.f->data[2] +
2326 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2328 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2329 uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2330 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2331 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2332 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2334 16, 16, mb_x * 16, mb_y * 16,
2335 s->width, s->height);
2337 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2339 mb_block_width, mb_block_height,
2340 mb_x * mb_block_width, mb_y * mb_block_height,
2342 ptr_cb = ebuf + 16 * wrap_y;
2343 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2345 mb_block_width, mb_block_height,
2346 mb_x * mb_block_width, mb_y * mb_block_height,
2348 ptr_cr = ebuf + 16 * wrap_y + 16;
2352 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2353 int progressive_score, interlaced_score;
2355 s->interlaced_dct = 0;
2356 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2357 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2358 NULL, wrap_y, 8) - 400;
2360 if (progressive_score > 0) {
2361 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2362 NULL, wrap_y * 2, 8) +
2363 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2364 NULL, wrap_y * 2, 8);
2365 if (progressive_score > interlaced_score) {
2366 s->interlaced_dct = 1;
2368 dct_offset = wrap_y;
2369 uv_dct_offset = wrap_c;
2371 if (s->chroma_format == CHROMA_422 ||
2372 s->chroma_format == CHROMA_444)
2378 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2379 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2380 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2381 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2383 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2387 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2388 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2389 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2390 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2391 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2392 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2393 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2394 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2395 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2396 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2397 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2398 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2402 op_pixels_func (*op_pix)[4];
2403 qpel_mc_func (*op_qpix)[16];
2404 uint8_t *dest_y, *dest_cb, *dest_cr;
2406 dest_y = s->dest[0];
2407 dest_cb = s->dest[1];
2408 dest_cr = s->dest[2];
2410 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2411 op_pix = s->hdsp.put_pixels_tab;
2412 op_qpix = s->qdsp.put_qpel_pixels_tab;
2414 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2415 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2418 if (s->mv_dir & MV_DIR_FORWARD) {
2419 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2420 s->last_picture.f->data,
2422 op_pix = s->hdsp.avg_pixels_tab;
2423 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2425 if (s->mv_dir & MV_DIR_BACKWARD) {
2426 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2427 s->next_picture.f->data,
2431 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2432 int progressive_score, interlaced_score;
2434 s->interlaced_dct = 0;
2435 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2436 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2440 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2441 progressive_score -= 400;
2443 if (progressive_score > 0) {
2444 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2446 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2450 if (progressive_score > interlaced_score) {
2451 s->interlaced_dct = 1;
2453 dct_offset = wrap_y;
2454 uv_dct_offset = wrap_c;
2456 if (s->chroma_format == CHROMA_422)
2462 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2463 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2464 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2465 dest_y + dct_offset, wrap_y);
2466 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2467 dest_y + dct_offset + 8, wrap_y);
2469 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2473 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2474 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2475 if (!s->chroma_y_shift) { /* 422 */
2476 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2477 dest_cb + uv_dct_offset, wrap_c);
2478 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2479 dest_cr + uv_dct_offset, wrap_c);
2482 /* pre quantization */
2483 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2484 2 * s->qscale * s->qscale) {
2486 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2488 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2490 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2491 wrap_y, 8) < 20 * s->qscale)
2493 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2494 wrap_y, 8) < 20 * s->qscale)
2496 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2498 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2500 if (!s->chroma_y_shift) { /* 422 */
2501 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2502 dest_cb + uv_dct_offset,
2503 wrap_c, 8) < 20 * s->qscale)
2505 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2506 dest_cr + uv_dct_offset,
2507 wrap_c, 8) < 20 * s->qscale)
2513 if (s->quantizer_noise_shaping) {
2515 get_visual_weight(weight[0], ptr_y , wrap_y);
2517 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2519 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2521 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2523 get_visual_weight(weight[4], ptr_cb , wrap_c);
2525 get_visual_weight(weight[5], ptr_cr , wrap_c);
2526 if (!s->chroma_y_shift) { /* 422 */
2528 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2531 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2534 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2537 /* DCT & quantize */
2538 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2540 for (i = 0; i < mb_block_count; i++) {
2543 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2544 // FIXME we could decide to change to quantizer instead of
2546 // JS: I don't think that would be a good idea it could lower
2547 // quality instead of improve it. Just INTRADC clipping
2548 // deserves changes in quantizer
2550 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2552 s->block_last_index[i] = -1;
2554 if (s->quantizer_noise_shaping) {
2555 for (i = 0; i < mb_block_count; i++) {
2557 s->block_last_index[i] =
2558 dct_quantize_refine(s, s->block[i], weight[i],
2559 orig[i], i, s->qscale);
2564 if (s->luma_elim_threshold && !s->mb_intra)
2565 for (i = 0; i < 4; i++)
2566 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2567 if (s->chroma_elim_threshold && !s->mb_intra)
2568 for (i = 4; i < mb_block_count; i++)
2569 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2571 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2572 for (i = 0; i < mb_block_count; i++) {
2573 if (s->block_last_index[i] == -1)
2574 s->coded_score[i] = INT_MAX / 256;
2579 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2580 s->block_last_index[4] =
2581 s->block_last_index[5] = 0;
2583 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2584 if (!s->chroma_y_shift) { /* 422 / 444 */
2585 for (i=6; i<12; i++) {
2586 s->block_last_index[i] = 0;
2587 s->block[i][0] = s->block[4][0];
2592 // non c quantize code returns incorrect block_last_index FIXME
2593 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2594 for (i = 0; i < mb_block_count; i++) {
2596 if (s->block_last_index[i] > 0) {
2597 for (j = 63; j > 0; j--) {
2598 if (s->block[i][s->intra_scantable.permutated[j]])
2601 s->block_last_index[i] = j;
2606 /* huffman encode */
2607 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2608 case AV_CODEC_ID_MPEG1VIDEO:
2609 case AV_CODEC_ID_MPEG2VIDEO:
2610 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2611 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2613 case AV_CODEC_ID_MPEG4:
2614 if (CONFIG_MPEG4_ENCODER)
2615 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2617 case AV_CODEC_ID_MSMPEG4V2:
2618 case AV_CODEC_ID_MSMPEG4V3:
2619 case AV_CODEC_ID_WMV1:
2620 if (CONFIG_MSMPEG4_ENCODER)
2621 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2623 case AV_CODEC_ID_WMV2:
2624 if (CONFIG_WMV2_ENCODER)
2625 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2627 case AV_CODEC_ID_H261:
2628 if (CONFIG_H261_ENCODER)
2629 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2631 case AV_CODEC_ID_H263:
2632 case AV_CODEC_ID_H263P:
2633 case AV_CODEC_ID_FLV1:
2634 case AV_CODEC_ID_RV10:
2635 case AV_CODEC_ID_RV20:
2636 if (CONFIG_H263_ENCODER)
2637 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2639 case AV_CODEC_ID_MJPEG:
2640 case AV_CODEC_ID_AMV:
2641 if (CONFIG_MJPEG_ENCODER)
2642 ff_mjpeg_encode_mb(s, s->block);
2649 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2651 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2652 else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2653 else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
2656 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2659 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2662 d->mb_skip_run= s->mb_skip_run;
2664 d->last_dc[i] = s->last_dc[i];
2667 d->mv_bits= s->mv_bits;
2668 d->i_tex_bits= s->i_tex_bits;
2669 d->p_tex_bits= s->p_tex_bits;
2670 d->i_count= s->i_count;
2671 d->f_count= s->f_count;
2672 d->b_count= s->b_count;
2673 d->skip_count= s->skip_count;
2674 d->misc_bits= s->misc_bits;
2678 d->qscale= s->qscale;
2679 d->dquant= s->dquant;
2681 d->esc3_level_length= s->esc3_level_length;
2684 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2687 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2688 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2691 d->mb_skip_run= s->mb_skip_run;
2693 d->last_dc[i] = s->last_dc[i];
2696 d->mv_bits= s->mv_bits;
2697 d->i_tex_bits= s->i_tex_bits;
2698 d->p_tex_bits= s->p_tex_bits;
2699 d->i_count= s->i_count;
2700 d->f_count= s->f_count;
2701 d->b_count= s->b_count;
2702 d->skip_count= s->skip_count;
2703 d->misc_bits= s->misc_bits;
2705 d->mb_intra= s->mb_intra;
2706 d->mb_skipped= s->mb_skipped;
2707 d->mv_type= s->mv_type;
2708 d->mv_dir= s->mv_dir;
2710 if(s->data_partitioning){
2712 d->tex_pb= s->tex_pb;
2716 d->block_last_index[i]= s->block_last_index[i];
2717 d->interlaced_dct= s->interlaced_dct;
2718 d->qscale= s->qscale;
2720 d->esc3_level_length= s->esc3_level_length;
2723 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2724 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2725 int *dmin, int *next_block, int motion_x, int motion_y)
2728 uint8_t *dest_backup[3];
2730 copy_context_before_encode(s, backup, type);
2732 s->block= s->blocks[*next_block];
2733 s->pb= pb[*next_block];
2734 if(s->data_partitioning){
2735 s->pb2 = pb2 [*next_block];
2736 s->tex_pb= tex_pb[*next_block];
2740 memcpy(dest_backup, s->dest, sizeof(s->dest));
2741 s->dest[0] = s->sc.rd_scratchpad;
2742 s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2743 s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2744 av_assert0(s->linesize >= 32); //FIXME
2747 encode_mb(s, motion_x, motion_y);
2749 score= put_bits_count(&s->pb);
2750 if(s->data_partitioning){
2751 score+= put_bits_count(&s->pb2);
2752 score+= put_bits_count(&s->tex_pb);
2755 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2756 ff_mpv_reconstruct_mb(s, s->block);
2758 score *= s->lambda2;
2759 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2763 memcpy(s->dest, dest_backup, sizeof(s->dest));
2770 copy_context_after_encode(best, s, type);
2774 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2775 uint32_t *sq = ff_square_tab + 256;
2780 return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2781 else if(w==8 && h==8)
2782 return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2786 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2795 static int sse_mb(MpegEncContext *s){
2799 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2800 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2803 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2804 return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2805 s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2806 s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2808 return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2809 s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2810 s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2813 return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2814 +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2815 +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2818 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2819 MpegEncContext *s= *(void**)arg;
2823 s->me.dia_size= s->avctx->pre_dia_size;
2824 s->first_slice_line=1;
2825 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2826 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2827 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2829 s->first_slice_line=0;
2837 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2838 MpegEncContext *s= *(void**)arg;
2840 ff_check_alignment();
2842 s->me.dia_size= s->avctx->dia_size;
2843 s->first_slice_line=1;
2844 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2845 s->mb_x=0; //for block init below
2846 ff_init_block_index(s);
2847 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2848 s->block_index[0]+=2;
2849 s->block_index[1]+=2;
2850 s->block_index[2]+=2;
2851 s->block_index[3]+=2;
2853 /* compute motion vector & mb_type and store in context */
2854 if(s->pict_type==AV_PICTURE_TYPE_B)
2855 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2857 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2859 s->first_slice_line=0;
2864 static int mb_var_thread(AVCodecContext *c, void *arg){
2865 MpegEncContext *s= *(void**)arg;
2868 ff_check_alignment();
2870 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2871 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2874 uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2876 int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2878 varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2879 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2881 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2882 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2883 s->me.mb_var_sum_temp += varc;
2889 static void write_slice_end(MpegEncContext *s){
2890 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2891 if(s->partitioned_frame){
2892 ff_mpeg4_merge_partitions(s);
2895 ff_mpeg4_stuffing(&s->pb);
2896 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2897 ff_mjpeg_encode_stuffing(s);
2900 avpriv_align_put_bits(&s->pb);
2901 flush_put_bits(&s->pb);
2903 if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2904 s->misc_bits+= get_bits_diff(s);
2907 static void write_mb_info(MpegEncContext *s)
2909 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2910 int offset = put_bits_count(&s->pb);
2911 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2912 int gobn = s->mb_y / s->gob_index;
2914 if (CONFIG_H263_ENCODER)
2915 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2916 bytestream_put_le32(&ptr, offset);
2917 bytestream_put_byte(&ptr, s->qscale);
2918 bytestream_put_byte(&ptr, gobn);
2919 bytestream_put_le16(&ptr, mba);
2920 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2921 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2922 /* 4MV not implemented */
2923 bytestream_put_byte(&ptr, 0); /* hmv2 */
2924 bytestream_put_byte(&ptr, 0); /* vmv2 */
2927 static void update_mb_info(MpegEncContext *s, int startcode)
2931 if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2932 s->mb_info_size += 12;
2933 s->prev_mb_info = s->last_mb_info;
2936 s->prev_mb_info = put_bits_count(&s->pb)/8;
2937 /* This might have incremented mb_info_size above, and we return without
2938 * actually writing any info into that slot yet. But in that case,
2939 * this will be called again at the start of the after writing the
2940 * start code, actually writing the mb info. */
2944 s->last_mb_info = put_bits_count(&s->pb)/8;
2945 if (!s->mb_info_size)
2946 s->mb_info_size += 12;
2950 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2952 if ( s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2953 && s->slice_context_count == 1
2954 && s->pb.buf == s->avctx->internal->byte_buffer) {
2955 int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2956 int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2958 uint8_t *new_buffer = NULL;
2959 int new_buffer_size = 0;
2961 if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2962 av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2963 return AVERROR(ENOMEM);
2968 av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2969 s->avctx->internal->byte_buffer_size + size_increase);
2971 return AVERROR(ENOMEM);
2973 memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2974 av_free(s->avctx->internal->byte_buffer);
2975 s->avctx->internal->byte_buffer = new_buffer;
2976 s->avctx->internal->byte_buffer_size = new_buffer_size;
2977 rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2978 s->ptr_lastgob = s->pb.buf + lastgob_pos;
2979 s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2981 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2982 return AVERROR(EINVAL);
2986 static int encode_thread(AVCodecContext *c, void *arg){
2987 MpegEncContext *s= *(void**)arg;
2989 int chr_h= 16>>s->chroma_y_shift;
2991 MpegEncContext best_s = { 0 }, backup_s;
2992 uint8_t bit_buf[2][MAX_MB_BYTES];
2993 uint8_t bit_buf2[2][MAX_MB_BYTES];
2994 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2995 PutBitContext pb[2], pb2[2], tex_pb[2];
2997 ff_check_alignment();
3000 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
3001 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
3002 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
3005 s->last_bits= put_bits_count(&s->pb);
3016 /* init last dc values */
3017 /* note: quant matrix value (8) is implied here */
3018 s->last_dc[i] = 128 << s->intra_dc_precision;
3020 s->current_picture.encoding_error[i] = 0;
3022 if(s->codec_id==AV_CODEC_ID_AMV){
3023 s->last_dc[0] = 128*8/13;
3024 s->last_dc[1] = 128*8/14;
3025 s->last_dc[2] = 128*8/14;
3028 memset(s->last_mv, 0, sizeof(s->last_mv));
3032 switch(s->codec_id){
3033 case AV_CODEC_ID_H263:
3034 case AV_CODEC_ID_H263P:
3035 case AV_CODEC_ID_FLV1:
3036 if (CONFIG_H263_ENCODER)
3037 s->gob_index = H263_GOB_HEIGHT(s->height);
3039 case AV_CODEC_ID_MPEG4:
3040 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
3041 ff_mpeg4_init_partitions(s);
3047 s->first_slice_line = 1;
3048 s->ptr_lastgob = s->pb.buf;
3049 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
3053 ff_set_qscale(s, s->qscale);
3054 ff_init_block_index(s);
3056 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3057 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
3058 int mb_type= s->mb_type[xy];
3062 int size_increase = s->avctx->internal->byte_buffer_size/4
3063 + s->mb_width*MAX_MB_BYTES;
3065 ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
3066 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
3067 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
3070 if(s->data_partitioning){
3071 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
3072 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
3073 av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3079 s->mb_y = mb_y; // moved into loop, can get changed by H.261
3080 ff_update_block_index(s);
3082 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3083 ff_h261_reorder_mb_index(s);
3084 xy= s->mb_y*s->mb_stride + s->mb_x;
3085 mb_type= s->mb_type[xy];
3088 /* write gob / video packet header */
3090 int current_packet_size, is_gob_start;
3092 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
3094 is_gob_start = s->rtp_payload_size &&
3095 current_packet_size >= s->rtp_payload_size &&
3098 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3100 switch(s->codec_id){
3101 case AV_CODEC_ID_H263:
3102 case AV_CODEC_ID_H263P:
3103 if(!s->h263_slice_structured)
3104 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3106 case AV_CODEC_ID_MPEG2VIDEO:
3107 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3108 case AV_CODEC_ID_MPEG1VIDEO:
3109 if(s->mb_skip_run) is_gob_start=0;
3111 case AV_CODEC_ID_MJPEG:
3112 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3117 if(s->start_mb_y != mb_y || mb_x!=0){
3120 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3121 ff_mpeg4_init_partitions(s);
3125 av_assert2((put_bits_count(&s->pb)&7) == 0);
3126 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3128 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3129 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3130 int d = 100 / s->error_rate;
3132 current_packet_size=0;
3133 s->pb.buf_ptr= s->ptr_lastgob;
3134 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3138 #if FF_API_RTP_CALLBACK
3139 FF_DISABLE_DEPRECATION_WARNINGS
3140 if (s->avctx->rtp_callback){
3141 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3142 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3144 FF_ENABLE_DEPRECATION_WARNINGS
3146 update_mb_info(s, 1);
3148 switch(s->codec_id){
3149 case AV_CODEC_ID_MPEG4:
3150 if (CONFIG_MPEG4_ENCODER) {
3151 ff_mpeg4_encode_video_packet_header(s);
3152 ff_mpeg4_clean_buffers(s);
3155 case AV_CODEC_ID_MPEG1VIDEO:
3156 case AV_CODEC_ID_MPEG2VIDEO:
3157 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3158 ff_mpeg1_encode_slice_header(s);
3159 ff_mpeg1_clean_buffers(s);
3162 case AV_CODEC_ID_H263:
3163 case AV_CODEC_ID_H263P:
3164 if (CONFIG_H263_ENCODER)
3165 ff_h263_encode_gob_header(s, mb_y);
3169 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3170 int bits= put_bits_count(&s->pb);
3171 s->misc_bits+= bits - s->last_bits;
3175 s->ptr_lastgob += current_packet_size;
3176 s->first_slice_line=1;
3177 s->resync_mb_x=mb_x;
3178 s->resync_mb_y=mb_y;
3182 if( (s->resync_mb_x == s->mb_x)
3183 && s->resync_mb_y+1 == s->mb_y){
3184 s->first_slice_line=0;
3188 s->dquant=0; //only for QP_RD
3190 update_mb_info(s, 0);
3192 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3194 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3196 copy_context_before_encode(&backup_s, s, -1);
3198 best_s.data_partitioning= s->data_partitioning;
3199 best_s.partitioned_frame= s->partitioned_frame;
3200 if(s->data_partitioning){
3201 backup_s.pb2= s->pb2;
3202 backup_s.tex_pb= s->tex_pb;
3205 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3206 s->mv_dir = MV_DIR_FORWARD;
3207 s->mv_type = MV_TYPE_16X16;
3209 s->mv[0][0][0] = s->p_mv_table[xy][0];
3210 s->mv[0][0][1] = s->p_mv_table[xy][1];
3211 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3212 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3214 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3215 s->mv_dir = MV_DIR_FORWARD;
3216 s->mv_type = MV_TYPE_FIELD;
3219 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3220 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3221 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3223 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3224 &dmin, &next_block, 0, 0);
3226 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3227 s->mv_dir = MV_DIR_FORWARD;
3228 s->mv_type = MV_TYPE_16X16;
3232 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3233 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3235 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3236 s->mv_dir = MV_DIR_FORWARD;
3237 s->mv_type = MV_TYPE_8X8;
3240 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3241 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3243 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3244 &dmin, &next_block, 0, 0);
3246 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3247 s->mv_dir = MV_DIR_FORWARD;
3248 s->mv_type = MV_TYPE_16X16;
3250 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3251 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3252 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3253 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3255 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3256 s->mv_dir = MV_DIR_BACKWARD;
3257 s->mv_type = MV_TYPE_16X16;
3259 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3260 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3261 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3262 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3264 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3265 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3266 s->mv_type = MV_TYPE_16X16;
3268 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3269 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3270 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3271 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3272 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3273 &dmin, &next_block, 0, 0);
3275 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3276 s->mv_dir = MV_DIR_FORWARD;
3277 s->mv_type = MV_TYPE_FIELD;
3280 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3281 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3282 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3284 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3285 &dmin, &next_block, 0, 0);
3287 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3288 s->mv_dir = MV_DIR_BACKWARD;
3289 s->mv_type = MV_TYPE_FIELD;
3292 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3293 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3294 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3296 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3297 &dmin, &next_block, 0, 0);
3299 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3300 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3301 s->mv_type = MV_TYPE_FIELD;
3303 for(dir=0; dir<2; dir++){
3305 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3306 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3307 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3310 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3311 &dmin, &next_block, 0, 0);
3313 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3315 s->mv_type = MV_TYPE_16X16;
3319 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3320 &dmin, &next_block, 0, 0);
3321 if(s->h263_pred || s->h263_aic){
3323 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3325 ff_clean_intra_table_entries(s); //old mode?
3329 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3330 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3331 const int last_qp= backup_s.qscale;
3334 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3335 static const int dquant_tab[4]={-1,1,-2,2};
3336 int storecoefs = s->mb_intra && s->dc_val[0];
3338 av_assert2(backup_s.dquant == 0);
3341 s->mv_dir= best_s.mv_dir;
3342 s->mv_type = MV_TYPE_16X16;
3343 s->mb_intra= best_s.mb_intra;
3344 s->mv[0][0][0] = best_s.mv[0][0][0];
3345 s->mv[0][0][1] = best_s.mv[0][0][1];
3346 s->mv[1][0][0] = best_s.mv[1][0][0];
3347 s->mv[1][0][1] = best_s.mv[1][0][1];
3349 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3350 for(; qpi<4; qpi++){
3351 int dquant= dquant_tab[qpi];
3352 qp= last_qp + dquant;
3353 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3355 backup_s.dquant= dquant;
3358 dc[i]= s->dc_val[0][ s->block_index[i] ];
3359 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3363 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3364 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3365 if(best_s.qscale != qp){
3368 s->dc_val[0][ s->block_index[i] ]= dc[i];
3369 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3376 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3377 int mx= s->b_direct_mv_table[xy][0];
3378 int my= s->b_direct_mv_table[xy][1];
3380 backup_s.dquant = 0;
3381 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3383 ff_mpeg4_set_direct_mv(s, mx, my);
3384 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3385 &dmin, &next_block, mx, my);
3387 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3388 backup_s.dquant = 0;
3389 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3391 ff_mpeg4_set_direct_mv(s, 0, 0);
3392 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3393 &dmin, &next_block, 0, 0);
3395 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3398 coded |= s->block_last_index[i];
3401 memcpy(s->mv, best_s.mv, sizeof(s->mv));
3402 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3403 mx=my=0; //FIXME find the one we actually used
3404 ff_mpeg4_set_direct_mv(s, mx, my);
3405 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3413 s->mv_dir= best_s.mv_dir;
3414 s->mv_type = best_s.mv_type;
3416 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3417 s->mv[0][0][1] = best_s.mv[0][0][1];
3418 s->mv[1][0][0] = best_s.mv[1][0][0];
3419 s->mv[1][0][1] = best_s.mv[1][0][1];*/
3422 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3423 &dmin, &next_block, mx, my);
3428 s->current_picture.qscale_table[xy] = best_s.qscale;
3430 copy_context_after_encode(s, &best_s, -1);
3432 pb_bits_count= put_bits_count(&s->pb);
3433 flush_put_bits(&s->pb);
3434 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3437 if(s->data_partitioning){
3438 pb2_bits_count= put_bits_count(&s->pb2);
3439 flush_put_bits(&s->pb2);
3440 avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3441 s->pb2= backup_s.pb2;
3443 tex_pb_bits_count= put_bits_count(&s->tex_pb);
3444 flush_put_bits(&s->tex_pb);
3445 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3446 s->tex_pb= backup_s.tex_pb;
3448 s->last_bits= put_bits_count(&s->pb);
3450 if (CONFIG_H263_ENCODER &&
3451 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3452 ff_h263_update_motion_val(s);
3454 if(next_block==0){ //FIXME 16 vs linesize16
3455 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3456 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3457 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3460 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3461 ff_mpv_reconstruct_mb(s, s->block);
3463 int motion_x = 0, motion_y = 0;
3464 s->mv_type=MV_TYPE_16X16;
3465 // only one MB-Type possible
3468 case CANDIDATE_MB_TYPE_INTRA:
3471 motion_x= s->mv[0][0][0] = 0;
3472 motion_y= s->mv[0][0][1] = 0;
3474 case CANDIDATE_MB_TYPE_INTER:
3475 s->mv_dir = MV_DIR_FORWARD;
3477 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3478 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3480 case CANDIDATE_MB_TYPE_INTER_I:
3481 s->mv_dir = MV_DIR_FORWARD;
3482 s->mv_type = MV_TYPE_FIELD;
3485 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3486 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3487 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3490 case CANDIDATE_MB_TYPE_INTER4V:
3491 s->mv_dir = MV_DIR_FORWARD;
3492 s->mv_type = MV_TYPE_8X8;
3495 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3496 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3499 case CANDIDATE_MB_TYPE_DIRECT:
3500 if (CONFIG_MPEG4_ENCODER) {
3501 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3503 motion_x=s->b_direct_mv_table[xy][0];
3504 motion_y=s->b_direct_mv_table[xy][1];
3505 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3508 case CANDIDATE_MB_TYPE_DIRECT0:
3509 if (CONFIG_MPEG4_ENCODER) {
3510 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3512 ff_mpeg4_set_direct_mv(s, 0, 0);
3515 case CANDIDATE_MB_TYPE_BIDIR:
3516 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3518 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3519 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3520 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3521 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3523 case CANDIDATE_MB_TYPE_BACKWARD:
3524 s->mv_dir = MV_DIR_BACKWARD;
3526 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3527 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3529 case CANDIDATE_MB_TYPE_FORWARD:
3530 s->mv_dir = MV_DIR_FORWARD;
3532 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3533 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3535 case CANDIDATE_MB_TYPE_FORWARD_I:
3536 s->mv_dir = MV_DIR_FORWARD;
3537 s->mv_type = MV_TYPE_FIELD;
3540 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3541 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3542 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3545 case CANDIDATE_MB_TYPE_BACKWARD_I:
3546 s->mv_dir = MV_DIR_BACKWARD;
3547 s->mv_type = MV_TYPE_FIELD;
3550 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3551 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3552 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3555 case CANDIDATE_MB_TYPE_BIDIR_I:
3556 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3557 s->mv_type = MV_TYPE_FIELD;
3559 for(dir=0; dir<2; dir++){
3561 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3562 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3563 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3568 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3571 encode_mb(s, motion_x, motion_y);
3573 // RAL: Update last macroblock type
3574 s->last_mv_dir = s->mv_dir;
3576 if (CONFIG_H263_ENCODER &&
3577 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3578 ff_h263_update_motion_val(s);
3580 ff_mpv_reconstruct_mb(s, s->block);
3583 /* clean the MV table in IPS frames for direct mode in B-frames */
3584 if(s->mb_intra /* && I,P,S_TYPE */){
3585 s->p_mv_table[xy][0]=0;
3586 s->p_mv_table[xy][1]=0;
3589 if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3593 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3594 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3596 s->current_picture.encoding_error[0] += sse(
3597 s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3598 s->dest[0], w, h, s->linesize);
3599 s->current_picture.encoding_error[1] += sse(
3600 s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3601 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3602 s->current_picture.encoding_error[2] += sse(
3603 s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3604 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3607 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3608 ff_h263_loop_filter(s);
3610 ff_dlog(s->avctx, "MB %d %d bits\n",
3611 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3615 //not beautiful here but we must write it before flushing so it has to be here
3616 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3617 ff_msmpeg4_encode_ext_header(s);
3621 #if FF_API_RTP_CALLBACK
3622 FF_DISABLE_DEPRECATION_WARNINGS
3623 /* Send the last GOB if RTP */
3624 if (s->avctx->rtp_callback) {
3625 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3626 int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3627 /* Call the RTP callback to send the last GOB */
3629 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3631 FF_ENABLE_DEPRECATION_WARNINGS
/* Accumulate src->field into dst->field and zero the source field, so that
 * per-slice-thread statistics can be folded into the main context exactly once. */
3637 #define MERGE(field) dst->field += src->field; src->field=0
/* Merge motion-estimation statistics gathered by a slice thread (src) into the
 * main encoder context (dst). MERGE() also clears the source counters so a
 * second merge cannot double-count them. */
3638 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3639     MERGE(me.scene_change_score);
3640     MERGE(me.mc_mb_var_sum_temp);
3641     MERGE(me.mb_var_sum_temp);
/* Merge per-slice-thread encoding results into the main context: bit/statistic
 * counters, PSNR error sums, the optional noise-reduction DCT error sums, and
 * finally the slice's bitstream, which is appended to the main PutBitContext. */
3644 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3647     MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3648     MERGE(dct_count[1]);
3657     MERGE(er.error_count);
3658     MERGE(padding_bug_score);
3659     MERGE(current_picture.encoding_error[0]);
3660     MERGE(current_picture.encoding_error[1]);
3661     MERGE(current_picture.encoding_error[2]);
    /* Noise-reduction error sums are only meaningful when denoising is enabled. */
3663     if (dst->noise_reduction){
3664         for(i=0; i<64; i++){
3665             MERGE(dct_error_sum[0][i]);
3666             MERGE(dct_error_sum[1][i]);
    /* Slice bitstreams must be byte-aligned before they can be concatenated. */
3670     assert(put_bits_count(&src->pb) % 8 ==0);
3671     assert(put_bits_count(&dst->pb) % 8 ==0);
3672     avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3673     flush_put_bits(&dst->pb);
/* Pick the quantizer/lambda for the current picture.
 * Priority: an explicitly queued next_lambda, then the rate controller
 * (XviD-style or native, chosen by rc_strategy) unless qscale is fixed.
 * With adaptive quantization the per-MB qscale table is also cleaned up for
 * codecs that restrict qscale deltas. dry_run avoids consuming next_lambda. */
3676 static int estimate_qp(MpegEncContext *s, int dry_run){
3677     if (s->next_lambda){
3678         s->current_picture_ptr->f->quality =
3679         s->current_picture.f->quality = s->next_lambda;
3680         if(!dry_run) s->next_lambda= 0;
3681     } else if (!s->fixed_qscale) {
3684         if ((s->avctx->flags & AV_CODEC_FLAG_PASS2) && s->rc_strategy == MPV_RC_STRATEGY_XVID)
3685             quality = ff_xvid_rate_estimate_qscale(s, dry_run);
3688             quality = ff_rate_estimate_qscale(s, dry_run);
3689         s->current_picture_ptr->f->quality =
3690         s->current_picture.f->quality = quality;
        /* A negative quality from the rate controller signals an error. */
3691         if (s->current_picture.f->quality < 0)
3695     if(s->adaptive_quant){
3696         switch(s->codec_id){
3697         case AV_CODEC_ID_MPEG4:
3698             if (CONFIG_MPEG4_ENCODER)
3699                 ff_clean_mpeg4_qscales(s);
3701         case AV_CODEC_ID_H263:
3702         case AV_CODEC_ID_H263P:
3703         case AV_CODEC_ID_FLV1:
3704             if (CONFIG_H263_ENCODER)
3705                 ff_clean_h263_qscales(s);
3708         ff_init_qscale_tab(s);
3711         s->lambda= s->lambda_table[0];
        /* Non-adaptive path: a single lambda for the whole picture. */
3714         s->lambda = s->current_picture.f->quality;
3719 /* must be called before writing the header */
/* Compute the temporal distances used for B-frame prediction scaling:
 * pp_time = distance between the two surrounding non-B frames,
 * pb_time = distance from the previous non-B frame to this B-frame. */
3720 static void set_frame_distances(MpegEncContext * s){
3721     av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3722     s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3724     if(s->pict_type==AV_PICTURE_TYPE_B){
3725         s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3726         assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3728         s->pp_time= s->time - s->last_non_b_time;
3729         s->last_non_b_time= s->time;
3730         assert(s->picture_number==0 || s->pp_time > 0);
/* Encode one complete picture: estimate motion across all slice threads,
 * possibly retype the picture on scene change, select f_code/b_code, pick the
 * quantizer, build per-codec quantization matrices (MJPEG/AMV), write the
 * picture header and run the per-slice encode threads, merging their output. */
3734 static int encode_picture(MpegEncContext *s, int picture_number)
3738     int context_count = s->slice_context_count;
3740     s->picture_number = picture_number;
3742     /* Reset the average MB variance */
3743     s->me.mb_var_sum_temp =
3744     s->me.mc_mb_var_sum_temp = 0;
3746     /* we need to initialize some time vars before we can encode B-frames */
3747     // RAL: Condition added for MPEG1VIDEO
3748     if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3749         set_frame_distances(s);
3750     if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3751         ff_set_mpeg4_time(s);
3753     s->me.scene_change_score=0;
3755 //    s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
    /* Rounding mode: I-frames reset it; P/S frames toggle it for codecs that
     * alternate rounding (flipflop) to avoid drift. */
3757     if(s->pict_type==AV_PICTURE_TYPE_I){
3758         if(s->msmpeg4_version >= 3) s->no_rounding=1;
3759         else                        s->no_rounding=0;
3760     }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3761         if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3762             s->no_rounding ^= 1;
    /* Seed lambda before motion estimation: 2nd pass reads the stats file,
     * otherwise reuse the last lambda of the same picture class. */
3765     if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3766         if (estimate_qp(s,1) < 0)
3768         ff_get_2pass_fcode(s);
3769     } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3770         if(s->pict_type==AV_PICTURE_TYPE_B)
3771             s->lambda= s->last_lambda_for[s->pict_type];
3773             s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
    /* Non-MJPEG codecs share one intra matrix between luma and chroma. */
3777     if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3778         if(s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
3779         if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3780         s->q_chroma_intra_matrix   = s->q_intra_matrix;
3781         s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3784     s->mb_intra=0; //for the rate distortion & bit compare functions
3785     for(i=1; i<context_count; i++){
3786         ret = ff_update_duplicate_context(s->thread_context[i], s);
3794     /* Estimate motion for every MB */
3795     if(s->pict_type != AV_PICTURE_TYPE_I){
        /* Bias lambda by the ME penalty compensation factor (Q8 fixed point). */
3796         s->lambda  = (s->lambda  * s->me_penalty_compensation + 128) >> 8;
3797         s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3798         if (s->pict_type != AV_PICTURE_TYPE_B) {
3799             if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3801                 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3805         s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3806     }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
        /* I-frame: no ME; mark everything intra. */
3808         for(i=0; i<s->mb_stride*s->mb_height; i++)
3809             s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3811         if(!s->fixed_qscale){
3812             /* finding spatial complexity for I-frame rate control */
3813             s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3816     for(i=1; i<context_count; i++){
3817         merge_context_after_me(s, s->thread_context[i]);
3819     s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3820     s->current_picture.   mb_var_sum= s->current_picture_ptr->   mb_var_sum= s->me.   mb_var_sum_temp;
    /* Scene change: retype a P picture to I and redo the MB types. */
3823     if (s->me.scene_change_score > s->scenechange_threshold &&
3824         s->pict_type == AV_PICTURE_TYPE_P) {
3825         s->pict_type= AV_PICTURE_TYPE_I;
3826         for(i=0; i<s->mb_stride*s->mb_height; i++)
3827             s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3828         if(s->msmpeg4_version >= 3)
3830         ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3831                 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
    /* P/S pictures: choose f_code from the MV range, then clip long vectors. */
3835     if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3836         s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3838         if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3840             a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3841             b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3842             s->f_code= FFMAX3(s->f_code, a, b);
3845         ff_fix_long_p_mvs(s);
3846         ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3847         if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3851                     ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3852                                     s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
    /* B pictures: f_code covers forward MVs, b_code covers backward MVs. */
3857     if(s->pict_type==AV_PICTURE_TYPE_B){
3860         a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3861         b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3862         s->f_code = FFMAX(a, b);
3864         a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3865         b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3866         s->b_code = FFMAX(a, b);
3868         ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3869         ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3870         ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3871         ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3872         if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3874             for(dir=0; dir<2; dir++){
3877                     int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3878                               : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3879                     ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3880                                     s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3888     if (estimate_qp(s, 0) < 0)
3891     if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3892         s->pict_type == AV_PICTURE_TYPE_I &&
3893         !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3894         s->qscale= 3; //reduce clipping problems
    /* MJPEG bakes qscale directly into the quantization matrices. */
3896     if (s->out_format == FMT_MJPEG) {
3897         const uint16_t *  luma_matrix = ff_mpeg1_default_intra_matrix;
3898         const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3900         if (s->avctx->intra_matrix) {
3902             luma_matrix = s->avctx->intra_matrix;
3904         if (s->avctx->chroma_intra_matrix)
3905             chroma_matrix = s->avctx->chroma_intra_matrix;
3907         /* for mjpeg, we do include qscale in the matrix */
3909             int j = s->idsp.idct_permutation[i];
3911             s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3912             s->       intra_matrix[j] = av_clip_uint8((  luma_matrix[i] * s->qscale) >> 3);
3914         s->y_dc_scale_table=
3915         s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3916         s->chroma_intra_matrix[0] =
3917         s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3918         ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3919                        s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3920         ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3921                        s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
    /* AMV uses fixed SP5X quantization tables and DC scales. */
3924     if(s->codec_id == AV_CODEC_ID_AMV){
3925         static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3926         static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3928             int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3930             s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3931             s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3933         s->y_dc_scale_table= y;
3934         s->c_dc_scale_table= c;
3935         s->intra_matrix[0] = 13;
3936         s->chroma_intra_matrix[0] = 14;
3937         ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3938                        s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3939         ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3940                        s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3944     //FIXME var duplication
3945     s->current_picture_ptr->f->key_frame =
3946     s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3947     s->current_picture_ptr->f->pict_type =
3948     s->current_picture.f->pict_type = s->pict_type;
3950     if (s->current_picture.f->key_frame)
3951         s->picture_in_gop_number=0;
3953     s->mb_x = s->mb_y = 0;
3954     s->last_bits= put_bits_count(&s->pb);
    /* Write the picture header for the active output format/codec. */
3955     switch(s->out_format) {
3957         if (CONFIG_MJPEG_ENCODER && s->huffman != HUFFMAN_TABLE_OPTIMAL)
3958             ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3959                                            s->pred, s->intra_matrix, s->chroma_intra_matrix);
3962         if (CONFIG_H261_ENCODER)
3963             ff_h261_encode_picture_header(s, picture_number);
3966         if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3967             ff_wmv2_encode_picture_header(s, picture_number);
3968         else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3969             ff_msmpeg4_encode_picture_header(s, picture_number);
3970         else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3971             ret = ff_mpeg4_encode_picture_header(s, picture_number);
3974         } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3975             ret = ff_rv10_encode_picture_header(s, picture_number);
3979         else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3980             ff_rv20_encode_picture_header(s, picture_number);
3981         else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3982             ff_flv_encode_picture_header(s, picture_number);
3983         else if (CONFIG_H263_ENCODER)
3984             ff_h263_encode_picture_header(s, picture_number);
3987         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3988             ff_mpeg1_encode_picture_header(s, picture_number);
3993     bits= put_bits_count(&s->pb);
3994     s->header_bits= bits - s->last_bits;
    /* Run the slice encode threads, then merge their contexts and bitstreams. */
3996     for(i=1; i<context_count; i++){
3997         update_duplicate_context_after_me(s->thread_context[i], s);
3999     s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
4000     for(i=1; i<context_count; i++){
4001         if (s->pb.buf_end == s->thread_context[i]->pb.buf)
4002             set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
4003         merge_context_after_encode(s, s->thread_context[i]);
/* DCT-domain noise reduction: pull each coefficient toward zero by the running
 * per-coefficient offset, while accumulating the error sums used to update
 * those offsets. Separate statistics are kept for intra and inter blocks. */
4009 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
4010     const int intra= s->mb_intra;
4013     s->dct_count[intra]++;
4015     for(i=0; i<64; i++){
4016         int level= block[i];
        /* Positive branch: shrink by the offset, clamping at zero. */
4020             s->dct_error_sum[intra][i] += level;
4021             level -= s->dct_offset[intra][i];
4022             if(level<0) level=0;
        /* Negative branch: mirror of the above. */
4024             s->dct_error_sum[intra][i] -= level;
4025             level += s->dct_offset[intra][i];
4026             if(level>0) level=0;
/* Rate-distortion optimal (trellis) quantization of one 8x8 block.
 * Forward-DCTs the block, derives 1-2 candidate levels per coefficient, then
 * runs a Viterbi-style dynamic program over (position, level) states using
 * VLC bit costs (length/last_length tables) weighted by lambda against the
 * reconstruction distortion. Returns the index of the last nonzero
 * coefficient (scan order), writing the chosen levels back into block[]. */
4033 static int dct_quantize_trellis_c(MpegEncContext *s,
4034                                   int16_t *block, int n,
4035                                   int qscale, int *overflow){
4037     const uint16_t *matrix;
4038     const uint8_t *scantable;
4039     const uint8_t *perm_scantable;
4041     unsigned int threshold1, threshold2;
4053     int coeff_count[64];
4054     int qmul, qadd, start_i, last_non_zero, i, dc;
4055     const int esc_length= s->ac_esc_length;
4057     uint8_t * last_length;
4058     const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
4061     s->fdsp.fdct(block);
4063     if(s->dct_error_sum)
4064         s->denoise_dct(s, block);
4066     qadd= ((qscale-1)|1)*8;
    /* MPEG-2 may use a non-linear qscale mapping. */
4068     if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
4069     else                 mpeg2_qscale = qscale << 1;
    /* Intra path: select scan tables, quantize DC separately, pick AC tables. */
4073         scantable= s->intra_scantable.scantable;
4074         perm_scantable= s->intra_scantable.permutated;
4082             /* For AIC we skip quant/dequant of INTRADC */
4087             /* note: block[0] is assumed to be positive */
4088             block[0] = (block[0] + (q >> 1)) / q;
4091         qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4092         matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4093         if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4094             bias= 1<<(QMAT_SHIFT-1);
4096         if (n > 3 && s->intra_chroma_ac_vlc_length) {
4097             length     = s->intra_chroma_ac_vlc_length;
4098             last_length= s->intra_chroma_ac_vlc_last_length;
4100             length     = s->intra_ac_vlc_length;
4101             last_length= s->intra_ac_vlc_last_length;
    /* Inter path. */
4104         scantable= s->inter_scantable.scantable;
4105         perm_scantable= s->inter_scantable.permutated;
4108         qmat = s->q_inter_matrix[qscale];
4109         matrix = s->inter_matrix;
4110         length     = s->inter_ac_vlc_length;
4111         last_length= s->inter_ac_vlc_last_length;
4115     threshold1= (1<<QMAT_SHIFT) - bias - 1;
4116     threshold2= (threshold1<<1);
    /* Find the last coefficient that survives quantization. */
4118     for(i=63; i>=start_i; i--) {
4119         const int j = scantable[i];
4120         int level = block[j] * qmat[j];
4122         if(((unsigned)(level+threshold1))>threshold2){
    /* Build up to two candidate levels (rounded and rounded-down) per coeff. */
4128     for(i=start_i; i<=last_non_zero; i++) {
4129         const int j = scantable[i];
4130         int level = block[j] * qmat[j];
4132 //        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
4133 //           || bias-level >= (1<<(QMAT_SHIFT - 3))){
4134         if(((unsigned)(level+threshold1))>threshold2){
4136                 level= (bias + level)>>QMAT_SHIFT;
4138                 coeff[1][i]= level-1;
4139 //                coeff[2][k]= level-2;
4141                 level= (bias - level)>>QMAT_SHIFT;
4142                 coeff[0][i]= -level;
4143                 coeff[1][i]= -level+1;
4144 //                coeff[2][k]= -level+2;
4146             coeff_count[i]= FFMIN(level, 2);
4147             av_assert2(coeff_count[i]);
4150             coeff[0][i]= (level>>31)|1;
4155     *overflow= s->max_qcoeff < max; //overflow might have happened
4157     if(last_non_zero < start_i){
4158         memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4159         return last_non_zero;
    /* Trellis DP: score_tab[i] is the best cost of coding coefficients up to i;
     * survivor[] holds the positions still worth extending. */
4162     score_tab[start_i]= 0;
4163     survivor[0]= start_i;
4166     for(i=start_i; i<=last_non_zero; i++){
4167         int level_index, j, zero_distortion;
4168         int dct_coeff= FFABS(block[ scantable[i] ]);
4169         int best_score=256*256*256*120;
        /* ifast fDCT has built-in AAN scaling that must be undone here. */
4171         if (s->fdsp.fdct == ff_fdct_ifast)
4172             dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4173         zero_distortion= dct_coeff*dct_coeff;
4175         for(level_index=0; level_index < coeff_count[i]; level_index++){
4177             int level= coeff[level_index][i];
4178             const int alevel= FFABS(level);
            /* Dequantize per output format to get the reconstruction value. */
4183             if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4184                 unquant_coeff= alevel*qmul + qadd;
4185             } else if(s->out_format == FMT_MJPEG) {
4186                 j = s->idsp.idct_permutation[scantable[i]];
4187                 unquant_coeff = alevel * matrix[j] * 8;
4189                 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4191                     unquant_coeff = (int)(  alevel  * mpeg2_qscale * matrix[j]) >> 4;
4192                     unquant_coeff =   (unquant_coeff - 1) | 1;
4194                     unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4195                     unquant_coeff =   (unquant_coeff - 1) | 1;
4200             distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
            /* In-VLC-range levels: try extending every survivor with this run/level. */
4202             if((level&(~127)) == 0){
4203                 for(j=survivor_count-1; j>=0; j--){
4204                     int run= i - survivor[j];
4205                     int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4206                     score += score_tab[i-run];
4208                     if(score < best_score){
4211                         level_tab[i+1]= level-64;
4215                 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4216                     for(j=survivor_count-1; j>=0; j--){
4217                         int run= i - survivor[j];
4218                         int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4219                         score += score_tab[i-run];
4220                         if(score < last_score){
4223                             last_level= level-64;
            /* Out-of-range levels cost a full escape code. */
4229                 distortion += esc_length*lambda;
4230                 for(j=survivor_count-1; j>=0; j--){
4231                     int run= i - survivor[j];
4232                     int score= distortion + score_tab[i-run];
4234                     if(score < best_score){
4237                         level_tab[i+1]= level-64;
4241                 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4242                     for(j=survivor_count-1; j>=0; j--){
4243                         int run= i - survivor[j];
4244                         int score= distortion + score_tab[i-run];
4245                         if(score < last_score){
4248                             last_level= level-64;
4256         score_tab[i+1]= best_score;
4258         // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4259         if(last_non_zero <= 27){
4260             for(; survivor_count; survivor_count--){
4261                 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4265             for(; survivor_count; survivor_count--){
4266                 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4271         survivor[ survivor_count++ ]= i+1;
    /* Non-H.263 formats pay an explicit end-of-block cost. */
4274     if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4275         last_score= 256*256*256*120;
4276         for(i= survivor[0]; i<=last_non_zero + 1; i++){
4277             int score= score_tab[i];
4279                 score += lambda * 2; // FIXME more exact?
4281             if(score < last_score){
4284                 last_level= level_tab[i];
4285                 last_run= run_tab[i];
4290     s->coded_score[n] = last_score;
4292     dc= FFABS(block[0]);
4293     last_non_zero= last_i - 1;
4294     memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4296     if(last_non_zero < start_i)
4297         return last_non_zero;
    /* Special case: only the first coefficient is nonzero — choose its level
     * directly by RD over the candidate list. */
4299     if(last_non_zero == 0 && start_i == 0){
4301         int best_score= dc * dc;
4303         for(i=0; i<coeff_count[0]; i++){
4304             int level= coeff[i][0];
4305             int alevel= FFABS(level);
4306             int unquant_coeff, score, distortion;
4308             if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4309                     unquant_coeff= (alevel*qmul + qadd)>>3;
4311                     unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4312                     unquant_coeff =   (unquant_coeff - 1) | 1;
4314                 unquant_coeff = (unquant_coeff + 4) >> 3;
4315                 unquant_coeff<<= 3 + 3;
4317             distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4319             if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4320             else                    score= distortion + esc_length*lambda;
4322             if(score < best_score){
4324                 best_level= level - 64;
4327         block[0]= best_level;
4328         s->coded_score[n] = best_score - dc*dc;
4329         if(best_level == 0) return -1;
4330         else                return last_non_zero;
    /* Backtrack through run/level tabs to emit the chosen coefficients. */
4334     av_assert2(last_level);
4336     block[ perm_scantable[last_non_zero] ]= last_level;
4339     for(; i>start_i; i -= run_tab[i] + 1){
4340         block[ perm_scantable[i-1] ]= level_tab[i];
4343     return last_non_zero;
4346 //#define REFINE_STATS 1
/* 64 IDCT basis images (one per coefficient, rows permuted by the IDCT
 * permutation), in BASIS_SHIFT fixed point; built lazily by build_basis(). */
4347 static int16_t basis[64][64];
/* Fill basis[] with the 2-D DCT-III basis functions, applying the DC
 * normalization (sqrt(0.5) per zero-frequency axis) and the given coefficient
 * permutation so lookups match the encoder's permuted coefficient order. */
4349 static void build_basis(uint8_t *perm){
4356                 double s= 0.25*(1<<BASIS_SHIFT);
4358                 int perm_index= perm[index];
4359                 if(i==0) s*= sqrt(0.5);
4360                 if(j==0) s*= sqrt(0.5);
4361                 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Iterative refinement of an already-quantized block (quantizer noise
 * shaping): repeatedly tries +/-1 changes to individual coefficients,
 * scoring each candidate by VLC bit-cost delta (lambda-weighted) plus the
 * change in weighted reconstruction error measured against the basis images.
 * Stops when no change improves the score; returns the new last nonzero
 * index. */
4368 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4369                         int16_t *block, int16_t *weight, int16_t *orig,
4372     LOCAL_ALIGNED_16(int16_t, d1, [64]);
4373     const uint8_t *scantable;
4374     const uint8_t *perm_scantable;
4375 //    unsigned int threshold1, threshold2;
4380     int qmul, qadd, start_i, last_non_zero, i, dc;
4382     uint8_t * last_length;
4384     int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
    /* Optional refinement statistics (compiled in with REFINE_STATS). */
4387 static int after_last=0;
4388 static int to_zero=0;
4389 static int from_zero=0;
4392 static int messed_sign=0;
4395     if(basis[0][0] == 0)
4396         build_basis(s->idsp.idct_permutation);
    /* Intra: pick intra scan/VLC tables; DC is handled separately below. */
4401         scantable= s->intra_scantable.scantable;
4402         perm_scantable= s->intra_scantable.permutated;
4409             /* For AIC we skip quant/dequant of INTRADC */
4413         q <<= RECON_SHIFT-3;
4414         /* note: block[0] is assumed to be positive */
4416 //            block[0] = (block[0] + (q >> 1)) / q;
4418 //        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4419 //            bias= 1<<(QMAT_SHIFT-1);
4420         if (n > 3 && s->intra_chroma_ac_vlc_length) {
4421             length     = s->intra_chroma_ac_vlc_length;
4422             last_length= s->intra_chroma_ac_vlc_last_length;
4424             length     = s->intra_ac_vlc_length;
4425             last_length= s->intra_ac_vlc_last_length;
4428         scantable= s->inter_scantable.scantable;
4429         perm_scantable= s->inter_scantable.permutated;
4432         length     = s->inter_ac_vlc_length;
4433         last_length= s->inter_ac_vlc_last_length;
4435     last_non_zero = s->block_last_index[n];
    /* rem[] holds the residual (original minus current reconstruction) in
     * RECON_SHIFT fixed point; start from the DC-only reconstruction. */
4440     dc += (1<<(RECON_SHIFT-1));
4441     for(i=0; i<64; i++){
4442         rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME  use orig directly instead of copying to rem[]
4445 STOP_TIMER("memset rem[]")}
    /* Map weight[] into the 16..63 range used by the error metric. */
4448     for(i=0; i<64; i++){
4453             w= FFABS(weight[i]) + qns*one;
4454             w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4457 //            w=weight[i] = (63*qns + (w/2)) / w;
4460             av_assert2(w<(1<<6));
4463     lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
    /* Subtract each coded coefficient's contribution from the residual and
     * record the run-length structure of the current coefficient sequence. */
4469     for(i=start_i; i<=last_non_zero; i++){
4470         int j= perm_scantable[i];
4471         const int level= block[j];
4475             if(level<0) coeff= qmul*level - qadd;
4476             else        coeff= qmul*level + qadd;
4477             run_tab[rle_index++]=run;
4480             s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
4486     if(last_non_zero>0){
4487 STOP_TIMER("init rem[]")
    /* Main refinement loop: find the single best +/-1 change this iteration. */
4494         int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4497         int run2, best_unquant_change=0, analyze_gradient;
4501         analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
4503         if(analyze_gradient){
4507             for(i=0; i<64; i++){
4510                 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
4513 STOP_TIMER("rem*w*w")}
        /* Intra DC gets its own +/-1 trial (no VLC cost change). */
4523             const int level= block[0];
4524             int change, old_coeff;
4526             av_assert2(s->mb_intra);
4530             for(change=-1; change<=1; change+=2){
4531                 int new_level= level + change;
4532                 int score, new_coeff;
4534                 new_coeff= q*new_level;
4535                 if(new_coeff >= 2048 || new_coeff < 0)
4538                 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4539                                                   new_coeff - old_coeff);
4540                 if(score<best_score){
4543                     best_change= change;
4544                     best_unquant_change= new_coeff - old_coeff;
4551             run2= run_tab[rle_index++];
        /* Try +/-1 on every AC coefficient position. */
4555         for(i=start_i; i<64; i++){
4556             int j= perm_scantable[i];
4557             const int level= block[j];
4558             int change, old_coeff;
4560             if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4564                 if(level<0) old_coeff= qmul*level - qadd;
4565                 else        old_coeff= qmul*level + qadd;
4566                 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4570             av_assert2(run2>=0 || i >= last_non_zero );
4573             for(change=-1; change<=1; change+=2){
4574                 int new_level= level + change;
4575                 int score, new_coeff, unquant_change;
4578                 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4582                     if(new_level<0) new_coeff= qmul*new_level - qadd;
4583                     else            new_coeff= qmul*new_level + qadd;
4584                     if(new_coeff >= 2048 || new_coeff <= -2048)
4586                     //FIXME check for overflow
                    /* Level change only: bit-cost delta of the new VLC. */
4589                     if(level < 63 && level > -63){
4590                         if(i < last_non_zero)
4591                             score +=   length[UNI_AC_ENC_INDEX(run, new_level+64)]
4592                                      - length[UNI_AC_ENC_INDEX(run, level+64)];
4594                             score +=   last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4595                                      - last_length[UNI_AC_ENC_INDEX(run, level+64)];
                    /* Creating a coefficient: splits the surrounding run. */
4598                     av_assert2(FFABS(new_level)==1);
4600                     if(analyze_gradient){
4601                         int g= d1[ scantable[i] ];
4602                         if(g && (g^new_level) >= 0)
4606                     if(i < last_non_zero){
4607                         int next_i= i + run2 + 1;
4608                         int next_level= block[ perm_scantable[next_i] ] + 64;
4610                         if(next_level&(~127))
4613                         if(next_i < last_non_zero)
4614                             score +=   length[UNI_AC_ENC_INDEX(run, 65)]
4615                                      + length[UNI_AC_ENC_INDEX(run2, next_level)]
4616                                      - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4618                             score +=   length[UNI_AC_ENC_INDEX(run, 65)]
4619                                      + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4620                                      - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4622                         score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4624                             score +=   length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4625                                      - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                    /* Removing a coefficient: merges the adjacent runs. */
4631                     av_assert2(FFABS(level)==1);
4633                     if(i < last_non_zero){
4634                         int next_i= i + run2 + 1;
4635                         int next_level= block[ perm_scantable[next_i] ] + 64;
4637                         if(next_level&(~127))
4640                         if(next_i < last_non_zero)
4641                             score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4642                                      - length[UNI_AC_ENC_INDEX(run2, next_level)]
4643                                      - length[UNI_AC_ENC_INDEX(run, 65)];
4645                             score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4646                                      - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4647                                      - length[UNI_AC_ENC_INDEX(run, 65)];
4649                         score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4651                             score +=   last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4652                                      - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4659                 unquant_change= new_coeff - old_coeff;
4660                 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4662                 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4664                 if(score<best_score){
4667                     best_change= change;
4668                     best_unquant_change= unquant_change;
4672                 prev_level= level + 64;
4673                 if(prev_level&(~127))
4682 STOP_TIMER("iterative step")}
        /* Apply the winning change, if any, and update bookkeeping. */
4686             int j= perm_scantable[ best_coeff ];
4688             block[j] += best_change;
4690             if(best_coeff > last_non_zero){
4691                 last_non_zero= best_coeff;
4692                 av_assert2(block[j]);
4699                 if(block[j] - best_change){
4700                     if(FFABS(block[j]) > FFABS(block[j] - best_change)){
4712                 for(; last_non_zero>=start_i; last_non_zero--){
4713                     if(block[perm_scantable[last_non_zero]])
4719             if(256*256*256*64 % count == 0){
4720                 av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
            /* Rebuild the run-length table for the modified sequence. */
4725             for(i=start_i; i<=last_non_zero; i++){
4726                 int j= perm_scantable[i];
4727                 const int level= block[j];
4730                     run_tab[rle_index++]=run;
4737             s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4743     if(last_non_zero>0){
4744 STOP_TIMER("iterative search")
4749     return last_non_zero;
/**
 * Permute an 8x8 block according to permutation.
 * @param block the block which will be permuted according to
 *              the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to
 *             speed the permutation up
 * @param scantable the used scantable, this is only used to speed the
 *                  permutation up, the block is not (inverse) permutated
 *                  to scantable order!
 */
void ff_block_permute(int16_t *block, uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int i;
    int16_t temp[64];

    /* Nothing to do for an empty (or DC-only) block. */
    if (last <= 0)
        return;
    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)
    // return;

    /* Save the nonzero coefficients and clear their original slots, so that
     * overlapping source/destination positions cannot clobber each other. */
    for (i = 0; i <= last; i++) {
        const int j = scantable[i];
        temp[j] = block[j];
        block[j] = 0;
    }

    /* Scatter the saved coefficients to their permuted positions. */
    for (i = 0; i <= last; i++) {
        const int j = scantable[i];
        const int perm_j = permutation[j];
        block[perm_j] = temp[j];
    }
}
/* Plain (non-trellis) quantization of one 8x8 block: forward DCT, optional
 * denoising, separate intra-DC handling, biased division by the quantization
 * matrix, overflow flagging, and finally IDCT-permutation of the nonzero
 * coefficients. Returns the last nonzero index in scan order. */
4788 int ff_dct_quantize_c(MpegEncContext *s,
4789                         int16_t *block, int n,
4790                         int qscale, int *overflow)
4792     int i, j, level, last_non_zero, q, start_i;
4794     const uint8_t *scantable;
4797     unsigned int threshold1, threshold2;
4799     s->fdsp.fdct(block);
4801     if(s->dct_error_sum)
4802         s->denoise_dct(s, block);
    /* Intra: DC coded separately with its own scale. */
4805         scantable= s->intra_scantable.scantable;
4813             /* For AIC we skip quant/dequant of INTRADC */
4816         /* note: block[0] is assumed to be positive */
4817         block[0] = (block[0] + (q >> 1)) / q;
4820         qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4821         bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
    /* Inter path. */
4823         scantable= s->inter_scantable.scantable;
4826         qmat = s->q_inter_matrix[qscale];
4827         bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4829     threshold1= (1<<QMAT_SHIFT) - bias - 1;
4830     threshold2= (threshold1<<1);
    /* Scan backwards for the last coefficient surviving quantization. */
4831     for(i=63;i>=start_i;i--) {
4833         level = block[j] * qmat[j];
4835         if(((unsigned)(level+threshold1))>threshold2){
    /* Quantize each surviving coefficient with rounding bias. */
4842     for(i=start_i; i<=last_non_zero; i++) {
4844         level = block[j] * qmat[j];
4846 //        if(   bias+level >= (1<<QMAT_SHIFT)
4847 //           || bias-level >= (1<<QMAT_SHIFT)){
4848         if(((unsigned)(level+threshold1))>threshold2){
4850                 level= (bias + level)>>QMAT_SHIFT;
4853                 level= (bias - level)>>QMAT_SHIFT;
4861     *overflow= s->max_qcoeff < max; //overflow might have happened
4863     /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4864     if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4865         ff_block_permute(block, s->idsp.idct_permutation,
4866                       scantable, last_non_zero);
4868     return last_non_zero;
/* Helpers for the AVOption tables below: field offset inside MpegEncContext,
 * and the flag set common to all video-encoding options. */
4871 #define OFFSET(x) offsetof(MpegEncContext, x)
4872 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options for the H.263 encoder (table terminator elided in this chunk). */
4873 static const AVOption h263_options[] = {
4874 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4875 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass exposing h263_options through the generic option system. */
4880 static const AVClass h263_class = {
4881 .class_name = "H.263 encoder",
4882 .item_name = av_default_item_name,
4883 .option = h263_options,
4884 .version = LIBAVUTIL_VERSION_INT,
/* H.263 / H.263-1996 encoder registration; uses the shared MPEG-video
 * encode entry points and accepts YUV420P input only. */
4887 AVCodec ff_h263_encoder = {
4889 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4890 .type = AVMEDIA_TYPE_VIDEO,
4891 .id = AV_CODEC_ID_H263,
4892 .priv_data_size = sizeof(MpegEncContext),
4893 .init = ff_mpv_encode_init,
4894 .encode2 = ff_mpv_encode_picture,
4895 .close = ff_mpv_encode_end,
4896 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4897 .priv_class = &h263_class,
/* Private options for the H.263+ encoder (table terminator elided in this chunk). */
4900 static const AVOption h263p_options[] = {
4901 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4902 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4903 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4904 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
/* AVClass exposing h263p_options through the generic option system. */
4908 static const AVClass h263p_class = {
4909 .class_name = "H.263p encoder",
4910 .item_name = av_default_item_name,
4911 .option = h263p_options,
4912 .version = LIBAVUTIL_VERSION_INT,
/* H.263+ / H.263-1998 encoder registration; same shared MPEG-video entry
 * points as the plain H.263 encoder, plus slice-threading capability. */
4915 AVCodec ff_h263p_encoder = {
4917 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4918 .type = AVMEDIA_TYPE_VIDEO,
4919 .id = AV_CODEC_ID_H263P,
4920 .priv_data_size = sizeof(MpegEncContext),
4921 .init = ff_mpv_encode_init,
4922 .encode2 = ff_mpv_encode_picture,
4923 .close = ff_mpv_encode_end,
4924 .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4925 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4926 .priv_class = &h263p_class,
/* AVClass for msmpeg4v2; no codec-specific options, only the generic
 * MPEG-video option table. */
4929 static const AVClass msmpeg4v2_class = {
4930 .class_name = "msmpeg4v2 encoder",
4931 .item_name = av_default_item_name,
4932 .option = ff_mpv_generic_options,
4933 .version = LIBAVUTIL_VERSION_INT,
/* MS-MPEG4 v2 encoder registration (shared MPEG-video entry points). */
4936 AVCodec ff_msmpeg4v2_encoder = {
4937 .name = "msmpeg4v2",
4938 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4939 .type = AVMEDIA_TYPE_VIDEO,
4940 .id = AV_CODEC_ID_MSMPEG4V2,
4941 .priv_data_size = sizeof(MpegEncContext),
4942 .init = ff_mpv_encode_init,
4943 .encode2 = ff_mpv_encode_picture,
4944 .close = ff_mpv_encode_end,
4945 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4946 .priv_class = &msmpeg4v2_class,
/* AVClass for msmpeg4v3; generic MPEG-video options only. */
4949 static const AVClass msmpeg4v3_class = {
4950 .class_name = "msmpeg4v3 encoder",
4951 .item_name = av_default_item_name,
4952 .option = ff_mpv_generic_options,
4953 .version = LIBAVUTIL_VERSION_INT,
/* MS-MPEG4 v3 encoder registration (shared MPEG-video entry points). */
4956 AVCodec ff_msmpeg4v3_encoder = {
4958 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4959 .type = AVMEDIA_TYPE_VIDEO,
4960 .id = AV_CODEC_ID_MSMPEG4V3,
4961 .priv_data_size = sizeof(MpegEncContext),
4962 .init = ff_mpv_encode_init,
4963 .encode2 = ff_mpv_encode_picture,
4964 .close = ff_mpv_encode_end,
4965 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4966 .priv_class = &msmpeg4v3_class,
/* AVClass for wmv1; generic MPEG-video options only. */
4969 static const AVClass wmv1_class = {
4970 .class_name = "wmv1 encoder",
4971 .item_name = av_default_item_name,
4972 .option = ff_mpv_generic_options,
4973 .version = LIBAVUTIL_VERSION_INT,
/* Windows Media Video 7 (WMV1) encoder registration (shared MPEG-video
 * entry points; closing brace of this table is beyond this chunk). */
4976 AVCodec ff_wmv1_encoder = {
4978 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4979 .type = AVMEDIA_TYPE_VIDEO,
4980 .id = AV_CODEC_ID_WMV1,
4981 .priv_data_size = sizeof(MpegEncContext),
4982 .init = ff_mpv_encode_init,
4983 .encode2 = ff_mpv_encode_picture,
4984 .close = ff_mpv_encode_end,
4985 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4986 .priv_class = &wmv1_class,