2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/pixdesc.h"
40 #include "libavutil/opt.h"
41 #include "libavutil/timer.h"
46 #include "mpegvideo.h"
47 #include "mpegvideodata.h"
51 #include "mjpegenc_common.h"
53 #include "mpegutils.h"
56 #include "pixblockdsp.h"
60 #include "aandcttab.h"
62 #include "mpeg4video.h"
64 #include "bytestream.h"
70 #define QUANT_BIAS_SHIFT 8
72 #define QMAT_SHIFT_MMX 16
75 static int encode_picture(MpegEncContext *s, int picture_number);
76 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
77 static int sse_mb(MpegEncContext *s);
78 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
79 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
81 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
82 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
84 const AVOption ff_mpv_generic_options[] = {
/**
 * Build reciprocal quantizer multiplier tables for every qscale in
 * [qmin, qmax], so quantization can be done with multiply+shift
 * instead of a division.
 *
 * The reciprocal formula depends on which forward DCT is in use:
 * ff_fdct_ifast leaves the AAN scale factors (ff_aanscales) folded
 * into its output, so they are multiplied into the divisor there,
 * while the "islow"/faan DCTs produce unscaled coefficients.
 *
 * @param s            encoder context (supplies fdct pointer and the
 *                     idct coefficient permutation)
 * @param qmat         output: per-qscale 32-bit multipliers (QMAT_SHIFT)
 * @param qmat16       output: per-qscale 16-bit multipliers
 *                     (QMAT_SHIFT_MMX) in [0][], matching bias in [1][]
 * @param quant_matrix base quantization matrix
 * @param bias         rounding bias in QUANT_BIAS_SHIFT fixed point
 * @param qmin         first qscale to fill
 * @param qmax         last qscale to fill
 * @param intra        nonzero for intra matrices; the overflow check
 *                     loop starts at index `intra`, skipping DC
 *
 * NOTE(review): this excerpt is decimated — loop-variable declarations,
 * the #if CONFIG_FAANDCT opener, closing braces and the else-branch
 * around the qmat16 path are not visible here.
 */
void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
                       uint16_t (*qmat16)[2][64],
                       const uint16_t *quant_matrix,
                       int bias, int qmin, int qmax, int intra)
    FDCTDSPContext *fdsp = &s->fdsp;

    for (qscale = qmin; qscale <= qmax; qscale++) {
        /* MPEG-2 non-linear quantizer: map qscale index to the actual
         * quantizer step; linear mode simply doubles the index. */
        if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
        else qscale2 = qscale << 1;

        if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
            fdsp->fdct == ff_faandct ||
#endif /* CONFIG_FAANDCT */
            fdsp->fdct == ff_jpeg_fdct_islow_10) {
            /* unscaled DCT output: divisor is qscale2 * matrix entry */
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale2 * quant_matrix[j];
                /* 16 <= qscale * quant_matrix[i] <= 7905
                 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
                 * 19952 <= x <= 249205026
                 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
                 * 3444240 >= (1 << 36) / (x) >= 275 */
                qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
        } else if (fdsp->fdct == ff_fdct_ifast) {
            /* AAN-scaled DCT output: fold ff_aanscales into the divisor
             * and compensate with 14 extra shift bits. */
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
                /* 16 <= qscale * quant_matrix[i] <= 7905
                 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
                 * 19952 <= x <= 249205026
                 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
                 * 3444240 >= (1 << 36) / (x) >= 275 */
                qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
            /* remaining path also fills the 16-bit SIMD tables */
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale2 * quant_matrix[j];
                /* We can safely suppose that 16 <= quant_matrix[i] <= 255
                 * Assume x = qscale * quant_matrix[i]
                 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
                 * so 32768 >= (1 << 19) / (x) >= 67 */
                qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
                //qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) /
                //                    (qscale * quant_matrix[i]);
                qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;

                /* clamp away the 0 and 0x8000 values the 16-bit SIMD
                 * quantizer cannot represent */
                if (qmat16[qscale][0][i] == 0 ||
                    qmat16[qscale][0][i] == 128 * 256)
                    qmat16[qscale][0][i] = 128 * 256 - 1;
                /* per-coefficient rounding bias matching the multiplier */
                qmat16[qscale][1][i] =
                    ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
                                qmat16[qscale][0][i]);
        /* overflow check: shrink shift until max * qmat fits in int
         * (DC skipped for intra matrices via the `intra` start index) */
        for (i = intra; i < 64; i++) {
            if (fdsp->fdct == ff_fdct_ifast) {
                max = (8191LL * ff_aanscales[i]) >> 14;
            while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
        av_log(NULL, AV_LOG_INFO,
               "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/**
 * Derive s->qscale (and s->lambda2) from the current rate-control
 * lambda. qscale ~= lambda * 139 / 2^(FF_LAMBDA_SHIFT+7), clipped to
 * [qmin, qmax] (qmax relaxed to 31 when VBV forces ignoring qmax).
 */
static inline void update_qscale(MpegEncContext *s)
    /* NOTE: the `&& 0` makes this non-linear-qscale search dead code;
     * it is kept for reference but never taken. */
    if (s->q_scale_type == 1 && 0) {
        int bestdiff=INT_MAX;
        /* pick the non-linear qscale whose step best matches lambda */
        for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
            int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
            if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
                (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
            if (diff < bestdiff) {
        /* linear mapping from lambda to qscale, with rounding */
        s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
                    (FF_LAMBDA_SHIFT + 7);
        s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
    /* keep lambda2 (squared lambda in FF_LAMBDA_SCALE units) in sync */
    s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/**
 * Write all 64 entries of a quantization matrix to the bitstream,
 * 8 bits each, in zigzag scan order.
 *
 * @param pb     destination bit writer
 * @param matrix quantization matrix in raster order (indexed through
 *               ff_zigzag_direct)
 */
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
    for (i = 0; i < 64; i++) {
        put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
/**
 * Init s->current_picture.qscale_table from s->lambda_table: convert
 * each macroblock's lambda to a qscale (same lambda*139 mapping as
 * update_qscale) and clip it against avctx->qmin.
 */
void ff_init_qscale_tab(MpegEncContext *s)
    int8_t * const qscale_table = s->current_picture.qscale_table;
    for (i = 0; i < s->mb_num; i++) {
        /* mb_index2xy translates scan order to the table's xy layout */
        unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
        int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
        qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/**
 * Copy the per-frame state that motion estimation may have changed from
 * the master context back into a duplicate (slice-thread) context.
 */
static void update_duplicate_context_after_me(MpegEncContext *dst,
#define COPY(a) dst->a= src->a
    COPY(current_picture);
    COPY(picture_in_gop_number);
    COPY(gop_picture_number);
    COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
    COPY(progressive_frame);    // FIXME don't set in encode_header
    COPY(partitioned_frame);    // FIXME don't set in encode_header
/**
 * Set the given MpegEncContext to defaults for encoding.
 * The changed fields will not depend upon the prior state of the
 * MpegEncContext.
 */
static void mpv_encode_defaults(MpegEncContext *s)
    ff_mpv_common_defaults(s);

    /* seed the static fcode table; shared across encoder instances */
    for (i = -16; i < 16; i++) {
        default_fcode_tab[i + MAX_MV] = 1;
    s->me.mv_penalty = default_mv_penalty;
    s->fcode_tab     = default_fcode_tab;

    s->input_picture_number  = 0;
    s->picture_in_gop_number = 0;
/**
 * Select the DCT quantizer implementations for this context: x86 SIMD
 * override first, C fallback otherwise, and the trellis quantizer for
 * s->dct_quantize when avctx->trellis is set (fast_dct_quantize keeps
 * the non-trellis version).
 */
av_cold int ff_dct_encode_init(MpegEncContext *s) {
    ff_dct_encode_init_x86(s);

    if (CONFIG_H263_ENCODER)
        ff_h263dsp_init(&s->h263dsp);
    if (!s->dct_quantize)
        s->dct_quantize = ff_dct_quantize_c;
    s->denoise_dct = denoise_dct_c;
    /* keep a pointer to the non-trellis quantizer before (possibly)
     * replacing dct_quantize with the trellis one */
    s->fast_dct_quantize = s->dct_quantize;
    if (s->avctx->trellis)
        s->dct_quantize = dct_quantize_trellis_c;
/* init video encoder */
/**
 * AVCodec.init() callback for the MPEG-style encoders: validate the
 * user/codec option combination, select the output format for the
 * chosen codec id, allocate quantizer/statistics tables, initialize
 * DSP contexts and rate control, and publish CPB properties.
 *
 * @return 0 on success, AVERROR(EINVAL) for invalid options,
 *         AVERROR(ENOMEM)/AVERROR_UNKNOWN (via the fail path) on
 *         allocation or sub-init failure.
 *
 * NOTE(review): this excerpt is decimated — many closing braces,
 * `break`/`return` statements, `#if` openers and else-branches are
 * not visible; only the visible lines are reproduced below.
 */
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
    MpegEncContext *s = avctx->priv_data;
    AVCPBProperties *cpb_props;
    int i, ret, format_supported;

    mpv_encode_defaults(s);

    /* ---- per-codec pixel format validation ---- */
    switch (avctx->codec_id) {
    case AV_CODEC_ID_MPEG2VIDEO:
        if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
            avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
            av_log(avctx, AV_LOG_ERROR,
                   "only YUV420 and YUV422 are supported\n");
    case AV_CODEC_ID_MJPEG:
    case AV_CODEC_ID_AMV:
        format_supported = 0;
        /* JPEG color space */
        if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
            avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
            avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
            (avctx->color_range == AVCOL_RANGE_JPEG &&
             (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
              avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
              avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
            format_supported = 1;
        /* MPEG color space */
        else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
                 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
                  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
                  avctx->pix_fmt == AV_PIX_FMT_YUV444P))
            format_supported = 1;

        if (!format_supported) {
            av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
        if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
            av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");

    /* derive the chroma subsampling mode from the pixel format */
    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUVJ444P:
    case AV_PIX_FMT_YUV444P:
        s->chroma_format = CHROMA_444;
    case AV_PIX_FMT_YUVJ422P:
    case AV_PIX_FMT_YUV422P:
        s->chroma_format = CHROMA_422;
    case AV_PIX_FMT_YUVJ420P:
    case AV_PIX_FMT_YUV420P:
        s->chroma_format = CHROMA_420;

    /* ---- copy/sanitize user options into the context ---- */
    avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
    s->bit_rate = avctx->bit_rate;
    s->width    = avctx->width;
    s->height   = avctx->height;
    if (avctx->gop_size > 600 &&
        avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
        av_log(avctx, AV_LOG_WARNING,
               "keyframe interval too large!, reducing it from %d to %d\n",
               avctx->gop_size, 600);
        avctx->gop_size = 600;
    s->gop_size     = avctx->gop_size;
    if (avctx->max_b_frames > MAX_B_FRAMES) {
        av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
               "is %d.\n", MAX_B_FRAMES);
        avctx->max_b_frames = MAX_B_FRAMES;
    s->max_b_frames = avctx->max_b_frames;
    s->codec_id     = avctx->codec->id;
    s->strict_std_compliance = avctx->strict_std_compliance;
    s->quarter_sample     = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
    s->mpeg_quant         = avctx->mpeg_quant;
    s->rtp_mode           = !!avctx->rtp_payload_size;
    s->intra_dc_precision = avctx->intra_dc_precision;

    // workaround some differences between how applications specify dc precision
    if (s->intra_dc_precision < 0) {
        s->intra_dc_precision += 8;
    } else if (s->intra_dc_precision >= 8)
        s->intra_dc_precision -= 8;

    if (s->intra_dc_precision < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "intra dc precision must be positive, note some applications use"
               " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
        return AVERROR(EINVAL);

    /* only MPEG-2 supports raised DC precision (up to 11 bit, i.e. 3) */
    if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
        av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
        return AVERROR(EINVAL);
    s->user_specified_pts = AV_NOPTS_VALUE;

    if (s->gop_size <= 1) {
#if FF_API_MOTION_EST
FF_DISABLE_DEPRECATION_WARNINGS
    s->me_method = avctx->me_method;
FF_ENABLE_DEPRECATION_WARNINGS

    s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);

FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->border_masking != 0.0)
        s->border_masking = avctx->border_masking;
FF_ENABLE_DEPRECATION_WARNINGS

    /* adaptive quant needs at least one masking term or QP-RD active */
    s->adaptive_quant = (s->avctx->lumi_masking ||
                         s->avctx->dark_masking ||
                         s->avctx->temporal_cplx_masking ||
                         s->avctx->spatial_cplx_masking ||
                         s->avctx->p_masking ||
                         (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&

    s->loop_filter = !!(s->avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);

    /* ---- rate control / VBV sanity checks ---- */
    /* pick a default VBV buffer size when only max rate was given */
    if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
        switch(avctx->codec_id) {
        case AV_CODEC_ID_MPEG1VIDEO:
        case AV_CODEC_ID_MPEG2VIDEO:
            avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
        case AV_CODEC_ID_MPEG4:
        case AV_CODEC_ID_MSMPEG4V1:
        case AV_CODEC_ID_MSMPEG4V2:
        case AV_CODEC_ID_MSMPEG4V3:
            /* piecewise-linear interpolation between VBV profile points */
            if (avctx->rc_max_rate >= 15000000) {
                avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
            } else if(avctx->rc_max_rate >= 2000000) {
                avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
            } else if(avctx->rc_max_rate >= 384000) {
                avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
                avctx->rc_buffer_size = 40;
            avctx->rc_buffer_size *= 16384;
        if (avctx->rc_buffer_size) {
            av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);

    if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
        av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");

    if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
        av_log(avctx, AV_LOG_INFO,
               "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");

    if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
        av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");

    if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
        av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");

    if (avctx->rc_max_rate &&
        avctx->rc_max_rate == avctx->bit_rate &&
        avctx->rc_max_rate != avctx->rc_min_rate) {
        av_log(avctx, AV_LOG_INFO,
               "impossible bitrate constraints, this will fail\n");

    /* the VBV buffer must hold at least one frame's worth of bits */
    if (avctx->rc_buffer_size &&
        avctx->bit_rate * (int64_t)avctx->time_base.num >
            avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
        av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");

    if (!s->fixed_qscale &&
        avctx->bit_rate * av_q2d(avctx->time_base) >
            avctx->bit_rate_tolerance) {
        av_log(avctx, AV_LOG_WARNING,
               "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, (int64_t)avctx->bit_rate);
        avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);

    /* CBR MPEG-1/2: warn when the buffer exceeds what a 16-bit
     * vbv_delay (in 90 kHz units) can express */
    if (s->avctx->rc_max_rate &&
        s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
        (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
         s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
        90000LL * (avctx->rc_buffer_size - 1) >
            s->avctx->rc_max_rate * 0xFFFFLL) {
        av_log(avctx, AV_LOG_INFO,
               "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
               "specified vbv buffer is too large for the given bitrate!\n");

    /* ---- feature/codec compatibility checks ---- */
    if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
        s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
        s->codec_id != AV_CODEC_ID_FLV1) {
        av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");

    if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
        av_log(avctx, AV_LOG_ERROR,
               "OBMC is only supported with simple mb decision\n");

    if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
        av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");

    if (s->max_b_frames &&
        s->codec_id != AV_CODEC_ID_MPEG4 &&
        s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
        s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
        av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
    if (s->max_b_frames < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "max b frames must be 0 or positive for mpegvideo based encoders\n");

    /* these codecs store SAR in 8 bits per component; reduce if needed */
    if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
         s->codec_id == AV_CODEC_ID_H263  ||
         s->codec_id == AV_CODEC_ID_H263P) &&
        (avctx->sample_aspect_ratio.num > 255 ||
         avctx->sample_aspect_ratio.den > 255)) {
        av_log(avctx, AV_LOG_WARNING,
               "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
               avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
        av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
                   avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);

    /* per-codec resolution and alignment limits */
    if ((s->codec_id == AV_CODEC_ID_H263 ||
         s->codec_id == AV_CODEC_ID_H263P) &&
        (avctx->width  > 2048 ||
         avctx->height > 1152 )) {
        av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
    if ((s->codec_id == AV_CODEC_ID_H263 ||
         s->codec_id == AV_CODEC_ID_H263P) &&
        ((avctx->width &3) ||
         (avctx->height&3) )) {
        av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");

    if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
        (avctx->width  > 4095 ||
         avctx->height > 4095 )) {
        av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");

    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
        (avctx->width  > 16383 ||
         avctx->height > 16383 )) {
        av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");

    if (s->codec_id == AV_CODEC_ID_RV10 &&
         avctx->height&15 )) {
        av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
        return AVERROR(EINVAL);

    if (s->codec_id == AV_CODEC_ID_RV20 &&
        av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
        return AVERROR(EINVAL);

    if ((s->codec_id == AV_CODEC_ID_WMV1 ||
         s->codec_id == AV_CODEC_ID_WMV2) &&
        av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");

    if ((s->avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
        s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
        av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");

    // FIXME mpeg2 uses that too
    if (s->mpeg_quant && (   s->codec_id != AV_CODEC_ID_MPEG4
                          && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
        av_log(avctx, AV_LOG_ERROR,
               "mpeg2 style quantization not supported by codec\n");

    if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
        av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");

    if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
        s->avctx->mb_decision != FF_MB_DECISION_RD) {
        av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");

    if (s->avctx->scenechange_threshold < 1000000000 &&
        (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
        av_log(avctx, AV_LOG_ERROR,
               "closed gop with scene change detection are not supported yet, "
               "set threshold to 1000000000\n");

    if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
        if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
            av_log(avctx, AV_LOG_ERROR,
                  "low delay forcing is only available for mpeg2\n");
        if (s->max_b_frames != 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "b frames cannot be used with low delay\n");

    if (s->q_scale_type == 1) {
        if (avctx->qmax > 28) {
            av_log(avctx, AV_LOG_ERROR,
                   "non linear quant only supports qmax <= 28 currently\n");

    if (avctx->slices > 1 &&
        (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
        av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
        return AVERROR(EINVAL);

    if (s->avctx->thread_count > 1         &&
        s->codec_id != AV_CODEC_ID_MPEG4      &&
        s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
        s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
        s->codec_id != AV_CODEC_ID_MJPEG      &&
        (s->codec_id != AV_CODEC_ID_H263P)) {
        av_log(avctx, AV_LOG_ERROR,
               "multi threaded encoding not supported by codec\n");

    if (s->avctx->thread_count < 1) {
        av_log(avctx, AV_LOG_ERROR,
               "automatic thread number detection not supported by codec, "

    if (!avctx->time_base.den || !avctx->time_base.num) {
        av_log(avctx, AV_LOG_ERROR, "framerate not set\n");

    if (avctx->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
        av_log(avctx, AV_LOG_INFO,
               "notice: b_frame_strategy only affects the first pass\n");
        avctx->b_frame_strategy = 0;

    /* reduce the time base to lowest terms */
    i = av_gcd(avctx->time_base.den, avctx->time_base.num);
        av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
        avctx->time_base.den /= i;
        avctx->time_base.num /= i;

    /* ---- default quantizer biases ---- */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) {
        // (a + x * 3 / 8) / x
        s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
        s->inter_quant_bias = 0;
        s->intra_quant_bias = 0;
        s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));

    if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
        av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
        return AVERROR(EINVAL);

#if FF_API_QUANT_BIAS
FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
        s->intra_quant_bias = avctx->intra_quant_bias;
    if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
        s->inter_quant_bias = avctx->inter_quant_bias;
FF_ENABLE_DEPRECATION_WARNINGS

    av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);

    /* MPEG-4 stores the timebase denominator in 16 bits */
    if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
        s->avctx->time_base.den > (1 << 16) - 1) {
        av_log(avctx, AV_LOG_ERROR,
               "timebase %d/%d not supported by MPEG 4 standard, "
               "the maximum admitted value for the timebase denominator "
               "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
    s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;

    /* ---- per-codec output format selection ---- */
    switch (avctx->codec->id) {
    case AV_CODEC_ID_MPEG1VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay  = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
        avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
    case AV_CODEC_ID_MPEG2VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay  = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
        avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
    case AV_CODEC_ID_MJPEG:
    case AV_CODEC_ID_AMV:
        s->out_format = FMT_MJPEG;
        s->intra_only = 1; /* force intra only for jpeg */
        if (!CONFIG_MJPEG_ENCODER ||
            ff_mjpeg_encode_init(s) < 0)
    case AV_CODEC_ID_H261:
        if (!CONFIG_H261_ENCODER)
        if (ff_h261_get_picture_format(s->width, s->height) < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "The specified picture size of %dx%d is not valid for the "
                   "H.261 codec.\nValid sizes are 176x144, 352x288\n",
                    s->width, s->height);
        s->out_format = FMT_H261;
        s->rtp_mode   = 0; /* Sliced encoding not supported */
    case AV_CODEC_ID_H263:
        if (!CONFIG_H263_ENCODER)
        if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
                             s->width, s->height) == 8) {
            av_log(avctx, AV_LOG_ERROR,
                   "The specified picture size of %dx%d is not valid for "
                   "the H.263 codec.\nValid sizes are 128x96, 176x144, "
                   "352x288, 704x576, and 1408x1152. "
                   "Try H.263+.\n", s->width, s->height);
        s->out_format = FMT_H263;
    case AV_CODEC_ID_H263P:
        s->out_format = FMT_H263;
        s->h263_aic        = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
        s->modified_quant  = s->h263_aic;
        s->loop_filter     = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
        s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;

        /* These are just to be sure */
    case AV_CODEC_ID_FLV1:
        s->out_format      = FMT_H263;
        s->h263_flv        = 2; /* format = 1; 11-bit codes */
        s->unrestricted_mv = 1;
        s->rtp_mode  = 0; /* don't allow GOB */
    case AV_CODEC_ID_RV10:
        s->out_format = FMT_H263;
    case AV_CODEC_ID_RV20:
        s->out_format      = FMT_H263;
        s->modified_quant  = 1;
        s->unrestricted_mv = 0;
    case AV_CODEC_ID_MPEG4:
        s->out_format      = FMT_H263;
        s->unrestricted_mv = 1;
        s->low_delay       = s->max_b_frames ? 0 : 1;
        avctx->delay       = s->low_delay ? 0 : (s->max_b_frames + 1);
    case AV_CODEC_ID_MSMPEG4V2:
        s->out_format      = FMT_H263;
        s->unrestricted_mv = 1;
        s->msmpeg4_version = 2;
    case AV_CODEC_ID_MSMPEG4V3:
        s->out_format        = FMT_H263;
        s->unrestricted_mv   = 1;
        s->msmpeg4_version   = 3;
        s->flipflop_rounding = 1;
    case AV_CODEC_ID_WMV1:
        s->out_format        = FMT_H263;
        s->unrestricted_mv   = 1;
        s->msmpeg4_version   = 4;
        s->flipflop_rounding = 1;
    case AV_CODEC_ID_WMV2:
        s->out_format        = FMT_H263;
        s->unrestricted_mv   = 1;
        s->msmpeg4_version   = 5;
        s->flipflop_rounding = 1;

    avctx->has_b_frames = !s->low_delay;

    s->progressive_frame    =
    s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
                                                AV_CODEC_FLAG_INTERLACED_ME) ||

    /* ---- shared state, DSP contexts and table allocation ---- */
    if (ff_mpv_common_init(s) < 0)

    ff_fdctdsp_init(&s->fdsp, avctx);
    ff_me_cmp_init(&s->mecc, avctx);
    ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
    ff_pixblockdsp_init(&s->pdsp, avctx);
    ff_qpeldsp_init(&s->qdsp);

    if (s->msmpeg4_version) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
                          2 * 2 * (MAX_LEVEL + 1) *
                          (MAX_RUN + 1) * 2 * sizeof(int), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);

    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,          64 * 32 * sizeof(int), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix,   64 * 32 * sizeof(int), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,          64 * 32 * sizeof(int), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,        64 * 32 * 2 * sizeof(uint16_t), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,        64 * 32 * 2 * sizeof(uint16_t), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
                      MAX_PICTURE_COUNT * sizeof(Picture *), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
                      MAX_PICTURE_COUNT * sizeof(Picture *), fail);

    if (s->avctx->noise_reduction) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
                          2 * 64 * sizeof(uint16_t), fail);

    ff_dct_encode_init(s);

    if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
        s->chroma_qscale_table = ff_h263_chroma_qscale_table;

    if (s->slice_context_count > 1) {
        if (avctx->codec_id == AV_CODEC_ID_H263P)
            s->h263_slice_structured = 1;

    s->quant_precision = 5;

    ff_set_cmp(&s->mecc, s->mecc.ildct_cmp,      s->avctx->ildct_cmp);
    ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->avctx->frame_skip_cmp);

    /* per-format encoder sub-initialization */
    if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
        ff_h261_encode_init(s);
    if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
        ff_h263_encode_init(s);
    if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
        if ((ret = ff_msmpeg4_encode_init(s)) < 0)
    if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
        && s->out_format == FMT_MPEG1)
        ff_mpeg1_encode_init(s);

    /* ---- default quant matrices (in idct permutation order) ---- */
    for (i = 0; i < 64; i++) {
        int j = s->idsp.idct_permutation[i];
        if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
            s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
        } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
            s->chroma_intra_matrix[j] =
            s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
        /* user-supplied matrices override the defaults */
        if (s->avctx->intra_matrix)
            s->intra_matrix[j] = s->avctx->intra_matrix[i];
        if (s->avctx->inter_matrix)
            s->inter_matrix[j] = s->avctx->inter_matrix[i];

    /* precompute matrix */
    /* for mjpeg, we do include qscale in the matrix */
    if (s->out_format != FMT_MJPEG) {
        ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
                          s->intra_matrix, s->intra_quant_bias, avctx->qmin,
        ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
                          s->inter_matrix, s->inter_quant_bias, avctx->qmin,

    if (ff_rate_control_init(s) < 0)

    /* ---- deprecated-option forwarding ---- */
#if FF_API_ERROR_RATE
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->error_rate)
        s->error_rate = avctx->error_rate;
    FF_ENABLE_DEPRECATION_WARNINGS;

#if FF_API_NORMALIZE_AQP
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP)
        s->mpv_flags |= FF_MPV_FLAG_NAQ;
    FF_ENABLE_DEPRECATION_WARNINGS;

    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->flags & CODEC_FLAG_MV0)
        s->mpv_flags |= FF_MPV_FLAG_MV0;
    FF_ENABLE_DEPRECATION_WARNINGS

    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->rc_qsquish != 0.0)
        s->rc_qsquish = avctx->rc_qsquish;
    if (avctx->rc_qmod_amp != 0.0)
        s->rc_qmod_amp = avctx->rc_qmod_amp;
    if (avctx->rc_qmod_freq)
        s->rc_qmod_freq = avctx->rc_qmod_freq;
    if (avctx->rc_buffer_aggressivity != 1.0)
        s->rc_buffer_aggressivity = avctx->rc_buffer_aggressivity;
    if (avctx->rc_initial_cplx != 0.0)
        s->rc_initial_cplx = avctx->rc_initial_cplx;
    s->lmin = avctx->lmin;
    s->lmax = avctx->lmax;
        av_freep(&s->rc_eq);
        s->rc_eq = av_strdup(avctx->rc_eq);
            return AVERROR(ENOMEM);
    FF_ENABLE_DEPRECATION_WARNINGS

    /* b_frame_strategy == 2 needs downscaled lookahead frames */
    if (avctx->b_frame_strategy == 2) {
        for (i = 0; i < s->max_b_frames + 2; i++) {
            s->tmp_frames[i] = av_frame_alloc();
            if (!s->tmp_frames[i])
                return AVERROR(ENOMEM);

            s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
            s->tmp_frames[i]->width  = s->width  >> avctx->brd_scale;
            s->tmp_frames[i]->height = s->height >> avctx->brd_scale;

            ret = av_frame_get_buffer(s->tmp_frames[i], 32);

    /* publish coded picture buffer properties as side data */
    cpb_props = ff_add_cpb_side_data(avctx);
        return AVERROR(ENOMEM);
    cpb_props->max_bitrate = avctx->rc_max_rate;
    cpb_props->min_bitrate = avctx->rc_min_rate;
    cpb_props->avg_bitrate = avctx->bit_rate;
    cpb_props->buffer_size = avctx->rc_buffer_size;

    /* fail path: undo everything done so far */
    ff_mpv_encode_end(avctx);
    return AVERROR_UNKNOWN;
/**
 * AVCodec.close() callback: tear down everything ff_mpv_encode_init()
 * set up — rate control, common mpegvideo state, the MJPEG sub-encoder,
 * lookahead frames, statistics buffers and all quantizer tables.
 * Safe to call from the init fail path on a partially built context
 * (av_freep/av_frame_free tolerate NULL).
 */
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
    MpegEncContext *s = avctx->priv_data;

    ff_rate_control_uninit(s);

    ff_mpv_common_end(s);
    if (CONFIG_MJPEG_ENCODER &&
        s->out_format == FMT_MJPEG)
        ff_mjpeg_encode_close(s);

    av_freep(&avctx->extradata);

    for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
        av_frame_free(&s->tmp_frames[i]);

    ff_free_picture_tables(&s->new_picture);
    ff_mpeg_unref_picture(s->avctx, &s->new_picture);

    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);

    /* the chroma intra tables may alias the luma ones; only free them
     * separately when they are distinct allocations */
    if(s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
    if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
    s->q_chroma_intra_matrix=   NULL;
    s->q_chroma_intra_matrix16= NULL;
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);
/**
 * Sum of absolute errors of a 16x16 block against a constant reference
 * value (used as a flatness measure relative to the block mean).
 *
 * @param src    top-left of the 16x16 block
 * @param ref    constant value each pixel is compared against
 * @param stride distance in bytes between rows of src
 * @return sum over the 256 pixels of |src[x,y] - ref|
 */
static int get_sae(uint8_t *src, int ref, int stride)
    for (y = 0; y < 16; y++) {
        for (x = 0; x < 16; x++) {
            acc += FFABS(src[x + y * stride] - ref);
/**
 * Count the 16x16 blocks of the frame that would be cheaper to code as
 * intra: for each macroblock, compare the inter SAD against ref with
 * the intra cost estimate (SAE around the block mean). A block counts
 * as intra when sae + 500 < sad.
 *
 * @param s      encoder context (mecc.sad / mpvencdsp.pix_sum helpers)
 * @param src    current frame plane
 * @param ref    reference frame plane
 * @param stride row stride shared by src and ref
 * @return number of macroblocks favoring intra coding
 */
static int get_intra_count(MpegEncContext *s, uint8_t *src,
                           uint8_t *ref, int stride)
    /* only full 16x16 macroblocks are considered */
    h = s->height & ~15;
    for (y = 0; y < h; y += 16) {
        for (x = 0; x < w; x += 16) {
            int offset = x + y * stride;
            int sad  = s->mecc.sad[0](NULL, src + offset, ref + offset,
            /* block mean, rounded: pix_sum over 256 pixels */
            int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
            int sae  = get_sae(src + offset, mean, stride);
            /* +500 biases the decision toward inter coding */
            acc += sae + 500 < sad;
/**
 * Thin wrapper around ff_alloc_picture() that fills in the encoder's
 * geometry (chroma shifts, mb/b8 strides, dimensions) and records the
 * resulting linesize/uvlinesize in the context.
 *
 * @param s      encoder context supplying geometry and output format
 * @param pic    picture to allocate buffers for
 * @param shared nonzero when the frame data is shared (not owned)
 * @return value of ff_alloc_picture() (0 on success, negative AVERROR)
 */
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
    return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
                            s->chroma_x_shift, s->chroma_y_shift, s->out_format,
                            s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
                            &s->linesize, &s->uvlinesize);
1138 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
/* Queue one user-supplied frame for encoding.
 * Validates/derives the frame pts, then either references the input
 * buffer directly (when strides and alignment match ours) or copies it
 * plane by plane into an internal Picture, and finally inserts it into
 * s->input_picture[] at the encoder-delay position.
 * Returns 0 on success, a negative AVERROR on failure. */
1140 Picture *pic = NULL;
1142 int i, display_picture_number = 0, ret;
/* frames the encoder must buffer before the first output packet */
1143 int encoding_delay = s->max_b_frames ? s->max_b_frames
1144 : (s->low_delay ? 0 : 1);
1145 int flush_offset = 1;
1150 display_picture_number = s->input_picture_number++;
/* --- pts handling: user-specified pts must be strictly increasing --- */
1152 if (pts != AV_NOPTS_VALUE) {
1153 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1154 int64_t last = s->user_specified_pts;
1157 av_log(s->avctx, AV_LOG_ERROR,
1158 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1160 return AVERROR(EINVAL);
/* remember the pts->dts shift from the second displayed picture */
1163 if (!s->low_delay && display_picture_number == 1)
1164 s->dts_delta = pts - last;
1166 s->user_specified_pts = pts;
1168 if (s->user_specified_pts != AV_NOPTS_VALUE) {
/* no pts on this frame: extrapolate from the previous one */
1169 s->user_specified_pts =
1170 pts = s->user_specified_pts + 1;
1171 av_log(s->avctx, AV_LOG_INFO,
1172 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1175 pts = display_picture_number;
/* --- decide whether the input buffer can be referenced directly:
 * strides must match ours and data must be STRIDE_ALIGN aligned --- */
1179 if (!pic_arg->buf[0] ||
1180 pic_arg->linesize[0] != s->linesize ||
1181 pic_arg->linesize[1] != s->uvlinesize ||
1182 pic_arg->linesize[2] != s->uvlinesize)
1184 if ((s->width & 15) || (s->height & 15))
1186 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1188 if (s->linesize & (STRIDE_ALIGN-1))
1191 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1192 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1194 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1198 pic = &s->picture[i];
1202 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1205 ret = alloc_picture(s, pic, direct);
/* if the caller handed our own edge-padded buffer back, no copy is
 * needed; otherwise copy every plane into the internal picture */
1210 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1211 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1212 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1215 int h_chroma_shift, v_chroma_shift;
1216 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1220 for (i = 0; i < 3; i++) {
1221 int src_stride = pic_arg->linesize[i];
1222 int dst_stride = i ? s->uvlinesize : s->linesize;
1223 int h_shift = i ? h_chroma_shift : 0;
1224 int v_shift = i ? v_chroma_shift : 0;
1225 int w = s->width >> h_shift;
1226 int h = s->height >> v_shift;
1227 uint8_t *src = pic_arg->data[i];
1228 uint8_t *dst = pic->f->data[i];
/* interlaced MPEG-2 with tall alignment padding needs extra edge rows;
 * NOTE(review): vpad is set in lines elided here - confirm in full file */
1231 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1232 && !s->progressive_sequence
1233 && FFALIGN(s->height, 32) - s->height > 16)
1236 if (!s->avctx->rc_buffer_size)
1237 dst += INPLACE_OFFSET;
/* matching strides: one flat copy; otherwise copy row by row */
1239 if (src_stride == dst_stride)
1240 memcpy(dst, src, src_stride * h);
1243 uint8_t *dst2 = dst;
1245 memcpy(dst2, src, w);
/* replicate border pixels so motion search may read past the edges */
1250 if ((s->width & 15) || (s->height & (vpad-1))) {
1251 s->mpvencdsp.draw_edges(dst, dst_stride,
1260 ret = av_frame_copy_props(pic->f, pic_arg);
1264 pic->f->display_picture_number = display_picture_number;
1265 pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1267 /* Flushing: When we have not received enough input frames,
1268 * ensure s->input_picture[0] contains the first picture */
1269 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1270 if (s->input_picture[flush_offset])
1273 if (flush_offset <= 1)
1276 encoding_delay = encoding_delay - flush_offset + 1;
1279 /* shift buffer entries */
1280 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1281 s->input_picture[i - flush_offset] = s->input_picture[i];
1283 s->input_picture[encoding_delay] = (Picture*) pic;
1288 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
/* Decide whether candidate frame 'p' is similar enough to the last
 * encoded reference 'ref' to be skipped entirely.
 * Compares every 8x8 block of all three planes with frame_skip_cmp and
 * folds the per-block scores according to |frame_skip_exp| (max, sum of
 * abs, sum of squares, cubes, fourth powers).  The final score is then
 * tested against frame_skip_threshold and frame_skip_factor*lambda.
 * NOTE(review): the actual return statements are in elided lines -
 * presumably nonzero means "skip this frame"; confirm in full file. */
1292 int64_t score64 = 0;
1294 for (plane = 0; plane < 3; plane++) {
1295 const int stride = p->f->linesize[plane];
/* luma covers 2x2 blocks per MB dimension, chroma 1x1 (4:2:0 layout) */
1296 const int bw = plane ? 1 : 2;
1297 for (y = 0; y < s->mb_height * bw; y++) {
1298 for (x = 0; x < s->mb_width * bw; x++) {
/* non-shared pictures carry an INPLACE/edge offset of 16 bytes */
1299 int off = p->shared ? 0 : 16;
1300 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1301 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1302 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
/* fold the per-block score; exponent selects the norm used */
1304 switch (FFABS(s->avctx->frame_skip_exp)) {
1305 case 0: score = FFMAX(score, v); break;
1306 case 1: score += FFABS(v); break;
1307 case 2: score64 += v * (int64_t)v; break;
1308 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1309 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
/* negative exponent: normalize back via the matching root */
1318 if (s->avctx->frame_skip_exp < 0)
1319 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1320 -1.0/s->avctx->frame_skip_exp);
1322 if (score64 < s->avctx->frame_skip_threshold)
1324 if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
1329 static int encode_frame(AVCodecContext *c, AVFrame *frame)
/* Encode a single frame with the given (temporary) codec context;
 * used by estimate_best_b_count() to probe different GOP patterns.
 * NOTE(review): the return value computation is in elided lines -
 * presumably the encoded packet size, or a negative error. */
1331 AVPacket pkt = { 0 };
1332 int ret, got_output;
1334 av_init_packet(&pkt);
1335 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
/* the packet data itself is not needed, only its size */
1340 av_packet_unref(&pkt);
1344 static int estimate_best_b_count(MpegEncContext *s)
/* b_frame_strategy == 2: brute-force search for the best number of
 * consecutive B-frames.  Downscaled (by brd_scale) copies of the queued
 * input pictures are encoded with a throw-away encoder for every
 * candidate B-run length j, and the rate-distortion cost of each run is
 * compared.  Returns the b count with the lowest cost, or negative on
 * error. */
1346 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1347 AVCodecContext *c = avcodec_alloc_context3(NULL);
1348 const int scale = s->avctx->brd_scale;
1349 int i, j, out_size, p_lambda, b_lambda, lambda2;
1350 int64_t best_rd = INT64_MAX;
1351 int best_b_count = -1;
1354 return AVERROR(ENOMEM);
1355 av_assert0(scale >= 0 && scale <= 3);
/* reuse the lambdas of the previous P/B pictures as quality targets */
1358 //s->next_picture_ptr->quality;
1359 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1360 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1361 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1362 if (!b_lambda) // FIXME we should do this somewhere else
1363 b_lambda = p_lambda;
1364 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* --- configure the probe encoder at reduced resolution --- */
1367 c->width = s->width >> scale;
1368 c->height = s->height >> scale;
1369 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1370 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1371 c->mb_decision = s->avctx->mb_decision;
1372 c->me_cmp = s->avctx->me_cmp;
1373 c->mb_cmp = s->avctx->mb_cmp;
1374 c->me_sub_cmp = s->avctx->me_sub_cmp;
1375 c->pix_fmt = AV_PIX_FMT_YUV420P;
1376 c->time_base = s->avctx->time_base;
1377 c->max_b_frames = s->max_b_frames;
1379 if (avcodec_open2(c, codec, NULL) < 0)
/* --- shrink the reference + queued inputs into tmp_frames[] --- */
1382 for (i = 0; i < s->max_b_frames + 2; i++) {
/* slot 0 is the previous reference, slots 1.. are the queued inputs */
1383 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1384 s->next_picture_ptr;
1387 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1388 pre_input = *pre_input_ptr;
1389 memcpy(data, pre_input_ptr->f->data, sizeof(data));
/* non-shared input pictures carry the INPLACE edge offset */
1391 if (!pre_input.shared && i) {
1392 data[0] += INPLACE_OFFSET;
1393 data[1] += INPLACE_OFFSET;
1394 data[2] += INPLACE_OFFSET;
1397 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1398 s->tmp_frames[i]->linesize[0],
1400 pre_input.f->linesize[0],
1401 c->width, c->height);
1402 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1403 s->tmp_frames[i]->linesize[1],
1405 pre_input.f->linesize[1],
1406 c->width >> 1, c->height >> 1);
1407 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1408 s->tmp_frames[i]->linesize[2],
1410 pre_input.f->linesize[2],
1411 c->width >> 1, c->height >> 1);
/* --- try every candidate B-run length j and measure RD cost --- */
1415 for (j = 0; j < s->max_b_frames + 1; j++) {
1418 if (!s->input_picture[j])
1421 c->error[0] = c->error[1] = c->error[2] = 0;
/* first frame encodes as I to give the probe encoder a reference */
1423 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1424 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1426 out_size = encode_frame(c, s->tmp_frames[0]);
/* cost of the I frame is intentionally not counted (same for all j) */
1428 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1430 for (i = 0; i < s->max_b_frames + 1; i++) {
/* every (j+1)-th frame (and the last one) is a P, the rest are B */
1431 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1433 s->tmp_frames[i + 1]->pict_type = is_p ?
1434 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1435 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1437 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1439 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1442 /* get the delayed frames */
1444 out_size = encode_frame(c, NULL);
1445 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* add the accumulated distortion (PSNR error sums) to the rate term */
1448 rd += c->error[0] + c->error[1] + c->error[2];
1459 return best_b_count;
1462 static int select_input_picture(MpegEncContext *s)
/* Pick the next picture to code and reorder the input queue into coded
 * order: decides frame skipping, forced I frames, and the number of
 * B-frames (per b_frame_strategy), then fills reordered_input_picture[]
 * and references the chosen picture as s->new_picture /
 * s->current_picture_ptr.  Returns 0 on success, negative on error. */
1466 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1467 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1468 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1470 /* set next picture type & ordering */
1471 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
/* optional frame skipping: drop the frame when it barely differs
 * from the last reference */
1472 if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1473 if (s->picture_in_gop_number < s->gop_size &&
1474 s->next_picture_ptr &&
1475 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1476 // FIXME check that the gop check above is +-1 correct
1477 av_frame_unref(s->input_picture[0]->f);
1479 ff_vbv_update(s, 0);
/* no reference yet (or intra-only codec): force an I frame */
1485 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1486 !s->next_picture_ptr || s->intra_only) {
1487 s->reordered_input_picture[0] = s->input_picture[0];
1488 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1489 s->reordered_input_picture[0]->f->coded_picture_number =
1490 s->coded_picture_number++;
/* two-pass mode: picture types come from the first-pass stats */
1494 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1495 for (i = 0; i < s->max_b_frames + 1; i++) {
1496 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1498 if (pict_num >= s->rc_context.num_entries)
1500 if (!s->input_picture[i]) {
1501 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1505 s->input_picture[i]->f->pict_type =
1506 s->rc_context.entry[pict_num].new_pict_type;
/* --- choose the number of consecutive B frames --- */
1510 if (s->avctx->b_frame_strategy == 0) {
/* strategy 0: always use max_b_frames (clamped to available input) */
1511 b_frames = s->max_b_frames;
1512 while (b_frames && !s->input_picture[b_frames])
1514 } else if (s->avctx->b_frame_strategy == 1) {
/* strategy 1: heuristic based on intra-count of each queued frame */
1515 for (i = 1; i < s->max_b_frames + 1; i++) {
1516 if (s->input_picture[i] &&
1517 s->input_picture[i]->b_frame_score == 0) {
1518 s->input_picture[i]->b_frame_score =
1520 s->input_picture[i ]->f->data[0],
1521 s->input_picture[i - 1]->f->data[0],
1525 for (i = 0; i < s->max_b_frames + 1; i++) {
1526 if (!s->input_picture[i] ||
1527 s->input_picture[i]->b_frame_score - 1 >
1528 s->mb_num / s->avctx->b_sensitivity)
1532 b_frames = FFMAX(0, i - 1);
/* reset the cached scores of the frames we are about to consume */
1535 for (i = 0; i < b_frames + 1; i++) {
1536 s->input_picture[i]->b_frame_score = 0;
1538 } else if (s->avctx->b_frame_strategy == 2) {
/* strategy 2: brute-force RD search on downscaled frames */
1539 b_frames = estimate_best_b_count(s);
1541 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
/* user-forced picture types (e.g. from the API) shrink the B run */
1547 for (i = b_frames - 1; i >= 0; i--) {
1548 int type = s->input_picture[i]->f->pict_type;
1549 if (type && type != AV_PICTURE_TYPE_B)
1552 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1553 b_frames == s->max_b_frames) {
1554 av_log(s->avctx, AV_LOG_ERROR,
1555 "warning, too many b frames in a row\n");
/* GOP boundary handling: possibly truncate the run / force an I */
1558 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1559 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1560 s->gop_size > s->picture_in_gop_number) {
1561 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1563 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1565 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1569 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1570 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
/* --- emit coded order: the anchor first, then its B frames --- */
1573 s->reordered_input_picture[0] = s->input_picture[b_frames];
1574 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1575 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1576 s->reordered_input_picture[0]->f->coded_picture_number =
1577 s->coded_picture_number++;
1578 for (i = 0; i < b_frames; i++) {
1579 s->reordered_input_picture[i + 1] = s->input_picture[i];
1580 s->reordered_input_picture[i + 1]->f->pict_type =
1582 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1583 s->coded_picture_number++;
1588 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1590 if (s->reordered_input_picture[0]) {
/* reference field: 3 for anchors (both fields), 0 for B frames */
1591 s->reordered_input_picture[0]->reference =
1592 s->reordered_input_picture[0]->f->pict_type !=
1593 AV_PICTURE_TYPE_B ? 3 : 0;
1595 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1598 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1599 // input is a shared pix, so we can't modify it -> allocate a new
1600 // one & ensure that the shared one is reusable
1603 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1606 pic = &s->picture[i];
1608 pic->reference = s->reordered_input_picture[0]->reference;
1609 if (alloc_picture(s, pic, 0) < 0) {
1613 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1617 /* mark us unused / free shared pic */
1618 av_frame_unref(s->reordered_input_picture[0]->f);
1619 s->reordered_input_picture[0]->shared = 0;
1621 s->current_picture_ptr = pic;
1623 // input is not a shared pix -> reuse buffer for current_pix
1624 s->current_picture_ptr = s->reordered_input_picture[0];
1625 for (i = 0; i < 4; i++) {
1626 s->new_picture.f->data[i] += INPLACE_OFFSET;
1629 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1630 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1631 s->current_picture_ptr)) < 0)
1634 s->picture_number = s->new_picture.f->display_picture_number;
1639 static void frame_end(MpegEncContext *s)
/* Post-encode bookkeeping for the just-coded picture: pad reference
 * frame borders for unrestricted MV search, remember the picture type
 * and lambda for the next frame, and mirror stats into deprecated
 * public fields. */
1641 if (s->unrestricted_mv &&
1642 s->current_picture.reference &&
1644 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1645 int hshift = desc->log2_chroma_w;
1646 int vshift = desc->log2_chroma_h;
/* replicate edges on all three planes so motion vectors may point
 * outside the picture */
1647 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1648 s->current_picture.f->linesize[0],
1649 s->h_edge_pos, s->v_edge_pos,
1650 EDGE_WIDTH, EDGE_WIDTH,
1651 EDGE_TOP | EDGE_BOTTOM);
1652 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1653 s->current_picture.f->linesize[1],
1654 s->h_edge_pos >> hshift,
1655 s->v_edge_pos >> vshift,
1656 EDGE_WIDTH >> hshift,
1657 EDGE_WIDTH >> vshift,
1658 EDGE_TOP | EDGE_BOTTOM);
1659 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1660 s->current_picture.f->linesize[2],
1661 s->h_edge_pos >> hshift,
1662 s->v_edge_pos >> vshift,
1663 EDGE_WIDTH >> hshift,
1664 EDGE_WIDTH >> vshift,
1665 EDGE_TOP | EDGE_BOTTOM);
/* remember type/lambda for rate control of the following pictures */
1670 s->last_pict_type = s->pict_type;
1671 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1672 if (s->pict_type!= AV_PICTURE_TYPE_B)
1673 s->last_non_b_pict_type = s->pict_type;
/* legacy public API mirrors, kept behind deprecation guards */
1675 #if FF_API_CODED_FRAME
1676 FF_DISABLE_DEPRECATION_WARNINGS
1677 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1678 FF_ENABLE_DEPRECATION_WARNINGS
1680 #if FF_API_ERROR_FRAME
1681 FF_DISABLE_DEPRECATION_WARNINGS
1682 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1683 sizeof(s->current_picture.encoding_error));
1684 FF_ENABLE_DEPRECATION_WARNINGS
1688 static void update_noise_reduction(MpegEncContext *s)
/* Refresh the per-coefficient DCT noise-reduction offsets from the
 * accumulated error statistics, separately for intra and inter blocks. */
1692 for (intra = 0; intra < 2; intra++) {
/* halve the accumulators when the sample count gets large, so the
 * statistics keep adapting instead of saturating */
1693 if (s->dct_count[intra] > (1 << 16)) {
1694 for (i = 0; i < 64; i++) {
1695 s->dct_error_sum[intra][i] >>= 1;
1697 s->dct_count[intra] >>= 1;
/* offset[i] = strength * count / error_sum[i], with rounding;
 * noisier coefficients (larger error sum) get a smaller offset */
1700 for (i = 0; i < 64; i++) {
1701 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1702 s->dct_count[intra] +
1703 s->dct_error_sum[intra][i] / 2) /
1704 (s->dct_error_sum[intra][i] + 1);
1709 static int frame_start(MpegEncContext *s)
/* Prepare encoder state for coding the current picture: rotate the
 * last/next reference pictures, take new references on them, adjust
 * plane pointers for field pictures, select the matching dequantizers,
 * and refresh noise-reduction tables.  Returns 0 or a negative error. */
1713 /* mark & release old frames */
1714 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1715 s->last_picture_ptr != s->next_picture_ptr &&
1716 s->last_picture_ptr->f->buf[0]) {
1717 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1720 s->current_picture_ptr->f->pict_type = s->pict_type;
1721 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1723 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1724 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1725 s->current_picture_ptr)) < 0)
/* anchors advance the reference chain; B frames do not */
1728 if (s->pict_type != AV_PICTURE_TYPE_B) {
1729 s->last_picture_ptr = s->next_picture_ptr;
1731 s->next_picture_ptr = s->current_picture_ptr;
1734 if (s->last_picture_ptr) {
1735 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1736 if (s->last_picture_ptr->f->buf[0] &&
1737 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1738 s->last_picture_ptr)) < 0)
1741 if (s->next_picture_ptr) {
1742 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1743 if (s->next_picture_ptr->f->buf[0] &&
1744 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1745 s->next_picture_ptr)) < 0)
/* field pictures: point at the selected field and double the strides */
1749 if (s->picture_structure!= PICT_FRAME) {
1751 for (i = 0; i < 4; i++) {
1752 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1753 s->current_picture.f->data[i] +=
1754 s->current_picture.f->linesize[i];
1756 s->current_picture.f->linesize[i] *= 2;
1757 s->last_picture.f->linesize[i] *= 2;
1758 s->next_picture.f->linesize[i] *= 2;
/* pick the dequantizer matching the output bitstream format */
1762 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1763 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1764 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1765 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1766 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1767 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1769 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1770 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1773 if (s->dct_error_sum) {
1774 av_assert2(s->avctx->noise_reduction && s->encoding);
1775 update_noise_reduction(s);
/* Public entry point: encode one video frame (or flush) into 'pkt'.
 * Queues the input, selects/reorders the picture to code, encodes it
 * (re-encoding at a higher lambda if the VBV would overflow), appends
 * stuffing bits, updates rate control and CBR vbv_delay, and sets the
 * packet pts/dts/flags.  Returns 0 on success, negative AVERROR on
 * failure; *got_packet signals whether pkt holds output. */
1781 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1782 const AVFrame *pic_arg, int *got_packet)
1784 MpegEncContext *s = avctx->priv_data;
1785 int i, stuffing_count, ret;
1786 int context_count = s->slice_context_count;
1788 s->vbv_ignore_qmax = 0;
1790 s->picture_in_gop_number++;
1792 if (load_input_picture(s, pic_arg) < 0)
1795 if (select_input_picture(s) < 0) {
/* output? */
1800 if (s->new_picture.f->data[0]) {
/* with a single slice context and no caller buffer the packet can
 * grow on demand; otherwise allocate the worst-case size up front */
1801 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1802 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1804 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1805 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1808 s->mb_info_ptr = av_packet_new_side_data(pkt,
1809 AV_PKT_DATA_H263_MB_INFO,
1810 s->mb_width*s->mb_height*12);
1811 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* give each slice thread a proportional slice of the packet buffer */
1814 for (i = 0; i < context_count; i++) {
1815 int start_y = s->thread_context[i]->start_mb_y;
1816 int end_y = s->thread_context[i]-> end_mb_y;
1817 int h = s->mb_height;
1818 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1819 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1821 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1824 s->pict_type = s->new_picture.f->pict_type;
1826 ret = frame_start(s);
1830 ret = encode_picture(s, s->picture_number);
1831 if (growing_buffer) {
/* the byte buffer may have been reallocated during encoding */
1832 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1833 pkt->data = s->pb.buf;
1834 pkt->size = avctx->internal->byte_buffer_size;
/* legacy per-category bit statistics, kept behind deprecation guards */
1839 #if FF_API_STAT_BITS
1840 FF_DISABLE_DEPRECATION_WARNINGS
1841 avctx->header_bits = s->header_bits;
1842 avctx->mv_bits = s->mv_bits;
1843 avctx->misc_bits = s->misc_bits;
1844 avctx->i_tex_bits = s->i_tex_bits;
1845 avctx->p_tex_bits = s->p_tex_bits;
1846 avctx->i_count = s->i_count;
1847 // FIXME f/b_count in avctx
1848 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1849 avctx->skip_count = s->skip_count;
1850 FF_ENABLE_DEPRECATION_WARNINGS
1855 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1856 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
/* VBV check: if the frame is too large, raise lambda and re-encode */
1858 if (avctx->rc_buffer_size) {
1859 RateControlContext *rcc = &s->rc_context;
1860 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1861 int hq = (s->avctx->mb_decision == FF_MB_DECISION_RD || s->avctx->trellis);
1862 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1864 if (put_bits_count(&s->pb) > max_size &&
1865 s->lambda < s->lmax) {
1866 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1867 (s->qscale + 1) / s->qscale);
1868 if (s->adaptive_quant) {
1870 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1871 s->lambda_table[i] =
1872 FFMAX(s->lambda_table[i] + min_step,
1873 s->lambda_table[i] * (s->qscale + 1) /
1876 s->mb_skipped = 0; // done in frame_start()
1877 // done in encode_picture() so we must undo it
1878 if (s->pict_type == AV_PICTURE_TYPE_P) {
1879 if (s->flipflop_rounding ||
1880 s->codec_id == AV_CODEC_ID_H263P ||
1881 s->codec_id == AV_CODEC_ID_MPEG4)
1882 s->no_rounding ^= 1;
1884 if (s->pict_type != AV_PICTURE_TYPE_B) {
1885 s->time_base = s->last_time_base;
1886 s->last_non_b_time = s->time - s->pp_time;
/* reset all slice bitstream writers before the retry */
1888 for (i = 0; i < context_count; i++) {
1889 PutBitContext *pb = &s->thread_context[i]->pb;
1890 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1892 s->vbv_ignore_qmax = 1;
1893 av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1897 av_assert0(s->avctx->rc_max_rate);
1900 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1901 ff_write_pass1_stats(s);
/* propagate per-plane encoding error (PSNR data) to the caller */
1903 for (i = 0; i < 4; i++) {
1904 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1905 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1907 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1908 s->current_picture_ptr->encoding_error,
1909 (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1912 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1913 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1914 s->misc_bits + s->i_tex_bits +
1916 flush_put_bits(&s->pb);
1917 s->frame_bits = put_bits_count(&s->pb);
/* rate control decides how many stuffing bytes are needed */
1919 stuffing_count = ff_vbv_update(s, s->frame_bits);
1920 s->stuffing_bits = 8*stuffing_count;
1921 if (stuffing_count) {
1922 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1923 stuffing_count + 50) {
1924 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
/* codec-specific stuffing patterns */
1928 switch (s->codec_id) {
1929 case AV_CODEC_ID_MPEG1VIDEO:
1930 case AV_CODEC_ID_MPEG2VIDEO:
1931 while (stuffing_count--) {
1932 put_bits(&s->pb, 8, 0);
1935 case AV_CODEC_ID_MPEG4:
/* MPEG-4 stuffing: a 0x1C3 start code then 0xFF filler bytes */
1936 put_bits(&s->pb, 16, 0);
1937 put_bits(&s->pb, 16, 0x1C3);
1938 stuffing_count -= 4;
1939 while (stuffing_count--) {
1940 put_bits(&s->pb, 8, 0xFF);
1944 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1946 flush_put_bits(&s->pb);
1947 s->frame_bits = put_bits_count(&s->pb);
1950 /* update mpeg1/2 vbv_delay for CBR */
1951 if (s->avctx->rc_max_rate &&
1952 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1953 s->out_format == FMT_MPEG1 &&
1954 90000LL * (avctx->rc_buffer_size - 1) <=
1955 s->avctx->rc_max_rate * 0xFFFFLL) {
1956 AVCPBProperties *props;
1959 int vbv_delay, min_delay;
1960 double inbits = s->avctx->rc_max_rate *
1961 av_q2d(s->avctx->time_base);
1962 int minbits = s->frame_bits - 8 *
1963 (s->vbv_delay_ptr - s->pb.buf - 1);
1964 double bits = s->rc_context.buffer_index + minbits - inbits;
1967 av_log(s->avctx, AV_LOG_ERROR,
1968 "Internal error, negative bits\n");
1970 assert(s->repeat_first_field == 0);
/* vbv_delay is expressed in 90kHz clock ticks */
1972 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1973 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1974 s->avctx->rc_max_rate;
1976 vbv_delay = FFMAX(vbv_delay, min_delay);
1978 av_assert0(vbv_delay < 0xFFFF);
/* patch the 16-bit vbv_delay field back into the written header
 * (it straddles byte boundaries, hence the mask/shift dance) */
1980 s->vbv_delay_ptr[0] &= 0xF8;
1981 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1982 s->vbv_delay_ptr[1] = vbv_delay >> 5;
1983 s->vbv_delay_ptr[2] &= 0x07;
1984 s->vbv_delay_ptr[2] |= vbv_delay << 3;
1986 props = av_cpb_properties_alloc(&props_size);
1988 return AVERROR(ENOMEM);
/* CPB side data uses the 27MHz clock: 90kHz * 300 */
1989 props->vbv_delay = vbv_delay * 300;
1991 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
1992 (uint8_t*)props, props_size);
1998 #if FF_API_VBV_DELAY
1999 FF_DISABLE_DEPRECATION_WARNINGS
2000 avctx->vbv_delay = vbv_delay * 300;
2001 FF_ENABLE_DEPRECATION_WARNINGS
2004 s->total_bits += s->frame_bits;
2005 #if FF_API_STAT_BITS
2006 FF_DISABLE_DEPRECATION_WARNINGS
2007 avctx->frame_bits = s->frame_bits;
2008 FF_ENABLE_DEPRECATION_WARNINGS
/* --- timestamps: dts lags pts by one frame when B frames exist --- */
2012 pkt->pts = s->current_picture.f->pts;
2013 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2014 if (!s->current_picture.f->coded_picture_number)
2015 pkt->dts = pkt->pts - s->dts_delta;
2017 pkt->dts = s->reordered_pts;
2018 s->reordered_pts = pkt->pts;
2020 pkt->dts = pkt->pts;
2021 if (s->current_picture.f->key_frame)
2022 pkt->flags |= AV_PKT_FLAG_KEY;
2024 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2029 /* release non-reference frames */
2030 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2031 if (!s->picture[i].reference)
2032 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2035 av_assert1((s->frame_bits & 7) == 0);
2037 pkt->size = s->frame_bits / 8;
2038 *got_packet = !!pkt->size;
2042 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2043 int n, int threshold)
/* Drop near-empty DCT blocks: if the (weighted) cost of the few small
 * coefficients in block n stays below 'threshold', zero the block so it
 * costs no bits.  A negative threshold means the DC coefficient must be
 * preserved (skip_dc mode - exact setup is in elided lines). */
/* per-position cost weights in zigzag order: low frequencies are
 * cheap to keep, positions past the first 3 rows count as 0 */
2045 static const char tab[64] = {
2046 3, 2, 2, 1, 1, 1, 1, 1,
2047 1, 1, 1, 1, 1, 1, 1, 1,
2048 1, 1, 1, 1, 1, 1, 1, 1,
2049 0, 0, 0, 0, 0, 0, 0, 0,
2050 0, 0, 0, 0, 0, 0, 0, 0,
2051 0, 0, 0, 0, 0, 0, 0, 0,
2052 0, 0, 0, 0, 0, 0, 0, 0,
2053 0, 0, 0, 0, 0, 0, 0, 0
2058 int16_t *block = s->block[n];
2059 const int last_index = s->block_last_index[n];
2062 if (threshold < 0) {
2064 threshold = -threshold;
2068 /* Are all we could set to zero already zero? */
2069 if (last_index <= skip_dc - 1)
/* accumulate the cost of all nonzero coefficients; any coefficient
 * larger than 1 (or past the weighted zone) makes the block worth
 * keeping (logic partially in elided lines) */
2072 for (i = 0; i <= last_index; i++) {
2073 const int j = s->intra_scantable.permutated[i];
2074 const int level = FFABS(block[j]);
2076 if (skip_dc && i == 0)
2080 } else if (level > 1) {
2086 if (score >= threshold)
/* cheap enough to drop: zero everything after the (optional) DC */
2088 for (i = skip_dc; i <= last_index; i++) {
2089 const int j = s->intra_scantable.permutated[i];
2093 s->block_last_index[n] = 0;
2095 s->block_last_index[n] = -1;
2098 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
/* Clamp quantized coefficients to the range representable by the
 * target bitstream syntax ([min_qcoeff, max_qcoeff]), counting and
 * reporting how many had to be clipped. */
2102 const int maxlevel = s->max_qcoeff;
2103 const int minlevel = s->min_qcoeff;
2107 i = 1; // skip clipping of intra dc
2111 for (; i <= last_index; i++) {
2112 const int j = s->intra_scantable.permutated[i];
2113 int level = block[j];
2115 if (level > maxlevel) {
2118 } else if (level < minlevel) {
/* clipping distorts the block noticeably; warn in the cheap
 * mb-decision mode where it is not compensated for */
2126 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2127 av_log(s->avctx, AV_LOG_INFO,
2128 "warning, clipping %d dct coefficients to %d..%d\n",
2129 overflow, minlevel, maxlevel);
2132 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
/* Build an 8x8 psychovisual weight table for noise shaping: each pixel
 * is weighted by the local variance of its 3x3 neighbourhood (clamped
 * at block edges), so quantization noise is steered into busy areas. */
2136 for (y = 0; y < 8; y++) {
2137 for (x = 0; x < 8; x++) {
/* gather sum and sum-of-squares over the 3x3 window around (x,y) */
2143 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2144 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2145 int v = ptr[x2 + y2 * stride];
/* weight ~ 36 * stddev-like term of the window, normalized by count */
2151 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2156 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2157 int motion_x, int motion_y,
2158 int mb_block_height,
2162 int16_t weight[12][64];
2163 int16_t orig[12][64];
2164 const int mb_x = s->mb_x;
2165 const int mb_y = s->mb_y;
2168 int dct_offset = s->linesize * 8; // default for progressive frames
2169 int uv_dct_offset = s->uvlinesize * 8;
2170 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2171 ptrdiff_t wrap_y, wrap_c;
2173 for (i = 0; i < mb_block_count; i++)
2174 skip_dct[i] = s->skipdct;
2176 if (s->adaptive_quant) {
2177 const int last_qp = s->qscale;
2178 const int mb_xy = mb_x + mb_y * s->mb_stride;
2180 s->lambda = s->lambda_table[mb_xy];
2183 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2184 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2185 s->dquant = s->qscale - last_qp;
2187 if (s->out_format == FMT_H263) {
2188 s->dquant = av_clip(s->dquant, -2, 2);
2190 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2192 if (s->pict_type == AV_PICTURE_TYPE_B) {
2193 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2196 if (s->mv_type == MV_TYPE_8X8)
2202 ff_set_qscale(s, last_qp + s->dquant);
2203 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2204 ff_set_qscale(s, s->qscale + s->dquant);
2206 wrap_y = s->linesize;
2207 wrap_c = s->uvlinesize;
2208 ptr_y = s->new_picture.f->data[0] +
2209 (mb_y * 16 * wrap_y) + mb_x * 16;
2210 ptr_cb = s->new_picture.f->data[1] +
2211 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2212 ptr_cr = s->new_picture.f->data[2] +
2213 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2215 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2216 uint8_t *ebuf = s->sc.edge_emu_buffer + 36 * wrap_y;
2217 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2218 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2219 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2221 16, 16, mb_x * 16, mb_y * 16,
2222 s->width, s->height);
2224 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2226 mb_block_width, mb_block_height,
2227 mb_x * mb_block_width, mb_y * mb_block_height,
2229 ptr_cb = ebuf + 16 * wrap_y;
2230 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2232 mb_block_width, mb_block_height,
2233 mb_x * mb_block_width, mb_y * mb_block_height,
2235 ptr_cr = ebuf + 16 * wrap_y + 16;
2239 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2240 int progressive_score, interlaced_score;
2242 s->interlaced_dct = 0;
2243 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2244 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2245 NULL, wrap_y, 8) - 400;
2247 if (progressive_score > 0) {
2248 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2249 NULL, wrap_y * 2, 8) +
2250 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2251 NULL, wrap_y * 2, 8);
2252 if (progressive_score > interlaced_score) {
2253 s->interlaced_dct = 1;
2255 dct_offset = wrap_y;
2256 uv_dct_offset = wrap_c;
2258 if (s->chroma_format == CHROMA_422 ||
2259 s->chroma_format == CHROMA_444)
2265 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2266 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2267 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2268 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2270 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2274 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2275 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2276 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2277 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2278 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2279 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2280 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2281 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2282 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2283 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2284 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2285 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2289 op_pixels_func (*op_pix)[4];
2290 qpel_mc_func (*op_qpix)[16];
2291 uint8_t *dest_y, *dest_cb, *dest_cr;
2293 dest_y = s->dest[0];
2294 dest_cb = s->dest[1];
2295 dest_cr = s->dest[2];
2297 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2298 op_pix = s->hdsp.put_pixels_tab;
2299 op_qpix = s->qdsp.put_qpel_pixels_tab;
2301 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2302 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2305 if (s->mv_dir & MV_DIR_FORWARD) {
2306 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2307 s->last_picture.f->data,
2309 op_pix = s->hdsp.avg_pixels_tab;
2310 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2312 if (s->mv_dir & MV_DIR_BACKWARD) {
2313 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2314 s->next_picture.f->data,
2318 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2319 int progressive_score, interlaced_score;
2321 s->interlaced_dct = 0;
2322 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2323 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2327 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2328 progressive_score -= 400;
2330 if (progressive_score > 0) {
2331 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2333 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2337 if (progressive_score > interlaced_score) {
2338 s->interlaced_dct = 1;
2340 dct_offset = wrap_y;
2341 uv_dct_offset = wrap_c;
2343 if (s->chroma_format == CHROMA_422)
2349 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2350 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2351 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2352 dest_y + dct_offset, wrap_y);
2353 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2354 dest_y + dct_offset + 8, wrap_y);
2356 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2360 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2361 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2362 if (!s->chroma_y_shift) { /* 422 */
2363 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2364 dest_cb + uv_dct_offset, wrap_c);
2365 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2366 dest_cr + uv_dct_offset, wrap_c);
2369 /* pre quantization */
2370 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2371 2 * s->qscale * s->qscale) {
2373 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2375 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2377 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2378 wrap_y, 8) < 20 * s->qscale)
2380 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2381 wrap_y, 8) < 20 * s->qscale)
2383 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2385 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2387 if (!s->chroma_y_shift) { /* 422 */
2388 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2389 dest_cb + uv_dct_offset,
2390 wrap_c, 8) < 20 * s->qscale)
2392 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2393 dest_cr + uv_dct_offset,
2394 wrap_c, 8) < 20 * s->qscale)
2400 if (s->quantizer_noise_shaping) {
2402 get_visual_weight(weight[0], ptr_y , wrap_y);
2404 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2406 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2408 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2410 get_visual_weight(weight[4], ptr_cb , wrap_c);
2412 get_visual_weight(weight[5], ptr_cr , wrap_c);
2413 if (!s->chroma_y_shift) { /* 422 */
2415 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2418 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2421 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2424 /* DCT & quantize */
2425 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2427 for (i = 0; i < mb_block_count; i++) {
2430 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2431 // FIXME we could decide to change to quantizer instead of
2433 // JS: I don't think that would be a good idea it could lower
2434 // quality instead of improve it. Just INTRADC clipping
2435 // deserves changes in quantizer
2437 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2439 s->block_last_index[i] = -1;
2441 if (s->quantizer_noise_shaping) {
2442 for (i = 0; i < mb_block_count; i++) {
2444 s->block_last_index[i] =
2445 dct_quantize_refine(s, s->block[i], weight[i],
2446 orig[i], i, s->qscale);
2451 if (s->luma_elim_threshold && !s->mb_intra)
2452 for (i = 0; i < 4; i++)
2453 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2454 if (s->chroma_elim_threshold && !s->mb_intra)
2455 for (i = 4; i < mb_block_count; i++)
2456 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2458 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2459 for (i = 0; i < mb_block_count; i++) {
2460 if (s->block_last_index[i] == -1)
2461 s->coded_score[i] = INT_MAX / 256;
2466 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2467 s->block_last_index[4] =
2468 s->block_last_index[5] = 0;
2470 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2471 if (!s->chroma_y_shift) { /* 422 / 444 */
2472 for (i=6; i<12; i++) {
2473 s->block_last_index[i] = 0;
2474 s->block[i][0] = s->block[4][0];
2479 // non c quantize code returns incorrect block_last_index FIXME
2480 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2481 for (i = 0; i < mb_block_count; i++) {
2483 if (s->block_last_index[i] > 0) {
2484 for (j = 63; j > 0; j--) {
2485 if (s->block[i][s->intra_scantable.permutated[j]])
2488 s->block_last_index[i] = j;
2493 /* huffman encode */
2494 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2495 case AV_CODEC_ID_MPEG1VIDEO:
2496 case AV_CODEC_ID_MPEG2VIDEO:
2497 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2498 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2500 case AV_CODEC_ID_MPEG4:
2501 if (CONFIG_MPEG4_ENCODER)
2502 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2504 case AV_CODEC_ID_MSMPEG4V2:
2505 case AV_CODEC_ID_MSMPEG4V3:
2506 case AV_CODEC_ID_WMV1:
2507 if (CONFIG_MSMPEG4_ENCODER)
2508 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2510 case AV_CODEC_ID_WMV2:
2511 if (CONFIG_WMV2_ENCODER)
2512 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2514 case AV_CODEC_ID_H261:
2515 if (CONFIG_H261_ENCODER)
2516 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2518 case AV_CODEC_ID_H263:
2519 case AV_CODEC_ID_H263P:
2520 case AV_CODEC_ID_FLV1:
2521 case AV_CODEC_ID_RV10:
2522 case AV_CODEC_ID_RV20:
2523 if (CONFIG_H263_ENCODER)
2524 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2526 case AV_CODEC_ID_MJPEG:
2527 case AV_CODEC_ID_AMV:
2528 if (CONFIG_MJPEG_ENCODER)
2529 ff_mjpeg_encode_mb(s, s->block);
/* Dispatch macroblock encoding based on chroma subsampling: the trailing
 * arguments are (motion_x, motion_y, chroma MB height, chroma wrap/height
 * granularity, number of 8x8 blocks per MB) — 6 blocks for 4:2:0,
 * 8 for 4:2:2, 12 for 4:4:4. */
2536 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2538     if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2539     else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2540     else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/* Snapshot the encoder state that encoding a macroblock may mutate, from
 * context s into context d, so a trial encode can later be rolled back or
 * compared (used by the RD macroblock-decision path in encode_thread).
 * Copies MV prediction state, DC predictors, bit-count statistics, the
 * quantizer, and codec-specific state. */
2543 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2546     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2549     d->mb_skip_run= s->mb_skip_run;
2551         d->last_dc[i] = s->last_dc[i];
/* statistics — accumulated bit counts per category */
2554     d->mv_bits= s->mv_bits;
2555     d->i_tex_bits= s->i_tex_bits;
2556     d->p_tex_bits= s->p_tex_bits;
2557     d->i_count= s->i_count;
2558     d->f_count= s->f_count;
2559     d->b_count= s->b_count;
2560     d->skip_count= s->skip_count;
2561     d->misc_bits= s->misc_bits;
2565     d->qscale= s->qscale;
2566     d->dquant= s->dquant;
/* MSMPEG4-specific escape-code state */
2568     d->esc3_level_length= s->esc3_level_length;
/* Counterpart of copy_context_before_encode(): after a trial macroblock
 * encode, copy the resulting state from s into d (typically into best_s when
 * the trial won the RD comparison). In addition to the fields saved before
 * encoding, this also copies the motion vectors themselves, the MB coding
 * decision (intra/skip/mv_type/mv_dir), the bitstream writers for data
 * partitioning, per-block last-index values, and the interlaced-DCT flag. */
2571 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2574     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2575     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2578     d->mb_skip_run= s->mb_skip_run;
2580         d->last_dc[i] = s->last_dc[i];
/* statistics — accumulated bit counts per category */
2583     d->mv_bits= s->mv_bits;
2584     d->i_tex_bits= s->i_tex_bits;
2585     d->p_tex_bits= s->p_tex_bits;
2586     d->i_count= s->i_count;
2587     d->f_count= s->f_count;
2588     d->b_count= s->b_count;
2589     d->skip_count= s->skip_count;
2590     d->misc_bits= s->misc_bits;
/* macroblock coding decision */
2592     d->mb_intra= s->mb_intra;
2593     d->mb_skipped= s->mb_skipped;
2594     d->mv_type= s->mv_type;
2595     d->mv_dir= s->mv_dir;
2597     if(s->data_partitioning){
2599         d->tex_pb= s->tex_pb;
2603         d->block_last_index[i]= s->block_last_index[i];
2604     d->interlaced_dct= s->interlaced_dct;
2605     d->qscale= s->qscale;
2607     d->esc3_level_length= s->esc3_level_length;
/* Trial-encode one macroblock with a given candidate type and compare it
 * against the best encoding found so far (rate-distortion or pure bit-count
 * comparison, depending on mb_decision). The trial writes into one of two
 * alternating scratch PutBitContexts (selected by *next_block) and, for
 * FF_MB_DECISION_RD, into a reconstruction scratchpad so sse_mb() can
 * measure distortion. On a win, the state is captured into *best via
 * copy_context_after_encode(). */
2610 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2611                            PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2612                            int *dmin, int *next_block, int motion_x, int motion_y)
2615     uint8_t *dest_backup[3];
2617     copy_context_before_encode(s, backup, type);
/* select the alternating scratch block array / bit writers for this trial */
2619     s->block= s->blocks[*next_block];
2620     s->pb= pb[*next_block];
2621     if(s->data_partitioning){
2622         s->pb2   = pb2   [*next_block];
2623         s->tex_pb= tex_pb[*next_block];
/* redirect reconstruction output into the RD scratchpad (luma, then the two
 * chroma planes packed after 16 luma lines) so the real picture is untouched */
2627     memcpy(dest_backup, s->dest, sizeof(s->dest));
2628     s->dest[0] = s->sc.rd_scratchpad;
2629     s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2630     s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2631     av_assert0(s->linesize >= 32); //FIXME
2634     encode_mb(s, motion_x, motion_y);
/* score = bits spent on this MB across all active bit writers */
2636     score= put_bits_count(&s->pb);
2637     if(s->data_partitioning){
2638         score+= put_bits_count(&s->pb2);
2639         score+= put_bits_count(&s->tex_pb);
2642     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2643         ff_mpv_decode_mb(s, s->block);
/* RD cost: bits * lambda2 + SSE shifted into the lambda fixed-point scale */
2645         score *= s->lambda2;
2646         score += sse_mb(s) << FF_LAMBDA_SHIFT;
2650     memcpy(s->dest, dest_backup, sizeof(s->dest));
2657         copy_context_after_encode(best, s, type);
/* Sum of squared errors between two pixel buffers of size w x h.
 * Fast DSP paths handle the common full-block sizes (16x16, 8x8); the
 * scalar fallback uses the shared ff_square_tab (offset by 256 so that
 * negative differences index correctly). Used for PSNR/error accounting
 * on partial edge blocks. */
2661 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2662     uint32_t *sq = ff_square_tab + 256;
2667         return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2668     else if(w==8 && h==8)
2669         return s->mecc.sse[1](NULL, src1, src2, stride, 8);
/* generic path: accumulate squared per-pixel differences */
2673             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/* Distortion of the current macroblock: compare the reconstructed MB in
 * s->dest against the source picture (new_picture). Full 16x16 MBs use the
 * DSP sse[] (or nsse[] noise-preserving comparison when mb_cmp==FF_CMP_NSSE);
 * MBs clipped at the right/bottom picture edge fall back to the scalar sse()
 * with the clipped width/height. Chroma is assumed 8x8 here (4:2:0 fast
 * path); the clipped path halves w/h for chroma. */
2682 static int sse_mb(MpegEncContext *s){
/* clip MB dimensions at the picture border */
2686     if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2687     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2690         if(s->avctx->mb_cmp == FF_CMP_NSSE){
2691             return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2692                    s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2693                    s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2695             return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2696                    s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2697                    s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
/* edge macroblock: scalar fallback with clipped dimensions */
2700         return  sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2701                +sse(s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2702                +sse(s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/* Slice-thread worker: pre-pass motion estimation for P-frames, scanning
 * the slice bottom-up and right-to-left (reverse order relative to the main
 * estimation pass). Uses the pre-pass diamond size (avctx->pre_dia_size). */
2705 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2706     MpegEncContext *s= *(void**)arg;
2710     s->me.dia_size= s->avctx->pre_dia_size;
2711     s->first_slice_line=1;
2712     for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2713         for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2714             ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2716         s->first_slice_line=0;
/* Slice-thread worker: main motion-estimation pass over the slice's
 * macroblock rows. Chooses B- or P-frame estimation per picture type and
 * stores motion vectors / mb_type in the context. block_index[0..3] are
 * advanced by 2 per MB (two 8x8 luma block columns). */
2724 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2725     MpegEncContext *s= *(void**)arg;
2727     ff_check_alignment();
2729     s->me.dia_size= s->avctx->dia_size;
2730     s->first_slice_line=1;
2731     for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2732         s->mb_x=0; //for block init below
2733         ff_init_block_index(s);
2734         for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
/* advance luma block indices by one MB (2 block columns each) */
2735             s->block_index[0]+=2;
2736             s->block_index[1]+=2;
2737             s->block_index[2]+=2;
2738             s->block_index[3]+=2;
2740             /* compute motion vector & mb_type and store in context */
2741             if(s->pict_type==AV_PICTURE_TYPE_B)
2742                 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2744                 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2746         s->first_slice_line=0;
/* Slice-thread worker: compute per-macroblock luma variance and mean over
 * the source picture, stored into current_picture.mb_var / mb_mean, and
 * accumulate the slice variance sum for later merging (merge_context_after_me).
 * varc = (sum(pix^2) - sum(pix)^2/256 + 500 + 128) >> 8, i.e. a rounded,
 * biased variance of the 16x16 block. */
2751 static int mb_var_thread(AVCodecContext *c, void *arg){
2752     MpegEncContext *s= *(void**)arg;
2755     ff_check_alignment();
2757     for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2758         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2761             uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2763             int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2765             varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2766                     (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2768             s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2769             s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2770             s->me.mb_var_sum_temp    += varc;
/* Finalize the bitstream at the end of a slice: merge MPEG-4 data
 * partitions and write stuffing (MPEG-4), or MJPEG stuffing; then
 * byte-align and flush the main PutBitContext. In pass-1 rate-control
 * mode the alignment/stuffing bits are accounted as misc_bits. */
2776 static void write_slice_end(MpegEncContext *s){
2777     if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2778         if(s->partitioned_frame){
2779             ff_mpeg4_merge_partitions(s);
2782         ff_mpeg4_stuffing(&s->pb);
2783     }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2784         ff_mjpeg_encode_stuffing(s);
/* byte-align and flush the bitstream writer */
2787     avpriv_align_put_bits(&s->pb);
2788     flush_put_bits(&s->pb);
2790     if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2791         s->misc_bits+= get_bits_diff(s);
/* Write one 12-byte macroblock-info record (for AV_PKT_DATA_H263_MB_INFO
 * style side data) into the mb_info buffer: bit offset of the MB in the
 * packet, quantizer, GOB number, MB address within the GOB, and the H.263
 * motion-vector predictors. ptr points at the last (most recently reserved)
 * 12-byte slot. 4MV predictors are not implemented and written as 0. */
2794 static void write_mb_info(MpegEncContext *s)
2796     uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2797     int offset = put_bits_count(&s->pb);
2798     int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2799     int gobn = s->mb_y / s->gob_index;
2801     if (CONFIG_H263_ENCODER)
2802         ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2803     bytestream_put_le32(&ptr, offset);
2804     bytestream_put_byte(&ptr, s->qscale);
2805     bytestream_put_byte(&ptr, gobn);
2806     bytestream_put_le16(&ptr, mba);
2807     bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2808     bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2809     /* 4MV not implemented */
2810     bytestream_put_byte(&ptr, 0); /* hmv2 */
2811     bytestream_put_byte(&ptr, 0); /* vmv2 */
/* Maintain the macroblock-info side-data records as the bitstream grows.
 * Called per-MB (startcode=0) to reserve a new 12-byte slot once mb_info
 * bytes have been written since the previous record, and at each resync
 * startcode (startcode=1) to pin the record position to the startcode
 * boundary. A slot may be reserved here and filled only on a later call —
 * see the comment below. */
2814 static void update_mb_info(MpegEncContext *s, int startcode)
2818     if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2819         s->mb_info_size += 12;
2820         s->prev_mb_info = s->last_mb_info;
2823         s->prev_mb_info = put_bits_count(&s->pb)/8;
2824         /* This might have incremented mb_info_size above, and we return without
2825          * actually writing any info into that slot yet. But in that case,
2826          * this will be called again at the start of the after writing the
2827          * start code, actually writing the mb info. */
2831     s->last_mb_info = put_bits_count(&s->pb)/8;
2832     if (!s->mb_info_size)
2833         s->mb_info_size += 12;
/* Grow the shared output bitstream buffer when fewer than `threshold` bytes
 * remain, enlarging it by `size_increase`. Only possible when a single slice
 * context owns the buffer and it is the avctx-internal byte_buffer. Pointers
 * into the old buffer (ptr_lastgob, vbv_delay_ptr) are rebased onto the new
 * one, and the PutBitContext is rebased with rebase_put_bits().
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure/overflow, and
 * AVERROR(EINVAL) if the buffer is still too small after the attempt. */
2837 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2839     if (   s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2840         && s->slice_context_count == 1
2841         && s->pb.buf == s->avctx->internal->byte_buffer) {
/* remember offsets of pointers into the buffer so they can be rebased */
2842         int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2843         int vbv_pos     = s->vbv_delay_ptr - s->pb.buf;
2845         uint8_t *new_buffer = NULL;
2846         int new_buffer_size = 0;
/* guard against integer overflow of the grown size */
2848         if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2849             av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2850             return AVERROR(ENOMEM);
2853         av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2854                               s->avctx->internal->byte_buffer_size + size_increase);
2856             return AVERROR(ENOMEM);
2858         memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2859         av_free(s->avctx->internal->byte_buffer);
2860         s->avctx->internal->byte_buffer      = new_buffer;
2861         s->avctx->internal->byte_buffer_size = new_buffer_size;
2862         rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2863         s->ptr_lastgob   = s->pb.buf + lastgob_pos;
2864         s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2866     if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2867         return AVERROR(EINVAL);
/* Slice-thread worker: the main macroblock encoding loop. For each MB row in
 * [start_mb_y, end_mb_y) and each MB column it:
 *   1. grows the output buffer if needed,
 *   2. optionally emits a GOB / video-packet / slice resync header,
 *   3. either trial-encodes all candidate MB types via encode_mb_hq() and
 *      keeps the rate-distortion best (mb_decision >= FF_MB_DECISION_BITS,
 *      or QP_RD), or encodes the single possible MB type directly,
 *   4. updates motion-val tables, reconstructs the MB when a reference is
 *      needed, and accumulates PSNR error sums.
 * Uses double-buffered scratch PutBitContexts (pb/pb2/tex_pb indexed by
 * next_block) so a losing trial can be discarded. */
2871 static int encode_thread(AVCodecContext *c, void *arg){
2872     MpegEncContext *s= *(void**)arg;
2873     int mb_x, mb_y, pdif = 0;
2874     int chr_h= 16>>s->chroma_y_shift;
2876     MpegEncContext best_s = { 0 }, backup_s;
2877     uint8_t bit_buf[2][MAX_MB_BYTES];
2878     uint8_t bit_buf2[2][MAX_MB_BYTES];
2879     uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2880     PutBitContext pb[2], pb2[2], tex_pb[2];
2882     ff_check_alignment();
/* set up the two alternating scratch bitstream writers for RD trials */
2885         init_put_bits(&pb    [i], bit_buf    [i], MAX_MB_BYTES);
2886         init_put_bits(&pb2   [i], bit_buf2   [i], MAX_MB_BYTES);
2887         init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2890     s->last_bits= put_bits_count(&s->pb);
2901     /* init last dc values */
2902     /* note: quant matrix value (8) is implied here */
2903     s->last_dc[i] = 128 << s->intra_dc_precision;
2905     s->current_picture.encoding_error[i] = 0;
/* AMV uses different DC predictor initial values per plane */
2907     if(s->codec_id==AV_CODEC_ID_AMV){
2908         s->last_dc[0] = 128*8/13;
2909         s->last_dc[1] = 128*8/14;
2910         s->last_dc[2] = 128*8/14;
2913     memset(s->last_mv, 0, sizeof(s->last_mv));
/* codec-specific per-slice setup */
2917     switch(s->codec_id){
2918     case AV_CODEC_ID_H263:
2919     case AV_CODEC_ID_H263P:
2920     case AV_CODEC_ID_FLV1:
2921         if (CONFIG_H263_ENCODER)
2922             s->gob_index = H263_GOB_HEIGHT(s->height);
2924     case AV_CODEC_ID_MPEG4:
2925         if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2926             ff_mpeg4_init_partitions(s);
2932     s->first_slice_line = 1;
2933     s->ptr_lastgob = s->pb.buf;
2934     for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2938         ff_set_qscale(s, s->qscale);
2939         ff_init_block_index(s);
2941         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2942             int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2943             int mb_type= s->mb_type[xy];
/* make sure there is room for at least one worst-case MB */
2947             int size_increase =  s->avctx->internal->byte_buffer_size/4
2948                                + s->mb_width*MAX_MB_BYTES;
2950             ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
2951             if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2952                 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2955             if(s->data_partitioning){
2956                 if(   s->pb2   .buf_end - s->pb2   .buf - (put_bits_count(&s->    pb2)>>3) < MAX_MB_BYTES
2957                    || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2958                     av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2964             s->mb_y = mb_y;  // moved into loop, can get changed by H.261
2965             ff_update_block_index(s);
/* H.261 encodes MBs in a codec-specific order */
2967             if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2968                 ff_h261_reorder_mb_index(s);
2969                 xy= s->mb_y*s->mb_stride + s->mb_x;
2970                 mb_type= s->mb_type[xy];
2973             /* write gob / video packet header  */
2975                 int current_packet_size, is_gob_start;
2977                 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2979                 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2981                 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
/* codec-specific constraints on where a resync point may start */
2983                 switch(s->codec_id){
2984                 case AV_CODEC_ID_H263:
2985                 case AV_CODEC_ID_H263P:
2986                     if(!s->h263_slice_structured)
2987                         if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2989                 case AV_CODEC_ID_MPEG2VIDEO:
2990                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2991                 case AV_CODEC_ID_MPEG1VIDEO:
2992                     if(s->mb_skip_run) is_gob_start=0;
2994                 case AV_CODEC_ID_MJPEG:
2995                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3000                     if(s->start_mb_y != mb_y || mb_x!=0){
3003                         if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3004                             ff_mpeg4_init_partitions(s);
3008                     av_assert2((put_bits_count(&s->pb)&7) == 0);
3009                     current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
/* error_rate debugging: randomly drop packets to simulate a lossy channel */
3011                     if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3012                         int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3013                         int d = 100 / s->error_rate;
3015                             current_packet_size=0;
3016                             s->pb.buf_ptr= s->ptr_lastgob;
3017                             assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3021 #if FF_API_RTP_CALLBACK
3022 FF_DISABLE_DEPRECATION_WARNINGS
3023                     if (s->avctx->rtp_callback){
3024                         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3025                         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3027 FF_ENABLE_DEPRECATION_WARNINGS
3029                     update_mb_info(s, 1);
/* emit the resync / slice / GOB header for this codec */
3031                     switch(s->codec_id){
3032                     case AV_CODEC_ID_MPEG4:
3033                         if (CONFIG_MPEG4_ENCODER) {
3034                             ff_mpeg4_encode_video_packet_header(s);
3035                             ff_mpeg4_clean_buffers(s);
3038                     case AV_CODEC_ID_MPEG1VIDEO:
3039                     case AV_CODEC_ID_MPEG2VIDEO:
3040                         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3041                             ff_mpeg1_encode_slice_header(s);
3042                             ff_mpeg1_clean_buffers(s);
3045                     case AV_CODEC_ID_H263:
3046                     case AV_CODEC_ID_H263P:
3047                         if (CONFIG_H263_ENCODER)
3048                             ff_h263_encode_gob_header(s, mb_y);
3052                     if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3053                         int bits= put_bits_count(&s->pb);
3054                         s->misc_bits+= bits - s->last_bits;
3058                     s->ptr_lastgob += current_packet_size;
3059                     s->first_slice_line=1;
3060                     s->resync_mb_x=mb_x;
3061                     s->resync_mb_y=mb_y;
3065             if(  (s->resync_mb_x   == s->mb_x)
3066                && s->resync_mb_y+1 == s->mb_y){
3067                 s->first_slice_line=0;
3071             s->dquant=0; //only for QP_RD
3073             update_mb_info(s, 0);
/* more than one candidate MB type, or QP_RD: try each candidate with
 * encode_mb_hq() and keep the rate-distortion best */
3075             if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3077                 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3079                 copy_context_before_encode(&backup_s, s, -1);
3081                 best_s.data_partitioning= s->data_partitioning;
3082                 best_s.partitioned_frame= s->partitioned_frame;
3083                 if(s->data_partitioning){
3084                     backup_s.pb2= s->pb2;
3085                     backup_s.tex_pb= s->tex_pb;
3088                 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3089                     s->mv_dir = MV_DIR_FORWARD;
3090                     s->mv_type = MV_TYPE_16X16;
3092                     s->mv[0][0][0] = s->p_mv_table[xy][0];
3093                     s->mv[0][0][1] = s->p_mv_table[xy][1];
3094                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3095                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3097                 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3098                     s->mv_dir = MV_DIR_FORWARD;
3099                     s->mv_type = MV_TYPE_FIELD;
3102                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3103                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3104                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3106                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3107                                  &dmin, &next_block, 0, 0);
3109                 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3110                     s->mv_dir = MV_DIR_FORWARD;
3111                     s->mv_type = MV_TYPE_16X16;
3115                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3116                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3118                 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3119                     s->mv_dir = MV_DIR_FORWARD;
3120                     s->mv_type = MV_TYPE_8X8;
3123                         s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3124                         s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3126                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3127                                  &dmin, &next_block, 0, 0);
3129                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3130                     s->mv_dir = MV_DIR_FORWARD;
3131                     s->mv_type = MV_TYPE_16X16;
3133                     s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3134                     s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3135                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3136                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3138                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3139                     s->mv_dir = MV_DIR_BACKWARD;
3140                     s->mv_type = MV_TYPE_16X16;
3142                     s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3143                     s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3144                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3145                                  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3147                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3148                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3149                     s->mv_type = MV_TYPE_16X16;
3151                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3152                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3153                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3154                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3155                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3156                                  &dmin, &next_block, 0, 0);
3158                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3159                     s->mv_dir = MV_DIR_FORWARD;
3160                     s->mv_type = MV_TYPE_FIELD;
3163                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3164                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3165                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3167                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3168                                  &dmin, &next_block, 0, 0);
3170                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3171                     s->mv_dir = MV_DIR_BACKWARD;
3172                     s->mv_type = MV_TYPE_FIELD;
3175                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3176                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3177                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3179                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3180                                  &dmin, &next_block, 0, 0);
3182                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3183                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3184                     s->mv_type = MV_TYPE_FIELD;
3186                     for(dir=0; dir<2; dir++){
3188                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3189                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3190                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3193                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3194                                  &dmin, &next_block, 0, 0);
3196                 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3198                     s->mv_type = MV_TYPE_16X16;
3202                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3203                                  &dmin, &next_block, 0, 0);
3204                     if(s->h263_pred || s->h263_aic){
3206                             s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3208                             ff_clean_intra_table_entries(s); //old mode?
/* QP_RD: additionally try +-1 / +-2 quantizer deltas around the best MB */
3212                 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3213                     if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3214                         const int last_qp= backup_s.qscale;
3217                         const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3218                         static const int dquant_tab[4]={-1,1,-2,2};
3219                         int storecoefs = s->mb_intra && s->dc_val[0];
3221                         av_assert2(backup_s.dquant == 0);
3224                         s->mv_dir= best_s.mv_dir;
3225                         s->mv_type = MV_TYPE_16X16;
3226                         s->mb_intra= best_s.mb_intra;
3227                         s->mv[0][0][0] = best_s.mv[0][0][0];
3228                         s->mv[0][0][1] = best_s.mv[0][0][1];
3229                         s->mv[1][0][0] = best_s.mv[1][0][0];
3230                         s->mv[1][0][1] = best_s.mv[1][0][1];
3232                         qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3233                         for(; qpi<4; qpi++){
3234                             int dquant= dquant_tab[qpi];
3235                             qp= last_qp + dquant;
3236                             if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3238                             backup_s.dquant= dquant;
/* save intra DC/AC predictors so a losing trial can restore them */
3241                                     dc[i]= s->dc_val[0][ s->block_index[i] ];
3242                                     memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3246                             encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3247                                          &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3248                             if(best_s.qscale != qp){
3251                                         s->dc_val[0][ s->block_index[i]   ]= dc[i];
3252                                         memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3259                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3260                     int mx= s->b_direct_mv_table[xy][0];
3261                     int my= s->b_direct_mv_table[xy][1];
3263                     backup_s.dquant = 0;
3264                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3266                     ff_mpeg4_set_direct_mv(s, mx, my);
3267                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3268                                  &dmin, &next_block, mx, my);
3270                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3271                     backup_s.dquant = 0;
3272                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3274                     ff_mpeg4_set_direct_mv(s, 0, 0);
3275                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3276                                  &dmin, &next_block, 0, 0);
/* SKIP_RD: retry the best non-intra mode with coefficients forced to zero */
3278                 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3281                         coded |= s->block_last_index[i];
3284                         memcpy(s->mv, best_s.mv, sizeof(s->mv));
3285                         if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3286                             mx=my=0; //FIXME find the one we actually used
3287                             ff_mpeg4_set_direct_mv(s, mx, my);
3288                         }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3296                         s->mv_dir= best_s.mv_dir;
3297                         s->mv_type = best_s.mv_type;
3299 //                        s->mv[0][0][0] = best_s.mv[0][0][0];
3300 //                        s->mv[0][0][1] = best_s.mv[0][0][1];
3301 //                        s->mv[1][0][0] = best_s.mv[1][0][0];
3302 //                        s->mv[1][0][1] = best_s.mv[1][0][1];
3305                         encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3306                                      &dmin, &next_block, mx, my);
3311                 s->current_picture.qscale_table[xy] = best_s.qscale;
3313                 copy_context_after_encode(s, &best_s, -1);
/* splice the winning trial's bits from its scratch writer into the real pb */
3315                 pb_bits_count= put_bits_count(&s->pb);
3316                 flush_put_bits(&s->pb);
3317                 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3320                 if(s->data_partitioning){
3321                     pb2_bits_count= put_bits_count(&s->pb2);
3322                     flush_put_bits(&s->pb2);
3323                     avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3324                     s->pb2= backup_s.pb2;
3326                     tex_pb_bits_count= put_bits_count(&s->tex_pb);
3327                     flush_put_bits(&s->tex_pb);
3328                     avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3329                     s->tex_pb= backup_s.tex_pb;
3331                 s->last_bits= put_bits_count(&s->pb);
3333                 if (CONFIG_H263_ENCODER &&
3334                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3335                     ff_h263_update_motion_val(s);
/* copy the winning reconstruction out of the scratchpad if it lives there */
3337                 if(next_block==0){ //FIXME 16 vs linesize16
3338                     s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad                     , s->linesize  ,16);
3339                     s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize    , s->uvlinesize, 8);
3340                     s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3343                 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3344                     ff_mpv_decode_mb(s, s->block);
/* only one candidate MB type: set up MVs and encode directly */
3346                 int motion_x = 0, motion_y = 0;
3347                 s->mv_type=MV_TYPE_16X16;
3348                 // only one MB-Type possible
3351                 case CANDIDATE_MB_TYPE_INTRA:
3354                     motion_x= s->mv[0][0][0] = 0;
3355                     motion_y= s->mv[0][0][1] = 0;
3357                 case CANDIDATE_MB_TYPE_INTER:
3358                     s->mv_dir = MV_DIR_FORWARD;
3360                     motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3361                     motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3363                 case CANDIDATE_MB_TYPE_INTER_I:
3364                     s->mv_dir = MV_DIR_FORWARD;
3365                     s->mv_type = MV_TYPE_FIELD;
3368                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3369                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3370                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3373                 case CANDIDATE_MB_TYPE_INTER4V:
3374                     s->mv_dir = MV_DIR_FORWARD;
3375                     s->mv_type = MV_TYPE_8X8;
3378                         s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3379                         s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3382                 case CANDIDATE_MB_TYPE_DIRECT:
3383                     if (CONFIG_MPEG4_ENCODER) {
3384                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3386                         motion_x=s->b_direct_mv_table[xy][0];
3387                         motion_y=s->b_direct_mv_table[xy][1];
3388                         ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3391                 case CANDIDATE_MB_TYPE_DIRECT0:
3392                     if (CONFIG_MPEG4_ENCODER) {
3393                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3395                         ff_mpeg4_set_direct_mv(s, 0, 0);
3398                 case CANDIDATE_MB_TYPE_BIDIR:
3399                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3401                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3402                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3403                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3404                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3406                 case CANDIDATE_MB_TYPE_BACKWARD:
3407                     s->mv_dir = MV_DIR_BACKWARD;
3409                     motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3410                     motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3412                 case CANDIDATE_MB_TYPE_FORWARD:
3413                     s->mv_dir = MV_DIR_FORWARD;
3415                     motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3416                     motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3418                 case CANDIDATE_MB_TYPE_FORWARD_I:
3419                     s->mv_dir = MV_DIR_FORWARD;
3420                     s->mv_type = MV_TYPE_FIELD;
3423                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3424                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3425                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3428                 case CANDIDATE_MB_TYPE_BACKWARD_I:
3429                     s->mv_dir = MV_DIR_BACKWARD;
3430                     s->mv_type = MV_TYPE_FIELD;
3433                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3434                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3435                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3438                 case CANDIDATE_MB_TYPE_BIDIR_I:
3439                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3440                     s->mv_type = MV_TYPE_FIELD;
3442                     for(dir=0; dir<2; dir++){
3444                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3445                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3446                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3451                     av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3454                 encode_mb(s, motion_x, motion_y);
3456                 // RAL: Update last macroblock type
3457                 s->last_mv_dir = s->mv_dir;
3459                 if (CONFIG_H263_ENCODER &&
3460                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3461                     ff_h263_update_motion_val(s);
3463                     ff_mpv_decode_mb(s, s->block);
3466             /* clean the MV table in IPS frames for direct mode in B frames */
3467             if(s->mb_intra /* && I,P,S_TYPE */){
3468                 s->p_mv_table[xy][0]=0;
3469                 s->p_mv_table[xy][1]=0;
/* PSNR accounting against the source picture (dimensions clipped at edges) */
3472             if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3476                 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3477                 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3479                 s->current_picture.encoding_error[0] += sse(
3480                     s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3481                     s->dest[0], w, h, s->linesize);
3482                 s->current_picture.encoding_error[1] += sse(
3483                     s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
3484                     s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3485                 s->current_picture.encoding_error[2] += sse(
3486                     s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
3487                     s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3490             if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3491                 ff_h263_loop_filter(s);
3493             ff_dlog(s->avctx, "MB %d %d bits\n",
3494                     s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3498     //not beautiful here but we must write it before flushing so it has to be here
3499     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3500         ff_msmpeg4_encode_ext_header(s);
3504 #if FF_API_RTP_CALLBACK
3505 FF_DISABLE_DEPRECATION_WARNINGS
3506     /* Send the last GOB if RTP */
3507     if (s->avctx->rtp_callback) {
3508         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3509         pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3510         /* Call the RTP callback to send the last GOB */
3512         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3514 FF_ENABLE_DEPRECATION_WARNINGS
/* MERGE: add a per-slice counter from src into dst and zero it in src,
 * so repeated merges never double-count. */
3520 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold motion-estimation statistics gathered by a slice-thread context
 * into the main context after the ME pass.
 * NOTE(review): this excerpt is line-sampled; the closing brace and any
 * further MERGEd fields are outside the visible lines. */
3521 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3522 MERGE(me.scene_change_score);
3523 MERGE(me.mc_mb_var_sum_temp);
3524 MERGE(me.mb_var_sum_temp);
/* Fold per-slice encoding statistics and the slice's bitstream into the
 * main context after the encode pass (counterpart of
 * merge_context_after_me for the second phase).
 * NOTE(review): excerpt is line-sampled; additional MERGEd fields and the
 * closing braces are outside the visible lines. */
3527 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3530 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3531 MERGE(dct_count[1]);
3540 MERGE(er.error_count);
3541 MERGE(padding_bug_score);
3542 MERGE(current_picture.encoding_error[0]);
3543 MERGE(current_picture.encoding_error[1]);
3544 MERGE(current_picture.encoding_error[2]);
3546 if(dst->avctx->noise_reduction){
3547 for(i=0; i<64; i++){
3548 MERGE(dct_error_sum[0][i]);
3549 MERGE(dct_error_sum[1][i]);
/* Slice bitstreams must be byte-aligned before concatenation; the copy
 * appends src's bits onto dst's PutBitContext. */
3553 assert(put_bits_count(&src->pb) % 8 ==0);
3554 assert(put_bits_count(&dst->pb) % 8 ==0);
3555 avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3556 flush_put_bits(&dst->pb);
/* Pick the quantizer (quality / lambda) for the current picture.
 * Order of precedence: a pending next_lambda, then the rate controller
 * (unless qscale is fixed). With dry_run set, next_lambda is preserved so
 * the real call can consume it later.
 * NOTE(review): excerpt is line-sampled; the error-return path after the
 * negative-quality check and the trailing return are outside the visible
 * lines. */
3559 static int estimate_qp(MpegEncContext *s, int dry_run){
3560 if (s->next_lambda){
3561 s->current_picture_ptr->f->quality =
3562 s->current_picture.f->quality = s->next_lambda;
3563 if(!dry_run) s->next_lambda= 0;
3564 } else if (!s->fixed_qscale) {
3565 s->current_picture_ptr->f->quality =
3566 s->current_picture.f->quality = ff_rate_estimate_qscale(s, dry_run);
3567 if (s->current_picture.f->quality < 0)
/* Adaptive quantization: clean up the per-MB qscale table with the
 * codec-specific helper before building the final table. */
3571 if(s->adaptive_quant){
3572 switch(s->codec_id){
3573 case AV_CODEC_ID_MPEG4:
3574 if (CONFIG_MPEG4_ENCODER)
3575 ff_clean_mpeg4_qscales(s);
3577 case AV_CODEC_ID_H263:
3578 case AV_CODEC_ID_H263P:
3579 case AV_CODEC_ID_FLV1:
3580 if (CONFIG_H263_ENCODER)
3581 ff_clean_h263_qscales(s);
3584 ff_init_qscale_tab(s);
3587 s->lambda= s->lambda_table[0];
/* Non-adaptive path: lambda tracks the picture-level quality directly. */
3590 s->lambda = s->current_picture.f->quality;
3595 /* must be called before writing the header */
/* Derive temporal distances from the current picture's pts:
 * pp_time = distance between the two most recent non-B frames,
 * pb_time = distance from the previous non-B frame to this B frame.
 * These feed B-frame prediction/header fields. */
3596 static void set_frame_distances(MpegEncContext * s){
3597 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3598 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3600 if(s->pict_type==AV_PICTURE_TYPE_B){
3601 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3602 assert(s->pb_time > 0 && s->pb_time < s->pp_time);
/* Non-B frame: advance the reference distance and remember this time. */
3604 s->pp_time= s->time - s->last_non_b_time;
3605 s->last_non_b_time= s->time;
3606 assert(s->picture_number==0 || s->pp_time > 0);
/* Encode one picture: set up timing/lambda, run motion estimation across
 * slice threads, decide fcodes and fix overlong MVs, pick the quantizer,
 * build MJPEG/AMV matrices where needed, write the picture header, then
 * run the per-slice encode threads and merge their results.
 * NOTE(review): this excerpt is line-sampled; error-return bodies, loop
 * headers and closing braces between the visible lines are omitted here. */
3610 static int encode_picture(MpegEncContext *s, int picture_number)
3614 int context_count = s->slice_context_count;
3616 s->picture_number = picture_number;
3618 /* Reset the average MB variance */
3619 s->me.mb_var_sum_temp =
3620 s->me.mc_mb_var_sum_temp = 0;
3622 /* we need to initialize some time vars before we can encode b-frames */
3623 // RAL: Condition added for MPEG1VIDEO
3624 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3625 set_frame_distances(s);
3626 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3627 ff_set_mpeg4_time(s);
3629 s->me.scene_change_score=0;
3631 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* Rounding control: I frames reset it, P/S frames may toggle it
 * (flip-flop rounding) depending on the codec. */
3633 if(s->pict_type==AV_PICTURE_TYPE_I){
3634 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3635 else s->no_rounding=0;
3636 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3637 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3638 s->no_rounding ^= 1;
/* Two-pass: a dry-run qp estimate plus the stored fcode; otherwise reuse
 * the last lambda of the matching picture type for ME. */
3641 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3642 if (estimate_qp(s,1) < 0)
3644 ff_get_2pass_fcode(s);
3645 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3646 if(s->pict_type==AV_PICTURE_TYPE_B)
3647 s->lambda= s->last_lambda_for[s->pict_type];
3649 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* Outside MJPEG/AMV, chroma shares the luma intra matrices; drop any
 * separately allocated chroma copies first to avoid leaking them. */
3653 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3654 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3655 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3656 s->q_chroma_intra_matrix = s->q_intra_matrix;
3657 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3660 s->mb_intra=0; //for the rate distortion & bit compare functions
3661 for(i=1; i<context_count; i++){
3662 ret = ff_update_duplicate_context(s->thread_context[i], s);
3670 /* Estimate motion for every MB */
3671 if(s->pict_type != AV_PICTURE_TYPE_I){
3672 s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3673 s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3674 if (s->pict_type != AV_PICTURE_TYPE_B) {
3675 if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3676 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3680 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3681 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
/* I frame: every MB is intra; still measure spatial complexity when the
 * rate controller needs it. */
3683 for(i=0; i<s->mb_stride*s->mb_height; i++)
3684 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3686 if(!s->fixed_qscale){
3687 /* finding spatial complexity for I-frame rate control */
3688 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3691 for(i=1; i<context_count; i++){
3692 merge_context_after_me(s, s->thread_context[i]);
3694 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3695 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* Scene cut detected during ME of a P frame: re-type it as an I frame. */
3698 if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3699 s->pict_type= AV_PICTURE_TYPE_I;
3700 for(i=0; i<s->mb_stride*s->mb_height; i++)
3701 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3702 if(s->msmpeg4_version >= 3)
3704 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3705 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* P/S frames: choose f_code from the MV statistics and clamp any motion
 * vectors that the chosen f_code cannot represent. */
3709 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3710 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3712 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3714 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3715 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3716 s->f_code= FFMAX3(s->f_code, a, b);
3719 ff_fix_long_p_mvs(s);
3720 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3721 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3725 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3726 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
/* B frames: f_code from forward tables, b_code from backward tables,
 * then clamp all four MV tables, plus the field MV tables when
 * interlaced ME is enabled. */
3731 if(s->pict_type==AV_PICTURE_TYPE_B){
3734 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3735 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3736 s->f_code = FFMAX(a, b);
3738 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3739 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3740 s->b_code = FFMAX(a, b);
3742 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3743 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3744 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3745 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3746 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3748 for(dir=0; dir<2; dir++){
3751 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3752 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3753 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3754 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* Final (non-dry-run) quantizer decision for this picture. */
3762 if (estimate_qp(s, 0) < 0)
3765 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3766 s->pict_type == AV_PICTURE_TYPE_I &&
3767 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3768 s->qscale= 3; //reduce clipping problems
/* MJPEG: bake qscale into the quant matrices (luma and chroma may come
 * from the user) and rebuild the derived q_* tables. */
3770 if (s->out_format == FMT_MJPEG) {
3771 const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3772 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3774 if (s->avctx->intra_matrix) {
3776 luma_matrix = s->avctx->intra_matrix;
3778 if (s->avctx->chroma_intra_matrix)
3779 chroma_matrix = s->avctx->chroma_intra_matrix;
3781 /* for mjpeg, we do include qscale in the matrix */
3783 int j = s->idsp.idct_permutation[i];
3785 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3786 s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3788 s->y_dc_scale_table=
3789 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3790 s->chroma_intra_matrix[0] =
3791 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3792 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3793 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3794 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3795 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV: fixed flat matrices and fixed DC scale tables. */
3798 if(s->codec_id == AV_CODEC_ID_AMV){
3799 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3800 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3802 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3804 s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3805 s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3807 s->y_dc_scale_table= y;
3808 s->c_dc_scale_table= c;
3809 s->intra_matrix[0] = 13;
3810 s->chroma_intra_matrix[0] = 14;
3811 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3812 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3813 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3814 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3818 //FIXME var duplication
3819 s->current_picture_ptr->f->key_frame =
3820 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3821 s->current_picture_ptr->f->pict_type =
3822 s->current_picture.f->pict_type = s->pict_type;
3824 if (s->current_picture.f->key_frame)
3825 s->picture_in_gop_number=0;
/* Write the format-specific picture header. */
3827 s->mb_x = s->mb_y = 0;
3828 s->last_bits= put_bits_count(&s->pb);
3829 switch(s->out_format) {
3831 if (CONFIG_MJPEG_ENCODER)
3832 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3833 s->intra_matrix, s->chroma_intra_matrix);
3836 if (CONFIG_H261_ENCODER)
3837 ff_h261_encode_picture_header(s, picture_number);
3840 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3841 ff_wmv2_encode_picture_header(s, picture_number);
3842 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3843 ff_msmpeg4_encode_picture_header(s, picture_number);
3844 else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3845 ret = ff_mpeg4_encode_picture_header(s, picture_number);
3848 } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3849 ret = ff_rv10_encode_picture_header(s, picture_number);
3853 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3854 ff_rv20_encode_picture_header(s, picture_number);
3855 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3856 ff_flv_encode_picture_header(s, picture_number);
3857 else if (CONFIG_H263_ENCODER)
3858 ff_h263_encode_picture_header(s, picture_number);
3861 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3862 ff_mpeg1_encode_picture_header(s, picture_number);
3867 bits= put_bits_count(&s->pb);
3868 s->header_bits= bits - s->last_bits;
/* Run per-slice encode threads, then stitch the per-thread bitstreams and
 * statistics back into the main context. */
3870 for(i=1; i<context_count; i++){
3871 update_duplicate_context_after_me(s->thread_context[i], s);
3873 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3874 for(i=1; i<context_count; i++){
3875 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3876 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
3877 merge_context_after_encode(s, s->thread_context[i]);
/* DCT-domain noise reduction: accumulate per-coefficient error statistics
 * (dct_error_sum) and shrink each coefficient toward zero by the learned
 * offset (dct_offset), clamping so a coefficient never changes sign.
 * Separate statistics are kept for intra vs inter blocks.
 * NOTE(review): excerpt is line-sampled; the sign-branch structure and
 * the store back to block[i] are partly outside the visible lines. */
3883 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3884 const int intra= s->mb_intra;
3887 s->dct_count[intra]++;
3889 for(i=0; i<64; i++){
3890 int level= block[i];
3894 s->dct_error_sum[intra][i] += level;
3895 level -= s->dct_offset[intra][i];
3896 if(level<0) level=0;
3898 s->dct_error_sum[intra][i] -= level;
3899 level += s->dct_offset[intra][i];
3900 if(level>0) level=0;
/* Rate-distortion-optimal (trellis) quantization of one 8x8 block.
 * Runs the forward DCT, builds up to two candidate quantized levels per
 * coefficient, then dynamic-programs over "survivor" end positions to
 * minimize distortion + lambda * bits. Returns the index of the last
 * nonzero coefficient (or -1 style results mirroring ff_dct_quantize_c).
 * NOTE(review): excerpt is line-sampled; many declarations, else-branches
 * and closing braces between the visible lines are omitted here. */
3907 static int dct_quantize_trellis_c(MpegEncContext *s,
3908 int16_t *block, int n,
3909 int qscale, int *overflow){
3911 const uint16_t *matrix;
3912 const uint8_t *scantable= s->intra_scantable.scantable;
3913 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3915 unsigned int threshold1, threshold2;
3927 int coeff_count[64];
3928 int qmul, qadd, start_i, last_non_zero, i, dc;
3929 const int esc_length= s->ac_esc_length;
3931 uint8_t * last_length;
3932 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3935 s->fdsp.fdct(block);
3937 if(s->dct_error_sum)
3938 s->denoise_dct(s, block);
3940 qadd= ((qscale-1)|1)*8;
3942 if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3943 else mpeg2_qscale = qscale << 1;
3954 /* For AIC we skip quant/dequant of INTRADC */
3959 /* note: block[0] is assumed to be positive */
3960 block[0] = (block[0] + (q >> 1)) / q;
/* Select quant matrix and VLC length tables for intra (luma/chroma) vs
 * inter blocks. */
3963 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
3964 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
3965 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
3966 bias= 1<<(QMAT_SHIFT-1);
3968 if (n > 3 && s->intra_chroma_ac_vlc_length) {
3969 length = s->intra_chroma_ac_vlc_length;
3970 last_length= s->intra_chroma_ac_vlc_last_length;
3972 length = s->intra_ac_vlc_length;
3973 last_length= s->intra_ac_vlc_last_length;
3978 qmat = s->q_inter_matrix[qscale];
3979 matrix = s->inter_matrix;
3980 length = s->inter_ac_vlc_length;
3981 last_length= s->inter_ac_vlc_last_length;
3985 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3986 threshold2= (threshold1<<1);
/* Scan backwards from coefficient 63 to find the last value that
 * survives quantization. */
3988 for(i=63; i>=start_i; i--) {
3989 const int j = scantable[i];
3990 int level = block[j] * qmat[j];
3992 if(((unsigned)(level+threshold1))>threshold2){
/* Build candidate levels per coefficient: the rounded value and the
 * value one step closer to zero (coeff_count selects 1 or 2). */
3998 for(i=start_i; i<=last_non_zero; i++) {
3999 const int j = scantable[i];
4000 int level = block[j] * qmat[j];
4002 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4003 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4004 if(((unsigned)(level+threshold1))>threshold2){
4006 level= (bias + level)>>QMAT_SHIFT;
4008 coeff[1][i]= level-1;
4009 // coeff[2][k]= level-2;
4011 level= (bias - level)>>QMAT_SHIFT;
4012 coeff[0][i]= -level;
4013 coeff[1][i]= -level+1;
4014 // coeff[2][k]= -level+2;
4016 coeff_count[i]= FFMIN(level, 2);
4017 av_assert2(coeff_count[i]);
4020 coeff[0][i]= (level>>31)|1;
4025 *overflow= s->max_qcoeff < max; //overflow might have happened
4027 if(last_non_zero < start_i){
4028 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4029 return last_non_zero;
/* Dynamic programming over coefficient positions; survivor[] holds the
 * candidate start positions of the next run. */
4032 score_tab[start_i]= 0;
4033 survivor[0]= start_i;
4036 for(i=start_i; i<=last_non_zero; i++){
4037 int level_index, j, zero_distortion;
4038 int dct_coeff= FFABS(block[ scantable[i] ]);
4039 int best_score=256*256*256*120;
4041 if (s->fdsp.fdct == ff_fdct_ifast)
4042 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4043 zero_distortion= dct_coeff*dct_coeff;
4045 for(level_index=0; level_index < coeff_count[i]; level_index++){
4047 int level= coeff[level_index][i];
4048 const int alevel= FFABS(level);
/* Reconstruct the coefficient the way each target format's dequantizer
 * will, so distortion matches the decoder. */
4053 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4054 unquant_coeff= alevel*qmul + qadd;
4055 } else if(s->out_format == FMT_MJPEG) {
4056 j = s->idsp.idct_permutation[scantable[i]];
4057 unquant_coeff = alevel * matrix[j] * 8;
4059 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4061 unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4062 unquant_coeff = (unquant_coeff - 1) | 1;
4064 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4065 unquant_coeff = (unquant_coeff - 1) | 1;
4070 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* Non-escape-coded level: try each survivor as run start. */
4072 if((level&(~127)) == 0){
4073 for(j=survivor_count-1; j>=0; j--){
4074 int run= i - survivor[j];
4075 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4076 score += score_tab[i-run];
4078 if(score < best_score){
4081 level_tab[i+1]= level-64;
4085 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4086 for(j=survivor_count-1; j>=0; j--){
4087 int run= i - survivor[j];
4088 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4089 score += score_tab[i-run];
4090 if(score < last_score){
4093 last_level= level-64;
/* Escape-coded level: fixed escape cost regardless of run. */
4099 distortion += esc_length*lambda;
4100 for(j=survivor_count-1; j>=0; j--){
4101 int run= i - survivor[j];
4102 int score= distortion + score_tab[i-run];
4104 if(score < best_score){
4107 level_tab[i+1]= level-64;
4111 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4112 for(j=survivor_count-1; j>=0; j--){
4113 int run= i - survivor[j];
4114 int score= distortion + score_tab[i-run];
4115 if(score < last_score){
4118 last_level= level-64;
4126 score_tab[i+1]= best_score;
4128 //Note: there is a vlc code in mpeg4 which is 1 bit shorter than another one with a shorter run and the same level
4129 if(last_non_zero <= 27){
4130 for(; survivor_count; survivor_count--){
4131 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4135 for(; survivor_count; survivor_count--){
4136 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4141 survivor[ survivor_count++ ]= i+1;
/* For formats without a per-run "last" VLC, pick the best end position
 * by scanning the accumulated scores. */
4144 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4145 last_score= 256*256*256*120;
4146 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4147 int score= score_tab[i];
4148 if(i) score += lambda*2; //FIXME more exact?
4150 if(score < last_score){
4153 last_level= level_tab[i];
4154 last_run= run_tab[i];
4159 s->coded_score[n] = last_score;
4161 dc= FFABS(block[0]);
4162 last_non_zero= last_i - 1;
4163 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4165 if(last_non_zero < start_i)
4166 return last_non_zero;
/* Special case: only the DC-position coefficient survived; decide keep
 * vs zero directly by comparing distortion+rate against dropping it. */
4168 if(last_non_zero == 0 && start_i == 0){
4170 int best_score= dc * dc;
4172 for(i=0; i<coeff_count[0]; i++){
4173 int level= coeff[i][0];
4174 int alevel= FFABS(level);
4175 int unquant_coeff, score, distortion;
4177 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4178 unquant_coeff= (alevel*qmul + qadd)>>3;
4180 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4181 unquant_coeff = (unquant_coeff - 1) | 1;
4183 unquant_coeff = (unquant_coeff + 4) >> 3;
4184 unquant_coeff<<= 3 + 3;
4186 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4188 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4189 else score= distortion + esc_length*lambda;
4191 if(score < best_score){
4193 best_level= level - 64;
4196 block[0]= best_level;
4197 s->coded_score[n] = best_score - dc*dc;
4198 if(best_level == 0) return -1;
4199 else return last_non_zero;
/* Backtrack through level_tab/run_tab to write the chosen levels into
 * the (IDCT-permuted) block. */
4203 av_assert2(last_level);
4205 block[ perm_scantable[last_non_zero] ]= last_level;
4208 for(; i>start_i; i -= run_tab[i] + 1){
4209 block[ perm_scantable[i-1] ]= level_tab[i];
4212 return last_non_zero;
4215 //#define REFINE_STATS 1
/* 64 IDCT basis vectors (one 8x8 pattern per coefficient), in the
 * encoder's IDCT permutation order; filled lazily by build_basis(). */
4216 static int16_t basis[64][64];
/* Precompute the scaled 2-D DCT basis functions used by
 * dct_quantize_refine()'s try_8x8basis/add_8x8basis calls.
 * NOTE(review): the loop headers over i/j/x/y are outside the visible
 * lines of this excerpt. */
4218 static void build_basis(uint8_t *perm){
4225 double s= 0.25*(1<<BASIS_SHIFT);
4227 int perm_index= perm[index];
4228 if(i==0) s*= sqrt(0.5);
4229 if(j==0) s*= sqrt(0.5);
4230 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Iterative noise-shaping refinement of an already-quantized 8x8 block:
 * repeatedly tries +/-1 changes to individual coefficients, scoring each
 * candidate by weighted spatial-domain error (try_8x8basis) plus the VLC
 * bit-cost delta, and applies the best change until no improvement.
 * Returns the updated last-nonzero index.
 * NOTE(review): excerpt is line-sampled; loop/branch scaffolding between
 * the visible lines is omitted here. */
4237 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4238 int16_t *block, int16_t *weight, int16_t *orig,
4241 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4242 const uint8_t *scantable= s->intra_scantable.scantable;
4243 const uint8_t *perm_scantable= s->intra_scantable.permutated;
4244 // unsigned int threshold1, threshold2;
4249 int qmul, qadd, start_i, last_non_zero, i, dc;
4251 uint8_t * last_length;
4253 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* Debug counters (REFINE_STATS builds only). */
4256 static int after_last=0;
4257 static int to_zero=0;
4258 static int from_zero=0;
4261 static int messed_sign=0;
4264 if(basis[0][0] == 0)
4265 build_basis(s->idsp.idct_permutation);
4276 /* For AIC we skip quant/dequant of INTRADC */
4280 q <<= RECON_SHIFT-3;
4281 /* note: block[0] is assumed to be positive */
4283 // block[0] = (block[0] + (q >> 1)) / q;
4285 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4286 // bias= 1<<(QMAT_SHIFT-1);
/* Select VLC length tables: intra luma/chroma vs inter. */
4287 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4288 length = s->intra_chroma_ac_vlc_length;
4289 last_length= s->intra_chroma_ac_vlc_last_length;
4291 length = s->intra_ac_vlc_length;
4292 last_length= s->intra_ac_vlc_last_length;
4297 length = s->inter_ac_vlc_length;
4298 last_length= s->inter_ac_vlc_last_length;
4300 last_non_zero = s->block_last_index[n];
/* rem[] holds the spatial-domain residual (original minus current
 * reconstruction) at RECON_SHIFT precision. */
4305 dc += (1<<(RECON_SHIFT-1));
4306 for(i=0; i<64; i++){
4307 rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig directly instead of copying to rem[]
4310 STOP_TIMER("memset rem[]")}
/* Map the caller-supplied weights into the 16..63 range and derive the
 * lambda used for the bit-cost term. */
4313 for(i=0; i<64; i++){
4318 w= FFABS(weight[i]) + qns*one;
4319 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4322 // w=weight[i] = (63*qns + (w/2)) / w;
4325 av_assert2(w<(1<<6));
4328 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* Subtract the current reconstruction of every coded coefficient from
 * rem[], recording the run lengths for bit-cost evaluation. */
4334 for(i=start_i; i<=last_non_zero; i++){
4335 int j= perm_scantable[i];
4336 const int level= block[j];
4340 if(level<0) coeff= qmul*level - qadd;
4341 else coeff= qmul*level + qadd;
4342 run_tab[rle_index++]=run;
4345 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
4351 if(last_non_zero>0){
4352 STOP_TIMER("init rem[]")
/* Main refinement loop: baseline score is "change nothing". */
4359 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4362 int run2, best_unquant_change=0, analyze_gradient;
4366 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
4368 if(analyze_gradient){
/* d1[] = weighted gradient of the residual, used to veto changes that
 * point against the error direction. */
4372 for(i=0; i<64; i++){
4375 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
4378 STOP_TIMER("rem*w*w")}
/* DC coefficient (intra only): try +/-1, keeping new_coeff in range. */
4388 const int level= block[0];
4389 int change, old_coeff;
4391 av_assert2(s->mb_intra);
4395 for(change=-1; change<=1; change+=2){
4396 int new_level= level + change;
4397 int score, new_coeff;
4399 new_coeff= q*new_level;
4400 if(new_coeff >= 2048 || new_coeff < 0)
4403 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4404 new_coeff - old_coeff);
4405 if(score<best_score){
4408 best_change= change;
4409 best_unquant_change= new_coeff - old_coeff;
4416 run2= run_tab[rle_index++];
/* AC coefficients: try +/-1 per position, scoring the VLC bit delta of
 * the affected run/level combinations plus the residual change. */
4420 for(i=start_i; i<64; i++){
4421 int j= perm_scantable[i];
4422 const int level= block[j];
4423 int change, old_coeff;
4425 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4429 if(level<0) old_coeff= qmul*level - qadd;
4430 else old_coeff= qmul*level + qadd;
4431 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4435 av_assert2(run2>=0 || i >= last_non_zero );
4438 for(change=-1; change<=1; change+=2){
4439 int new_level= level + change;
4440 int score, new_coeff, unquant_change;
4443 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4447 if(new_level<0) new_coeff= qmul*new_level - qadd;
4448 else new_coeff= qmul*new_level + qadd;
4449 if(new_coeff >= 2048 || new_coeff <= -2048)
4451 //FIXME check for overflow
/* Level change within VLC range: simple length-table delta. */
4454 if(level < 63 && level > -63){
4455 if(i < last_non_zero)
4456 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4457 - length[UNI_AC_ENC_INDEX(run, level+64)];
4459 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4460 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* Coefficient appears (0 -> +/-1): splitting a run costs extra codes. */
4463 av_assert2(FFABS(new_level)==1);
4465 if(analyze_gradient){
4466 int g= d1[ scantable[i] ];
4467 if(g && (g^new_level) >= 0)
4471 if(i < last_non_zero){
4472 int next_i= i + run2 + 1;
4473 int next_level= block[ perm_scantable[next_i] ] + 64;
4475 if(next_level&(~127))
4478 if(next_i < last_non_zero)
4479 score += length[UNI_AC_ENC_INDEX(run, 65)]
4480 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4481 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4483 score += length[UNI_AC_ENC_INDEX(run, 65)]
4484 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4485 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4487 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4489 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4490 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* Coefficient vanishes (+/-1 -> 0): merging runs saves codes. */
4496 av_assert2(FFABS(level)==1);
4498 if(i < last_non_zero){
4499 int next_i= i + run2 + 1;
4500 int next_level= block[ perm_scantable[next_i] ] + 64;
4502 if(next_level&(~127))
4505 if(next_i < last_non_zero)
4506 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4507 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4508 - length[UNI_AC_ENC_INDEX(run, 65)];
4510 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4511 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4512 - length[UNI_AC_ENC_INDEX(run, 65)];
4514 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4516 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4517 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4524 unquant_change= new_coeff - old_coeff;
4525 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4527 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4529 if(score<best_score){
4532 best_change= change;
4533 best_unquant_change= unquant_change;
4537 prev_level= level + 64;
4538 if(prev_level&(~127))
4547 STOP_TIMER("iterative step")}
/* Apply the winning +/-1 change and update last_non_zero accordingly. */
4551 int j= perm_scantable[ best_coeff ];
4553 block[j] += best_change;
4555 if(best_coeff > last_non_zero){
4556 last_non_zero= best_coeff;
4557 av_assert2(block[j]);
4564 if(block[j] - best_change){
4565 if(FFABS(block[j]) > FFABS(block[j] - best_change)){
4577 for(; last_non_zero>=start_i; last_non_zero--){
4578 if(block[perm_scantable[last_non_zero]])
4584 if(256*256*256*64 % count == 0){
4585 av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
/* Rebuild run_tab and fold the applied change back into rem[] before the
 * next iteration. */
4590 for(i=start_i; i<=last_non_zero; i++){
4591 int j= perm_scantable[i];
4592 const int level= block[j];
4595 run_tab[rle_index++]=run;
4602 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4608 if(last_non_zero>0){
4609 STOP_TIMER("iterative search")
4614 return last_non_zero;
4618 * Permute an 8x8 block according to permutation.
4619 * @param block the block which will be permuted according to
4620 * the given permutation vector
4621 * @param permutation the permutation vector
4622 * @param last the last non zero coefficient in scantable order, used to
4623 * speed the permutation up
4624 * @param scantable the used scantable, this is only used to speed the
4625 * permutation up, the block is not (inverse) permuted
4626 * to scantable order!
4628 void ff_block_permute(int16_t *block, uint8_t *permutation,
4629 const uint8_t *scantable, int last)
4636 //FIXME it is ok but not clean and might fail for some permutations
4637 // if (permutation[1] == 1)
/* First pass: copy the (at most last+1) nonzero coefficients aside.
 * NOTE(review): the temp[] declaration and the zeroing of the original
 * positions are outside the visible lines of this excerpt. */
4640 for (i = 0; i <= last; i++) {
4641 const int j = scantable[i];
/* Second pass: scatter them back through the permutation table. */
4646 for (i = 0; i <= last; i++) {
4647 const int j = scantable[i];
4648 const int perm_j = permutation[j];
4649 block[perm_j] = temp[j];
/* Reference (non-trellis) quantizer: forward DCT, optional DCT-domain
 * denoising, biased quantization with an early last-nonzero scan, then an
 * IDCT-order permutation of the surviving coefficients.
 * Returns the index of the last nonzero coefficient; *overflow reports
 * whether any level exceeded max_qcoeff.
 * NOTE(review): excerpt is line-sampled; intra/inter branch scaffolding
 * and some declarations are outside the visible lines. */
4653 int ff_dct_quantize_c(MpegEncContext *s,
4654 int16_t *block, int n,
4655 int qscale, int *overflow)
4657 int i, j, level, last_non_zero, q, start_i;
4659 const uint8_t *scantable= s->intra_scantable.scantable;
4662 unsigned int threshold1, threshold2;
4664 s->fdsp.fdct(block);
4666 if(s->dct_error_sum)
4667 s->denoise_dct(s, block);
4677 /* For AIC we skip quant/dequant of INTRADC */
4680 /* note: block[0] is assumed to be positive */
4681 block[0] = (block[0] + (q >> 1)) / q;
/* Select the quant matrix and rounding bias for intra vs inter blocks. */
4684 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4685 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4689 qmat = s->q_inter_matrix[qscale];
4690 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4692 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4693 threshold2= (threshold1<<1);
/* Backward scan: find the last coefficient that quantizes to nonzero. */
4694 for(i=63;i>=start_i;i--) {
4696 level = block[j] * qmat[j];
4698 if(((unsigned)(level+threshold1))>threshold2){
/* Forward pass: quantize every coefficient up to last_non_zero. */
4705 for(i=start_i; i<=last_non_zero; i++) {
4707 level = block[j] * qmat[j];
4709 // if( bias+level >= (1<<QMAT_SHIFT)
4710 // || bias-level >= (1<<QMAT_SHIFT)){
4711 if(((unsigned)(level+threshold1))>threshold2){
4713 level= (bias + level)>>QMAT_SHIFT;
4716 level= (bias - level)>>QMAT_SHIFT;
4724 *overflow= s->max_qcoeff < max; //overflow might have happened
4726 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4727 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4728 ff_block_permute(block, s->idsp.idct_permutation,
4729 scantable, last_non_zero);
4731 return last_non_zero;
/* Encoder registration: per-codec AVOption tables, AVClass wrappers and
 * AVCodec definitions for the H.263-family encoders built on
 * MpegEncContext.
 * NOTE(review): excerpt is line-sampled; option-table terminators,
 * closing braces and some struct fields are outside the visible lines. */
4734 #define OFFSET(x) offsetof(MpegEncContext, x)
4735 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
4736 static const AVOption h263_options[] = {
4737 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4738 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4743 static const AVClass h263_class = {
4744 .class_name = "H.263 encoder",
4745 .item_name = av_default_item_name,
4746 .option = h263_options,
4747 .version = LIBAVUTIL_VERSION_INT,
4750 AVCodec ff_h263_encoder = {
4752 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4753 .type = AVMEDIA_TYPE_VIDEO,
4754 .id = AV_CODEC_ID_H263,
4755 .priv_data_size = sizeof(MpegEncContext),
4756 .init = ff_mpv_encode_init,
4757 .encode2 = ff_mpv_encode_picture,
4758 .close = ff_mpv_encode_end,
4759 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4760 .priv_class = &h263_class,
/* H.263+ adds annex-driven options on top of the base H.263 encoder. */
4763 static const AVOption h263p_options[] = {
4764 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4765 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4766 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4767 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
4771 static const AVClass h263p_class = {
4772 .class_name = "H.263p encoder",
4773 .item_name = av_default_item_name,
4774 .option = h263p_options,
4775 .version = LIBAVUTIL_VERSION_INT,
4778 AVCodec ff_h263p_encoder = {
4780 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4781 .type = AVMEDIA_TYPE_VIDEO,
4782 .id = AV_CODEC_ID_H263P,
4783 .priv_data_size = sizeof(MpegEncContext),
4784 .init = ff_mpv_encode_init,
4785 .encode2 = ff_mpv_encode_picture,
4786 .close = ff_mpv_encode_end,
4787 .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4788 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4789 .priv_class = &h263p_class,
/* MS-MPEG4 variants share the generic mpegvideo option table. */
4792 static const AVClass msmpeg4v2_class = {
4793 .class_name = "msmpeg4v2 encoder",
4794 .item_name = av_default_item_name,
4795 .option = ff_mpv_generic_options,
4796 .version = LIBAVUTIL_VERSION_INT,
4799 AVCodec ff_msmpeg4v2_encoder = {
4800 .name = "msmpeg4v2",
4801 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4802 .type = AVMEDIA_TYPE_VIDEO,
4803 .id = AV_CODEC_ID_MSMPEG4V2,
4804 .priv_data_size = sizeof(MpegEncContext),
4805 .init = ff_mpv_encode_init,
4806 .encode2 = ff_mpv_encode_picture,
4807 .close = ff_mpv_encode_end,
4808 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4809 .priv_class = &msmpeg4v2_class,
4812 static const AVClass msmpeg4v3_class = {
4813 .class_name = "msmpeg4v3 encoder",
4814 .item_name = av_default_item_name,
4815 .option = ff_mpv_generic_options,
4816 .version = LIBAVUTIL_VERSION_INT,
4819 AVCodec ff_msmpeg4v3_encoder = {
4821 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4822 .type = AVMEDIA_TYPE_VIDEO,
4823 .id = AV_CODEC_ID_MSMPEG4V3,
4824 .priv_data_size = sizeof(MpegEncContext),
4825 .init = ff_mpv_encode_init,
4826 .encode2 = ff_mpv_encode_picture,
4827 .close = ff_mpv_encode_end,
4828 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4829 .priv_class = &msmpeg4v3_class,
4832 static const AVClass wmv1_class = {
4833 .class_name = "wmv1 encoder",
4834 .item_name = av_default_item_name,
4835 .option = ff_mpv_generic_options,
4836 .version = LIBAVUTIL_VERSION_INT,
4839 AVCodec ff_wmv1_encoder = {
4841 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4842 .type = AVMEDIA_TYPE_VIDEO,
4843 .id = AV_CODEC_ID_WMV1,
4844 .priv_data_size = sizeof(MpegEncContext),
4845 .init = ff_mpv_encode_init,
4846 .encode2 = ff_mpv_encode_picture,
4847 .close = ff_mpv_encode_end,
4848 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4849 .priv_class = &wmv1_class,