2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/pixdesc.h"
40 #include "libavutil/opt.h"
41 #include "libavutil/timer.h"
46 #include "mpegvideo.h"
47 #include "mpegvideodata.h"
51 #include "mjpegenc_common.h"
53 #include "mpegutils.h"
56 #include "pixblockdsp.h"
60 #include "aandcttab.h"
62 #include "mpeg4video.h"
64 #include "bytestream.h"
/* Fixed-point precision of the intra/inter quantizer bias values. */
70 #define QUANT_BIAS_SHIFT 8
/* Shift used for the 16-bit (MMX-era) quantizer multiplier tables. */
72 #define QMAT_SHIFT_MMX 16
/* Forward declarations for encoder internals defined later in this file. */
75 static int encode_picture(MpegEncContext *s, int picture_number);
76 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
77 static int sse_mb(MpegEncContext *s);
78 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
79 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
/* Shared default motion-estimation tables, filled by mpv_encode_defaults()
 * and pointed to by every encoder context (not per-instance state). */
81 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
82 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
/* Generic AVOption table shared by the mpegvideo-based encoders.
 * NOTE(review): the table entries are elided in this excerpt. */
84 const AVOption ff_mpv_generic_options[] = {
/**
 * Precompute the quantizer multiplier tables for every qscale in
 * [qmin, qmax] from the given 8x8 quantization matrix.
 *
 * @param qmat         per-qscale 32-bit reciprocal multipliers
 * @param qmat16       per-qscale 16-bit multipliers plus rounding bias
 *                     (the [0]/[1] planes hold multiplier and bias)
 * @param quant_matrix source quantization matrix (raster order)
 * @param bias         rounding bias in QUANT_BIAS_SHIFT fixed point
 * @param intra        start index for the overflow check loop below
 *
 * The multiplier formula depends on which FDCT implementation is active,
 * because ff_fdct_ifast leaves the AAN scale factors in its output.
 * NOTE(review): several original lines (braces, loop tails, the shift
 * computation) are elided in this excerpt; comments cover visible code only.
 */
89 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
90 uint16_t (*qmat16)[2][64],
91 const uint16_t *quant_matrix,
92 int bias, int qmin, int qmax, int intra)
94 FDCTDSPContext *fdsp = &s->fdsp;
98 for (qscale = qmin; qscale <= qmax; qscale++) {
/* MPEG-2 non-linear qscale maps the index through a table; otherwise the
 * effective quantizer is simply 2*qscale. */
102 if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
103 else qscale2 = qscale << 1;
/* Case 1: "slow" JPEG-style FDCTs produce unscaled coefficients. */
105 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
107 fdsp->fdct == ff_faandct ||
108 #endif /* CONFIG_FAANDCT */
109 fdsp->fdct == ff_jpeg_fdct_islow_10) {
110 for (i = 0; i < 64; i++) {
/* Matrices are stored in IDCT-permuted order; j maps back. */
111 const int j = s->idsp.idct_permutation[i];
112 int64_t den = (int64_t) qscale2 * quant_matrix[j];
113 /* 16 <= qscale * quant_matrix[i] <= 7905
114 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
115 * 19952 <= x <= 249205026
116 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
117 * 3444240 >= (1 << 36) / (x) >= 275 */
119 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
/* Case 2: ifast FDCT output carries the AAN scale, so fold
 * ff_aanscales into the denominator. */
121 } else if (fdsp->fdct == ff_fdct_ifast) {
122 for (i = 0; i < 64; i++) {
123 const int j = s->idsp.idct_permutation[i];
124 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
125 /* 16 <= qscale * quant_matrix[i] <= 7905
126 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
127 * 19952 <= x <= 249205026
128 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
129 * 3444240 >= (1 << 36) / (x) >= 275 */
131 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
/* Case 3 (default): also build the 16-bit multiplier + bias tables. */
134 for (i = 0; i < 64; i++) {
135 const int j = s->idsp.idct_permutation[i];
136 int64_t den = (int64_t) qscale2 * quant_matrix[j];
137 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
138 * Assume x = qscale * quant_matrix[i]
140 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
141 * so 32768 >= (1 << 19) / (x) >= 67 */
142 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
143 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
144 // (qscale * quant_matrix[i]);
145 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
/* Clamp so the 16-bit multiplier is never 0 or the out-of-range
 * 128*256 value. */
147 if (qmat16[qscale][0][i] == 0 ||
148 qmat16[qscale][0][i] == 128 * 256)
149 qmat16[qscale][0][i] = 128 * 256 - 1;
150 qmat16[qscale][1][i] =
151 ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
152 qmat16[qscale][0][i]);
/* Overflow check: find the largest coefficient and warn if
 * max * qmat could overflow int at the current shift. */
156 for (i = intra; i < 64; i++) {
158 if (fdsp->fdct == ff_fdct_ifast) {
159 max = (8191LL * ff_aanscales[i]) >> 14;
161 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
167 av_log(NULL, AV_LOG_INFO,
168 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/* Derive s->qscale (and lambda2) from the current rate-control lambda.
 * NOTE(review): the first branch is explicitly disabled by "&& 0" — the
 * non-linear qscale search below it is dead code as written; the active
 * path is the linear mapping at the bottom. Parts of the body are elided
 * in this excerpt. */
173 static inline void update_qscale(MpegEncContext *s)
175 if (s->q_scale_type == 1 && 0) {
177 int bestdiff=INT_MAX;
/* Search the MPEG-2 non-linear qscale table for the entry whose
 * implied lambda is closest to s->lambda, honoring qmin/qmax. */
180 for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
181 int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
182 if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
183 (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
185 if (diff < bestdiff) {
/* Linear mapping: qscale ~= lambda * 139 / 2^(FF_LAMBDA_SHIFT+7),
 * with rounding; then clamp to [qmin, qmax] (qmax relaxed to 31 when
 * VBV forces ignoring qmax). */
192 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
193 (FF_LAMBDA_SHIFT + 7);
194 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
/* Keep lambda2 consistent with lambda (lambda^2 rescaled). */
197 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/* Write a 64-entry quant matrix to the bitstream, 8 bits per coefficient,
 * in zigzag scan order.
 * NOTE(review): the original also handles a NULL matrix (elided here) —
 * confirm against upstream before relying on this excerpt. */
201 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
207 for (i = 0; i < 64; i++) {
208 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
215 * init s->current_picture.qscale_table from s->lambda_table
217 void ff_init_qscale_tab(MpegEncContext *s)
219 int8_t * const qscale_table = s->current_picture.qscale_table;
/* For every macroblock, convert its lambda to a qscale using the same
 * linear lambda->qscale mapping as update_qscale(), then clamp to qmin. */
222 for (i = 0; i < s->mb_num; i++) {
223 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
224 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
225 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/* Copy the fields that motion estimation / header encoding may have
 * changed from the main context back into a slice (duplicate) context,
 * so threaded slice encoding sees consistent state. */
230 static void update_duplicate_context_after_me(MpegEncContext *dst,
233 #define COPY(a) dst->a= src->a
235 COPY(current_picture);
241 COPY(picture_in_gop_number);
242 COPY(gop_picture_number);
243 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
244 COPY(progressive_frame); // FIXME don't set in encode_header
245 COPY(partitioned_frame); // FIXME don't set in encode_header
250 * Set the given MpegEncContext to defaults for encoding.
251 * the changed fields will not depend upon the prior state of the MpegEncContext.
253 static void mpv_encode_defaults(MpegEncContext *s)
256 ff_mpv_common_defaults(s);
/* Mark the central band of the shared fcode table; the full table setup
 * (and its one-time-init guard) is elided in this excerpt. */
258 for (i = -16; i < 16; i++) {
259 default_fcode_tab[i + MAX_MV] = 1;
/* Point this context at the file-static shared ME tables. */
261 s->me.mv_penalty = default_mv_penalty;
262 s->fcode_tab = default_fcode_tab;
/* Reset per-stream counters. */
264 s->input_picture_number = 0;
265 s->picture_in_gop_number = 0;
/* Select the DCT quantization functions for this context: arch-specific
 * init first, then C fallbacks; trellis quantization replaces the default
 * quantizer (keeping the plain one as fast_dct_quantize). */
268 av_cold int ff_dct_encode_init(MpegEncContext *s) {
270 ff_dct_encode_init_x86(s);
272 if (CONFIG_H263_ENCODER)
273 ff_h263dsp_init(&s->h263dsp);
274 if (!s->dct_quantize)
275 s->dct_quantize = ff_dct_quantize_c;
277 s->denoise_dct = denoise_dct_c;
278 s->fast_dct_quantize = s->dct_quantize;
279 if (s->avctx->trellis)
280 s->dct_quantize = dct_quantize_trellis_c;
285 /* init video encoder */
/* Validate AVCodecContext options against the selected codec, copy them
 * into the MpegEncContext, allocate encoder tables, and initialize the
 * per-codec sub-encoders and rate control. Returns 0 on success or a
 * negative AVERROR.
 * NOTE(review): many original lines (returns, braces, some statements)
 * are elided in this excerpt; comments describe only visible code. */
286 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
288 MpegEncContext *s = avctx->priv_data;
289 AVCPBProperties *cpb_props;
290 int i, ret, format_supported;
292 mpv_encode_defaults(s);
/* --- pixel format validation, per codec --- */
294 switch (avctx->codec_id) {
295 case AV_CODEC_ID_MPEG2VIDEO:
296 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
297 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
298 av_log(avctx, AV_LOG_ERROR,
299 "only YUV420 and YUV422 are supported\n");
303 case AV_CODEC_ID_MJPEG:
304 case AV_CODEC_ID_AMV:
305 format_supported = 0;
306 /* JPEG color space */
307 if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
308 avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
309 avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
310 (avctx->color_range == AVCOL_RANGE_JPEG &&
311 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
312 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
313 avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
314 format_supported = 1;
315 /* MPEG color space */
316 else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
317 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
318 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
319 avctx->pix_fmt == AV_PIX_FMT_YUV444P))
320 format_supported = 1;
322 if (!format_supported) {
323 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
328 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
329 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
/* Map pixel format to internal chroma subsampling mode. */
334 switch (avctx->pix_fmt) {
335 case AV_PIX_FMT_YUVJ444P:
336 case AV_PIX_FMT_YUV444P:
337 s->chroma_format = CHROMA_444;
339 case AV_PIX_FMT_YUVJ422P:
340 case AV_PIX_FMT_YUV422P:
341 s->chroma_format = CHROMA_422;
343 case AV_PIX_FMT_YUVJ420P:
344 case AV_PIX_FMT_YUV420P:
346 s->chroma_format = CHROMA_420;
350 avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
/* --- copy deprecated public options into private fields --- */
352 #if FF_API_PRIVATE_OPT
353 FF_DISABLE_DEPRECATION_WARNINGS
354 if (avctx->rtp_payload_size)
355 s->rtp_payload_size = avctx->rtp_payload_size;
356 if (avctx->me_penalty_compensation)
357 s->me_penalty_compensation = avctx->me_penalty_compensation;
359 s->me_pre = avctx->pre_me;
360 FF_ENABLE_DEPRECATION_WARNINGS
/* --- basic stream parameters --- */
363 s->bit_rate = avctx->bit_rate;
364 s->width = avctx->width;
365 s->height = avctx->height;
366 if (avctx->gop_size > 600 &&
367 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
368 av_log(avctx, AV_LOG_WARNING,
369 "keyframe interval too large!, reducing it from %d to %d\n",
370 avctx->gop_size, 600);
371 avctx->gop_size = 600;
373 s->gop_size = avctx->gop_size;
375 if (avctx->max_b_frames > MAX_B_FRAMES) {
376 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
377 "is %d.\n", MAX_B_FRAMES);
378 avctx->max_b_frames = MAX_B_FRAMES;
380 s->max_b_frames = avctx->max_b_frames;
381 s->codec_id = avctx->codec->id;
382 s->strict_std_compliance = avctx->strict_std_compliance;
383 s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
384 s->rtp_mode = !!s->rtp_payload_size;
385 s->intra_dc_precision = avctx->intra_dc_precision;
387 // workaround some differences between how applications specify dc precision
388 if (s->intra_dc_precision < 0) {
389 s->intra_dc_precision += 8;
390 } else if (s->intra_dc_precision >= 8)
391 s->intra_dc_precision -= 8;
393 if (s->intra_dc_precision < 0) {
394 av_log(avctx, AV_LOG_ERROR,
395 "intra dc precision must be positive, note some applications use"
396 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
397 return AVERROR(EINVAL);
/* Only MPEG-2 supports a non-zero intra DC precision (up to 3). */
400 if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
401 av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
402 return AVERROR(EINVAL);
404 s->user_specified_pts = AV_NOPTS_VALUE;
406 if (s->gop_size <= 1) {
413 #if FF_API_MOTION_EST
414 FF_DISABLE_DEPRECATION_WARNINGS
415 s->me_method = avctx->me_method;
416 FF_ENABLE_DEPRECATION_WARNINGS
420 s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
423 FF_DISABLE_DEPRECATION_WARNINGS
424 if (avctx->border_masking != 0.0)
425 s->border_masking = avctx->border_masking;
426 FF_ENABLE_DEPRECATION_WARNINGS
/* Adaptive quantization is enabled when any masking option or QP-RD
 * is requested (full condition partially elided here). */
429 s->adaptive_quant = (s->avctx->lumi_masking ||
430 s->avctx->dark_masking ||
431 s->avctx->temporal_cplx_masking ||
432 s->avctx->spatial_cplx_masking ||
433 s->avctx->p_masking ||
435 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
438 s->loop_filter = !!(s->avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
/* --- rate control / VBV consistency checks --- */
/* Pick a default VBV buffer size from max_rate when only max_rate given. */
440 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
441 switch(avctx->codec_id) {
442 case AV_CODEC_ID_MPEG1VIDEO:
443 case AV_CODEC_ID_MPEG2VIDEO:
444 avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
446 case AV_CODEC_ID_MPEG4:
447 case AV_CODEC_ID_MSMPEG4V1:
448 case AV_CODEC_ID_MSMPEG4V2:
449 case AV_CODEC_ID_MSMPEG4V3:
/* Piecewise-linear interpolation between the MPEG-4 profile
 * VBV sizes (in units of 16384 bits). */
450 if (avctx->rc_max_rate >= 15000000) {
451 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
452 } else if(avctx->rc_max_rate >= 2000000) {
453 avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
454 } else if(avctx->rc_max_rate >= 384000) {
455 avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
457 avctx->rc_buffer_size = 40;
458 avctx->rc_buffer_size *= 16384;
461 if (avctx->rc_buffer_size) {
462 av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
466 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
467 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
471 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
472 av_log(avctx, AV_LOG_INFO,
473 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
476 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
477 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
481 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
482 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
486 if (avctx->rc_max_rate &&
487 avctx->rc_max_rate == avctx->bit_rate &&
488 avctx->rc_max_rate != avctx->rc_min_rate) {
489 av_log(avctx, AV_LOG_INFO,
490 "impossible bitrate constraints, this will fail\n");
/* Buffer must hold at least one frame's worth of bits. */
493 if (avctx->rc_buffer_size &&
494 avctx->bit_rate * (int64_t)avctx->time_base.num >
495 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
496 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
500 if (!s->fixed_qscale &&
501 avctx->bit_rate * av_q2d(avctx->time_base) >
502 avctx->bit_rate_tolerance) {
503 av_log(avctx, AV_LOG_WARNING,
504 "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, (int64_t)avctx->bit_rate);
505 avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
/* CBR MPEG-1/2: vbv_delay field (16 bit, 90 kHz units) may saturate. */
508 if (s->avctx->rc_max_rate &&
509 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
510 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
511 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
512 90000LL * (avctx->rc_buffer_size - 1) >
513 s->avctx->rc_max_rate * 0xFFFFLL) {
514 av_log(avctx, AV_LOG_INFO,
515 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
516 "specified vbv buffer is too large for the given bitrate!\n");
/* --- feature/codec compatibility checks --- */
519 if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
520 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
521 s->codec_id != AV_CODEC_ID_FLV1) {
522 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
526 if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
527 av_log(avctx, AV_LOG_ERROR,
528 "OBMC is only supported with simple mb decision\n");
532 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
533 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
537 if (s->max_b_frames &&
538 s->codec_id != AV_CODEC_ID_MPEG4 &&
539 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
540 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
541 av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
544 if (s->max_b_frames < 0) {
545 av_log(avctx, AV_LOG_ERROR,
546 "max b frames must be 0 or positive for mpegvideo based encoders\n");
/* Sample aspect ratio is stored in 8 bits per component in these codecs. */
550 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
551 s->codec_id == AV_CODEC_ID_H263 ||
552 s->codec_id == AV_CODEC_ID_H263P) &&
553 (avctx->sample_aspect_ratio.num > 255 ||
554 avctx->sample_aspect_ratio.den > 255)) {
555 av_log(avctx, AV_LOG_WARNING,
556 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
557 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
558 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
559 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
/* --- resolution limits per codec --- */
562 if ((s->codec_id == AV_CODEC_ID_H263 ||
563 s->codec_id == AV_CODEC_ID_H263P) &&
564 (avctx->width > 2048 ||
565 avctx->height > 1152 )) {
566 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
569 if ((s->codec_id == AV_CODEC_ID_H263 ||
570 s->codec_id == AV_CODEC_ID_H263P) &&
571 ((avctx->width &3) ||
572 (avctx->height&3) )) {
573 av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
577 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
578 (avctx->width > 4095 ||
579 avctx->height > 4095 )) {
580 av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
584 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
585 (avctx->width > 16383 ||
586 avctx->height > 16383 )) {
587 av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
591 if (s->codec_id == AV_CODEC_ID_RV10 &&
593 avctx->height&15 )) {
594 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
595 return AVERROR(EINVAL);
598 if (s->codec_id == AV_CODEC_ID_RV20 &&
601 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
602 return AVERROR(EINVAL);
605 if ((s->codec_id == AV_CODEC_ID_WMV1 ||
606 s->codec_id == AV_CODEC_ID_WMV2) &&
608 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
612 if ((s->avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
613 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
614 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
618 #if FF_API_PRIVATE_OPT
619 FF_DISABLE_DEPRECATION_WARNINGS
620 if (avctx->mpeg_quant)
621 s->mpeg_quant = avctx->mpeg_quant;
622 FF_ENABLE_DEPRECATION_WARNINGS
625 // FIXME mpeg2 uses that too
626 if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
627 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
628 av_log(avctx, AV_LOG_ERROR,
629 "mpeg2 style quantization not supported by codec\n");
633 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
634 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
638 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
639 s->avctx->mb_decision != FF_MB_DECISION_RD) {
640 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
644 #if FF_API_PRIVATE_OPT
645 FF_DISABLE_DEPRECATION_WARNINGS
646 if (avctx->scenechange_threshold)
647 s->scenechange_threshold = avctx->scenechange_threshold;
648 FF_ENABLE_DEPRECATION_WARNINGS
651 if (s->scenechange_threshold < 1000000000 &&
652 (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
653 av_log(avctx, AV_LOG_ERROR,
654 "closed gop with scene change detection are not supported yet, "
655 "set threshold to 1000000000\n");
659 if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
660 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
661 av_log(avctx, AV_LOG_ERROR,
662 "low delay forcing is only available for mpeg2\n");
665 if (s->max_b_frames != 0) {
666 av_log(avctx, AV_LOG_ERROR,
667 "b frames cannot be used with low delay\n");
672 if (s->q_scale_type == 1) {
673 if (avctx->qmax > 28) {
674 av_log(avctx, AV_LOG_ERROR,
675 "non linear quant only supports qmax <= 28 currently\n");
680 if (avctx->slices > 1 &&
681 (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
682 av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
683 return AVERROR(EINVAL);
686 if (s->avctx->thread_count > 1 &&
687 s->codec_id != AV_CODEC_ID_MPEG4 &&
688 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
689 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
690 s->codec_id != AV_CODEC_ID_MJPEG &&
691 (s->codec_id != AV_CODEC_ID_H263P)) {
692 av_log(avctx, AV_LOG_ERROR,
693 "multi threaded encoding not supported by codec\n");
697 if (s->avctx->thread_count < 1) {
698 av_log(avctx, AV_LOG_ERROR,
699 "automatic thread number detection not supported by codec, "
704 if (!avctx->time_base.den || !avctx->time_base.num) {
705 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
709 #if FF_API_PRIVATE_OPT
710 FF_DISABLE_DEPRECATION_WARNINGS
711 if (avctx->b_frame_strategy)
712 s->b_frame_strategy = avctx->b_frame_strategy;
713 if (avctx->b_sensitivity != 40)
714 s->b_sensitivity = avctx->b_sensitivity;
715 FF_ENABLE_DEPRECATION_WARNINGS
718 if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
719 av_log(avctx, AV_LOG_INFO,
720 "notice: b_frame_strategy only affects the first pass\n");
721 s->b_frame_strategy = 0;
/* Reduce the time base to lowest terms. */
724 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
726 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
727 avctx->time_base.den /= i;
728 avctx->time_base.num /= i;
/* --- default quantizer rounding biases --- */
732 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) {
733 // (a + x * 3 / 8) / x
734 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
735 s->inter_quant_bias = 0;
737 s->intra_quant_bias = 0;
739 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
742 if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
743 av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
744 return AVERROR(EINVAL);
747 #if FF_API_QUANT_BIAS
748 FF_DISABLE_DEPRECATION_WARNINGS
749 if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
750 s->intra_quant_bias = avctx->intra_quant_bias;
751 if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
752 s->inter_quant_bias = avctx->inter_quant_bias;
753 FF_ENABLE_DEPRECATION_WARNINGS
756 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
/* MPEG-4 stores time_base.den in a 16-bit field. */
758 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
759 s->avctx->time_base.den > (1 << 16) - 1) {
760 av_log(avctx, AV_LOG_ERROR,
761 "timebase %d/%d not supported by MPEG 4 standard, "
762 "the maximum admitted value for the timebase denominator "
763 "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
767 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
/* --- per-codec output format / feature setup --- */
769 switch (avctx->codec->id) {
770 case AV_CODEC_ID_MPEG1VIDEO:
771 s->out_format = FMT_MPEG1;
772 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
773 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
775 case AV_CODEC_ID_MPEG2VIDEO:
776 s->out_format = FMT_MPEG1;
777 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
778 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
781 case AV_CODEC_ID_MJPEG:
782 case AV_CODEC_ID_AMV:
783 s->out_format = FMT_MJPEG;
784 s->intra_only = 1; /* force intra only for jpeg */
785 if (!CONFIG_MJPEG_ENCODER ||
786 ff_mjpeg_encode_init(s) < 0)
791 case AV_CODEC_ID_H261:
792 if (!CONFIG_H261_ENCODER)
794 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
795 av_log(avctx, AV_LOG_ERROR,
796 "The specified picture size of %dx%d is not valid for the "
797 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
798 s->width, s->height);
801 s->out_format = FMT_H261;
804 s->rtp_mode = 0; /* Sliced encoding not supported */
806 case AV_CODEC_ID_H263:
807 if (!CONFIG_H263_ENCODER)
809 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
810 s->width, s->height) == 8) {
811 av_log(avctx, AV_LOG_ERROR,
812 "The specified picture size of %dx%d is not valid for "
813 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
814 "352x288, 704x576, and 1408x1152. "
815 "Try H.263+.\n", s->width, s->height);
818 s->out_format = FMT_H263;
822 case AV_CODEC_ID_H263P:
823 s->out_format = FMT_H263;
/* AC prediction implies H.263+ advanced intra coding / modified quant. */
826 s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
827 s->modified_quant = s->h263_aic;
828 s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
829 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
832 /* These are just to be sure */
836 case AV_CODEC_ID_FLV1:
837 s->out_format = FMT_H263;
838 s->h263_flv = 2; /* format = 1; 11-bit codes */
839 s->unrestricted_mv = 1;
840 s->rtp_mode = 0; /* don't allow GOB */
844 case AV_CODEC_ID_RV10:
845 s->out_format = FMT_H263;
849 case AV_CODEC_ID_RV20:
850 s->out_format = FMT_H263;
853 s->modified_quant = 1;
857 s->unrestricted_mv = 0;
859 case AV_CODEC_ID_MPEG4:
860 s->out_format = FMT_H263;
862 s->unrestricted_mv = 1;
863 s->low_delay = s->max_b_frames ? 0 : 1;
864 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
866 case AV_CODEC_ID_MSMPEG4V2:
867 s->out_format = FMT_H263;
869 s->unrestricted_mv = 1;
870 s->msmpeg4_version = 2;
874 case AV_CODEC_ID_MSMPEG4V3:
875 s->out_format = FMT_H263;
877 s->unrestricted_mv = 1;
878 s->msmpeg4_version = 3;
879 s->flipflop_rounding = 1;
883 case AV_CODEC_ID_WMV1:
884 s->out_format = FMT_H263;
886 s->unrestricted_mv = 1;
887 s->msmpeg4_version = 4;
888 s->flipflop_rounding = 1;
892 case AV_CODEC_ID_WMV2:
893 s->out_format = FMT_H263;
895 s->unrestricted_mv = 1;
896 s->msmpeg4_version = 5;
897 s->flipflop_rounding = 1;
905 #if FF_API_PRIVATE_OPT
906 FF_DISABLE_DEPRECATION_WARNINGS
907 if (avctx->noise_reduction)
908 s->noise_reduction = avctx->noise_reduction;
909 FF_ENABLE_DEPRECATION_WARNINGS
912 avctx->has_b_frames = !s->low_delay;
916 s->progressive_frame =
917 s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
918 AV_CODEC_FLAG_INTERLACED_ME) ||
/* --- allocate shared state, DSP contexts and encoder tables --- */
923 if (ff_mpv_common_init(s) < 0)
926 ff_fdctdsp_init(&s->fdsp, avctx);
927 ff_me_cmp_init(&s->mecc, avctx);
928 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
929 ff_pixblockdsp_init(&s->pdsp, avctx);
930 ff_qpeldsp_init(&s->qdsp);
932 if (s->msmpeg4_version) {
933 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
934 2 * 2 * (MAX_LEVEL + 1) *
935 (MAX_RUN + 1) * 2 * sizeof(int), fail);
937 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
/* 64 coefficients x 32 qscales for each quantizer matrix table. */
939 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
940 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail);
941 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
942 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
943 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
944 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
945 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
946 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
947 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
948 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
951 if (s->noise_reduction) {
952 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
953 2 * 64 * sizeof(uint16_t), fail);
956 ff_dct_encode_init(s);
958 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
959 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
961 if (s->slice_context_count > 1) {
964 if (avctx->codec_id == AV_CODEC_ID_H263P)
965 s->h263_slice_structured = 1;
968 s->quant_precision = 5;
970 #if FF_API_PRIVATE_OPT
971 FF_DISABLE_DEPRECATION_WARNINGS
972 if (avctx->frame_skip_threshold)
973 s->frame_skip_threshold = avctx->frame_skip_threshold;
974 if (avctx->frame_skip_factor)
975 s->frame_skip_factor = avctx->frame_skip_factor;
976 if (avctx->frame_skip_exp)
977 s->frame_skip_exp = avctx->frame_skip_exp;
978 if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
979 s->frame_skip_cmp = avctx->frame_skip_cmp;
980 FF_ENABLE_DEPRECATION_WARNINGS
983 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, s->avctx->ildct_cmp);
984 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
/* Per-format sub-encoder initialization. */
986 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
987 ff_h261_encode_init(s);
988 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
989 ff_h263_encode_init(s);
990 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
991 if ((ret = ff_msmpeg4_encode_init(s)) < 0)
993 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
994 && s->out_format == FMT_MPEG1)
995 ff_mpeg1_encode_init(s);
/* Choose default quant matrices (IDCT-permuted), then override with
 * any user-supplied matrices. */
998 for (i = 0; i < 64; i++) {
999 int j = s->idsp.idct_permutation[i];
1000 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
1002 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
1003 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
1004 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1005 s->intra_matrix[j] =
1006 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1009 s->chroma_intra_matrix[j] =
1010 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1011 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1013 if (s->avctx->intra_matrix)
1014 s->intra_matrix[j] = s->avctx->intra_matrix[i];
1015 if (s->avctx->inter_matrix)
1016 s->inter_matrix[j] = s->avctx->inter_matrix[i];
1019 /* precompute matrix */
1020 /* for mjpeg, we do include qscale in the matrix */
1021 if (s->out_format != FMT_MJPEG) {
1022 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
1023 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1025 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
1026 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1030 if (ff_rate_control_init(s) < 0)
/* --- copy deprecated rate-control options --- */
1033 #if FF_API_ERROR_RATE
1034 FF_DISABLE_DEPRECATION_WARNINGS
1035 if (avctx->error_rate)
1036 s->error_rate = avctx->error_rate;
1037 FF_ENABLE_DEPRECATION_WARNINGS;
1040 #if FF_API_NORMALIZE_AQP
1041 FF_DISABLE_DEPRECATION_WARNINGS
1042 if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP)
1043 s->mpv_flags |= FF_MPV_FLAG_NAQ;
1044 FF_ENABLE_DEPRECATION_WARNINGS;
1048 FF_DISABLE_DEPRECATION_WARNINGS
1049 if (avctx->flags & CODEC_FLAG_MV0)
1050 s->mpv_flags |= FF_MPV_FLAG_MV0;
1051 FF_ENABLE_DEPRECATION_WARNINGS
1055 FF_DISABLE_DEPRECATION_WARNINGS
1056 if (avctx->rc_qsquish != 0.0)
1057 s->rc_qsquish = avctx->rc_qsquish;
1058 if (avctx->rc_qmod_amp != 0.0)
1059 s->rc_qmod_amp = avctx->rc_qmod_amp;
1060 if (avctx->rc_qmod_freq)
1061 s->rc_qmod_freq = avctx->rc_qmod_freq;
1062 if (avctx->rc_buffer_aggressivity != 1.0)
1063 s->rc_buffer_aggressivity = avctx->rc_buffer_aggressivity;
1064 if (avctx->rc_initial_cplx != 0.0)
1065 s->rc_initial_cplx = avctx->rc_initial_cplx;
1067 s->lmin = avctx->lmin;
1069 s->lmax = avctx->lmax;
1072 av_freep(&s->rc_eq);
1073 s->rc_eq = av_strdup(avctx->rc_eq);
1075 return AVERROR(ENOMEM);
1077 FF_ENABLE_DEPRECATION_WARNINGS
1080 #if FF_API_PRIVATE_OPT
1081 FF_DISABLE_DEPRECATION_WARNINGS
1082 if (avctx->brd_scale)
1083 s->brd_scale = avctx->brd_scale;
1085 if (avctx->prediction_method)
1086 s->pred = avctx->prediction_method + 1;
1087 FF_ENABLE_DEPRECATION_WARNINGS
/* B-frame strategy 2 needs downscaled scratch frames for lookahead. */
1090 if (s->b_frame_strategy == 2) {
1091 for (i = 0; i < s->max_b_frames + 2; i++) {
1092 s->tmp_frames[i] = av_frame_alloc();
1093 if (!s->tmp_frames[i])
1094 return AVERROR(ENOMEM);
1096 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1097 s->tmp_frames[i]->width = s->width >> s->brd_scale;
1098 s->tmp_frames[i]->height = s->height >> s->brd_scale;
1100 ret = av_frame_get_buffer(s->tmp_frames[i], 32);
/* Publish CPB properties as stream side data. */
1106 cpb_props = ff_add_cpb_side_data(avctx);
1108 return AVERROR(ENOMEM);
1109 cpb_props->max_bitrate = avctx->rc_max_rate;
1110 cpb_props->min_bitrate = avctx->rc_min_rate;
1111 cpb_props->avg_bitrate = avctx->bit_rate;
1112 cpb_props->buffer_size = avctx->rc_buffer_size;
/* fail: tear down everything allocated so far. */
1116 ff_mpv_encode_end(avctx);
1117 return AVERROR_UNKNOWN;
/* Free everything allocated by ff_mpv_encode_init(); safe to call on a
 * partially initialized context (used as the init failure path). */
1120 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1122 MpegEncContext *s = avctx->priv_data;
1125 ff_rate_control_uninit(s);
1127 ff_mpv_common_end(s);
1128 if (CONFIG_MJPEG_ENCODER &&
1129 s->out_format == FMT_MJPEG)
1130 ff_mjpeg_encode_close(s);
1132 av_freep(&avctx->extradata);
1134 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1135 av_frame_free(&s->tmp_frames[i]);
1137 ff_free_picture_tables(&s->new_picture);
1138 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1140 av_freep(&s->avctx->stats_out);
1141 av_freep(&s->ac_stats);
/* The chroma tables may alias the luma tables; free them only when they
 * are distinct, then clear the pointers to avoid a double free. */
1143 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1144 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1145 s->q_chroma_intra_matrix= NULL;
1146 s->q_chroma_intra_matrix16= NULL;
1147 av_freep(&s->q_intra_matrix);
1148 av_freep(&s->q_inter_matrix);
1149 av_freep(&s->q_intra_matrix16);
1150 av_freep(&s->q_inter_matrix16);
1151 av_freep(&s->input_picture);
1152 av_freep(&s->reordered_input_picture);
1153 av_freep(&s->dct_offset);
/*
 * Sum of absolute errors of a 16x16 block against a constant reference
 * value 'ref' (typically the block mean) — a cheap flatness/activity
 * measure used by get_intra_count().
 * NOTE(review): declarations/return elided in this chunk.
 */
1158 static int get_sae(uint8_t *src, int ref, int stride)
1163     for (y = 0; y < 16; y++) {
1164         for (x = 0; x < 16; x++) {
1165             acc += FFABS(src[x + y * stride] - ref);
/*
 * Count 16x16 macroblocks for which intra coding looks cheaper than
 * inter coding: a block votes "intra" when its deviation from its own
 * mean (SAE + 500 bias) is below its SAD against the reference frame.
 * Used by the b_frame_strategy == 1 heuristic.
 * NOTE(review): w/acc declarations and the return are elided here.
 */
1172 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1173                            uint8_t *ref, int stride)
     /* Only full 16-pixel rows are scanned; a partial bottom row is ignored. */
1179     h = s->height & ~15;
1181     for (y = 0; y < h; y += 16) {
1182         for (x = 0; x < w; x += 16) {
1183             int offset = x + y * stride;
1184             int sad  = s->mecc.sad[0](NULL, src + offset, ref + offset,
             /* pix_sum of 256 pixels, rounded to the block mean. */
1186             int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1187             int sae  = get_sae(src + offset, mean, stride);
             /* +500 biases the decision towards inter coding. */
1189             acc += sae + 500 < sad;
/*
 * Thin wrapper: allocate (or wrap, when 'shared' is set) the buffers of
 * one encoder Picture with this context's geometry, updating
 * s->linesize / s->uvlinesize as a side effect.
 */
1195 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1197     return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1198                             s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1199                             s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1200                             &s->linesize, &s->uvlinesize);
/*
 * Take one user-supplied frame into the encoder's input queue:
 *  - derive/validate its pts (guessing one if absent),
 *  - either reference the user's buffers directly ("direct" path, when
 *    strides/alignment match) or copy the planes into a freshly
 *    allocated Picture (with edge padding),
 *  - append the Picture at s->input_picture[encoding_delay], shifting
 *    the queue to keep slot 0 filled while flushing.
 * NOTE(review): many lines (braces, 'direct' computation, padding loop
 * bodies, error paths, return) are elided in this chunk; code is kept
 * byte-identical to what is visible.
 */
1203 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1205     Picture *pic = NULL;
1207     int i, display_picture_number = 0, ret;
     /* With B-frames output lags input by max_b_frames; otherwise by one
      * frame unless low_delay is set. */
1208     int encoding_delay = s->max_b_frames ? s->max_b_frames
1209                                          : (s->low_delay ? 0 : 1);
1210     int flush_offset = 1;
1215         display_picture_number = s->input_picture_number++;
1217         if (pts != AV_NOPTS_VALUE) {
1218             if (s->user_specified_pts != AV_NOPTS_VALUE) {
1219                 int64_t last = s->user_specified_pts;
             /* Reject non-monotonic user timestamps. */
1222                     av_log(s->avctx, AV_LOG_ERROR,
1223                            "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1225                     return AVERROR(EINVAL);
1228                 if (!s->low_delay && display_picture_number == 1)
1229                     s->dts_delta = pts - last;
1231             s->user_specified_pts = pts;
         /* No pts on this frame: extrapolate from the previous one, or
          * fall back to the display picture number. */
1233             if (s->user_specified_pts != AV_NOPTS_VALUE) {
1234                 s->user_specified_pts =
1235                 pts = s->user_specified_pts + 1;
1236                 av_log(s->avctx, AV_LOG_INFO,
1237                        "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1240                 pts = display_picture_number;
     /* Direct (zero-copy) use of the caller's frame is only possible when
      * its buffers, strides and alignment exactly match the encoder's. */
1244         if (!pic_arg->buf[0] ||
1245             pic_arg->linesize[0] != s->linesize ||
1246             pic_arg->linesize[1] != s->uvlinesize ||
1247             pic_arg->linesize[2] != s->uvlinesize)
1249         if ((s->width & 15) || (s->height & 15))
1251         if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1253         if (s->linesize & (STRIDE_ALIGN-1))
1256         ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1257                 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1259         i = ff_find_unused_picture(s->avctx, s->picture, direct);
1263         pic = &s->picture[i];
1267             if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1270         ret = alloc_picture(s, pic, direct);
         /* Caller's planes already sit at the in-place offset inside our
          * buffers — nothing to copy. Presumably only possible after a
          * previous in-place round-trip; TODO confirm against elided code. */
1275             if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1276                 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1277                 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1280                 int h_chroma_shift, v_chroma_shift;
1281                 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
             /* Plane-by-plane copy into the encoder-owned buffers. */
1285                 for (i = 0; i < 3; i++) {
1286                     int src_stride = pic_arg->linesize[i];
1287                     int dst_stride = i ? s->uvlinesize : s->linesize;
1288                     int h_shift = i ? h_chroma_shift : 0;
1289                     int v_shift = i ? v_chroma_shift : 0;
1290                     int w = s->width  >> h_shift;
1291                     int h = s->height >> v_shift;
1292                     uint8_t *src = pic_arg->data[i];
1293                     uint8_t *dst = pic->f->data[i];
                 /* Interlaced MPEG-2 with tall 32-alignment needs extra
                  * vertical padding (vpad presumably set here — elided). */
1296                     if (   s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1297                         && !s->progressive_sequence
1298                         && FFALIGN(s->height, 32) - s->height > 16)
1301                     if (!s->avctx->rc_buffer_size)
1302                         dst += INPLACE_OFFSET;
1304                     if (src_stride == dst_stride)
1305                         memcpy(dst, src, src_stride * h);
1308                         uint8_t *dst2 = dst;
1310                             memcpy(dst2, src, w);
                 /* Replicate edge pixels when dimensions are not MB-aligned. */
1315                     if ((s->width & 15) || (s->height & (vpad-1))) {
1316                         s->mpvencdsp.draw_edges(dst, dst_stride,
1325         ret = av_frame_copy_props(pic->f, pic_arg);
1329         pic->f->display_picture_number = display_picture_number;
1330         pic->f->pts = pts; // we set this here to avoid modifiying pic_arg
1332     /* Flushing: When we have not received enough input frames,
1333      * ensure s->input_picture[0] contains the first picture */
1334     for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1335         if (s->input_picture[flush_offset])
1338     if (flush_offset <= 1)
1341         encoding_delay = encoding_delay - flush_offset + 1;
1344     /* shift buffer entries */
1345     for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1346         s->input_picture[i - flush_offset] = s->input_picture[i];
1348     s->input_picture[encoding_delay] = (Picture*) pic;
/*
 * Decide whether picture 'p' is similar enough to reference 'ref' to be
 * skipped entirely (frame dropping for rate control). Accumulates a
 * per-8x8-block difference metric over all three planes, the
 * accumulation rule selected by |frame_skip_exp| (max / L1 / L2 / L3 /
 * L4); a negative exponent applies the inverse power afterwards.
 * Returns non-zero to skip (return statements elided in this chunk).
 */
1353 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1357     int64_t score64 = 0;
1359     for (plane = 0; plane < 3; plane++) {
1360         const int stride = p->f->linesize[plane];
         /* Luma is scanned at 2x the macroblock grid (16x16 -> four 8x8
          * blocks), chroma at 1x. */
1361         const int bw = plane ? 1 : 2;
1362         for (y = 0; y < s->mb_height * bw; y++) {
1363             for (x = 0; x < s->mb_width * bw; x++) {
                 /* Non-shared pictures carry a 16-byte in-place offset. */
1364                 int off = p->shared ? 0 : 16;
1365                 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1366                 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1367                 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1369                 switch (FFABS(s->frame_skip_exp)) {
1370                 case 0: score    =  FFMAX(score, v);                  break;
1371                 case 1: score   += FFABS(v);                          break;
1372                 case 2: score64 += v * (int64_t)v;                    break;
1373                 case 3: score64 += FFABS(v * (int64_t)v * v);         break;
1374                 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
     /* Negative exponent: normalize per-MB and invert the power. */
1383     if (s->frame_skip_exp < 0)
1384         score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1385                       -1.0/s->frame_skip_exp);
1387     if (score64 < s->frame_skip_threshold)
     /* Second gate scales with the current lambda (QP-dependent). */
1389     if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
/*
 * Encode one frame with an auxiliary AVCodecContext (used by the
 * B-frame-count estimator) and, per the elided lines, presumably return
 * the output size in bits or a negative error — TODO confirm upstream.
 * Uses the legacy avcodec_encode_video2() API.
 */
1394 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1396     AVPacket pkt = { 0 };
1397     int ret, got_output;
1399     av_init_packet(&pkt);
1400     ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
     /* Packet data is not needed, only its size; release it. */
1405     av_packet_unref(&pkt);
/*
 * b_frame_strategy == 2: pick the number of B-frames by brute force.
 * Downscale (by 1<<brd_scale) the queued input pictures into
 * s->tmp_frames, then for each candidate count j encode the mini-GOP
 * with a scratch encoder and keep the j with the lowest rate-distortion
 * score (bits * lambda2 + SSE from c->error[]).
 * Returns the best B-frame count, or negative on error.
 * NOTE(review): braces, some cleanup and the best_rd bookkeeping are
 * elided in this chunk; code kept byte-identical.
 */
1409 static int estimate_best_b_count(MpegEncContext *s)
1411     AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1412     AVCodecContext *c = avcodec_alloc_context3(NULL);
1413     const int scale = s->brd_scale;
1414     int i, j, out_size, p_lambda, b_lambda, lambda2;
1415     int64_t best_rd  = INT64_MAX;
1416     int best_b_count = -1;
1419         return AVERROR(ENOMEM);
1420     av_assert0(scale >= 0 && scale <= 3);
     /* Reuse the lambdas from the last encoded P/B frames as quality
      * targets for the trial encodes. */
1423     //s->next_picture_ptr->quality;
1424     p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1425     //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1426     b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1427     if (!b_lambda) // FIXME we should do this somewhere else
1428         b_lambda = p_lambda;
1429     lambda2  = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
     /* Configure the scratch encoder at reduced resolution, mirroring
      * the relevant settings of the real one. */
1432     c->width        = s->width  >> scale;
1433     c->height       = s->height >> scale;
1434     c->flags        = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1435     c->flags       |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1436     c->mb_decision  = s->avctx->mb_decision;
1437     c->me_cmp       = s->avctx->me_cmp;
1438     c->mb_cmp       = s->avctx->mb_cmp;
1439     c->me_sub_cmp   = s->avctx->me_sub_cmp;
1440     c->pix_fmt      = AV_PIX_FMT_YUV420P;
1441     c->time_base    = s->avctx->time_base;
1442     c->max_b_frames = s->max_b_frames;
1444     if (avcodec_open2(c, codec, NULL) < 0)
     /* Frame 0 is the previous reference; frames 1..N+1 are the queued
      * inputs, each shrunk into tmp_frames[i]. */
1447     for (i = 0; i < s->max_b_frames + 2; i++) {
1448         Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1449                                                 s->next_picture_ptr;
1452         if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1453             pre_input = *pre_input_ptr;
1454             memcpy(data, pre_input_ptr->f->data, sizeof(data));
         /* Non-shared input pictures carry the in-place offset. */
1456             if (!pre_input.shared && i) {
1457                 data[0] += INPLACE_OFFSET;
1458                 data[1] += INPLACE_OFFSET;
1459                 data[2] += INPLACE_OFFSET;
1462             s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1463                                        s->tmp_frames[i]->linesize[0],
1465                                        pre_input.f->linesize[0],
1466                                        c->width, c->height);
1467             s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1468                                        s->tmp_frames[i]->linesize[1],
1470                                        pre_input.f->linesize[1],
1471                                        c->width >> 1, c->height >> 1);
1472             s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1473                                        s->tmp_frames[i]->linesize[2],
1475                                        pre_input.f->linesize[2],
1476                                        c->width >> 1, c->height >> 1);
     /* Try every candidate B-run length j. */
1480     for (j = 0; j < s->max_b_frames + 1; j++) {
1483         if (!s->input_picture[j])
1486         c->error[0] = c->error[1] = c->error[2] = 0;
         /* Trial GOP always starts with an I frame at minimum lambda. */
1488         s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1489         s->tmp_frames[0]->quality   = 1 * FF_QP2LAMBDA;
1491         out_size = encode_frame(c, s->tmp_frames[0]);
1493         //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1495         for (i = 0; i < s->max_b_frames + 1; i++) {
             /* Every (j+1)-th frame (and the final one) is a P frame. */
1496             int is_p = i % (j + 1) == j || i == s->max_b_frames;
1498             s->tmp_frames[i + 1]->pict_type = is_p ?
1499                                               AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1500             s->tmp_frames[i + 1]->quality   = is_p ? p_lambda : b_lambda;
1502             out_size = encode_frame(c, s->tmp_frames[i + 1]);
1504             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1507         /* get the delayed frames */
1509             out_size = encode_frame(c, NULL);
1510             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
         /* Add the distortion (SSE) accumulated by the scratch encoder. */
1513         rd += c->error[0] + c->error[1] + c->error[2];
1521     avcodec_free_context(&c);
1523     return best_b_count;
/*
 * Choose the next picture to encode and its coding type:
 *  - shift the reordered queue,
 *  - optionally drop the frame entirely (skip_check),
 *  - force I when there is no reference or in intra-only mode,
 *  - otherwise pick the B-run length per b_frame_strategy (0 fixed,
 *    1 intra-count heuristic, 2 brute force) and reorder input so the
 *    future reference precedes its B frames,
 *  - finally reference the selected picture into s->new_picture,
 *    allocating a private copy when the input is shared or VBV is on.
 * NOTE(review): several lines (loop bodies, error paths, returns) are
 * elided in this chunk; code kept byte-identical.
 */
1526 static int select_input_picture(MpegEncContext *s)
1530     for (i = 1; i < MAX_PICTURE_COUNT; i++)
1531         s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1532     s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1534     /* set next picture type & ordering */
1535     if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1536         if (s->frame_skip_threshold || s->frame_skip_factor) {
1537             if (s->picture_in_gop_number < s->gop_size &&
1538                 s->next_picture_ptr &&
1539                 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1540                 // FIXME check that te gop check above is +-1 correct
1541                 av_frame_unref(s->input_picture[0]->f);
             /* Account a zero-size frame to the VBV model. */
1543                 ff_vbv_update(s, 0);
         /* No reference available (stream start) or intra-only: emit I. */
1549         if (/*s->picture_in_gop_number >= s->gop_size ||*/
1550             !s->next_picture_ptr || s->intra_only) {
1551             s->reordered_input_picture[0] = s->input_picture[0];
1552             s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1553             s->reordered_input_picture[0]->f->coded_picture_number =
1554                 s->coded_picture_number++;
         /* Two-pass: take picture types from the first-pass log. */
1558             if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1559                 for (i = 0; i < s->max_b_frames + 1; i++) {
1560                     int pict_num = s->input_picture[0]->f->display_picture_number + i;
1562                     if (pict_num >= s->rc_context.num_entries)
1564                     if (!s->input_picture[i]) {
1565                         s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1569                     s->input_picture[i]->f->pict_type =
1570                         s->rc_context.entry[pict_num].new_pict_type;
1574             if (s->b_frame_strategy == 0) {
1575                 b_frames = s->max_b_frames;
1576                 while (b_frames && !s->input_picture[b_frames])
1578             } else if (s->b_frame_strategy == 1) {
             /* Heuristic: cache an intra-count score per queued frame,
              * then stop the B run where the scene gets too "intra". */
1579                 for (i = 1; i < s->max_b_frames + 1; i++) {
1580                     if (s->input_picture[i] &&
1581                         s->input_picture[i]->b_frame_score == 0) {
1582                         s->input_picture[i]->b_frame_score =
1584                                s->input_picture[i    ]->f->data[0],
1585                                s->input_picture[i - 1]->f->data[0],
1589                 for (i = 0; i < s->max_b_frames + 1; i++) {
1590                     if (!s->input_picture[i] ||
1591                         s->input_picture[i]->b_frame_score - 1 >
1592                             s->mb_num / s->b_sensitivity)
1596                 b_frames = FFMAX(0, i - 1);
             /* Reset cached scores for the consumed frames. */
1599                 for (i = 0; i < b_frames + 1; i++) {
1600                     s->input_picture[i]->b_frame_score = 0;
1602             } else if (s->b_frame_strategy == 2) {
1603                 b_frames = estimate_best_b_count(s);
         /* A user-forced non-B type inside the run truncates it. */
1608             for (i = b_frames - 1; i >= 0; i--) {
1609                 int type = s->input_picture[i]->f->pict_type;
1610                 if (type && type != AV_PICTURE_TYPE_B)
1613             if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1614                 b_frames == s->max_b_frames) {
1615                 av_log(s->avctx, AV_LOG_ERROR,
1616                        "warning, too many b frames in a row\n");
         /* Respect GOP boundaries (strict and closed GOP variants). */
1619             if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1620                 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1621                     s->gop_size > s->picture_in_gop_number) {
1622                     b_frames = s->gop_size - s->picture_in_gop_number - 1;
1624                     if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1626                     s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1630             if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1631                 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
         /* Reorder: the future reference (P or forced I) goes out first,
          * followed by the B frames in display order. */
1634             s->reordered_input_picture[0] = s->input_picture[b_frames];
1635             if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1636                 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1637             s->reordered_input_picture[0]->f->coded_picture_number =
1638                 s->coded_picture_number++;
1639             for (i = 0; i < b_frames; i++) {
1640                 s->reordered_input_picture[i + 1] = s->input_picture[i];
1641                 s->reordered_input_picture[i + 1]->f->pict_type =
1643                 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1644                     s->coded_picture_number++;
1649     ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1651     if (s->reordered_input_picture[0]) {
         /* Non-B pictures become references (3 = both fields). */
1652         s->reordered_input_picture[0]->reference =
1653             s->reordered_input_picture[0]->f->pict_type !=
1654                 AV_PICTURE_TYPE_B ? 3 : 0;
1656         if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1659         if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1660             // input is a shared pix, so we can't modifiy it -> alloc a new
1661             // one & ensure that the shared one is reuseable
1664             int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1667             pic = &s->picture[i];
1669             pic->reference = s->reordered_input_picture[0]->reference;
1670             if (alloc_picture(s, pic, 0) < 0) {
1674             ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1678             /* mark us unused / free shared pic */
1679             av_frame_unref(s->reordered_input_picture[0]->f);
1680             s->reordered_input_picture[0]->shared = 0;
1682             s->current_picture_ptr = pic;
1684             // input is not a shared pix -> reuse buffer for current_pix
1685             s->current_picture_ptr = s->reordered_input_picture[0];
1686             for (i = 0; i < 4; i++) {
1687                 s->new_picture.f->data[i] += INPLACE_OFFSET;
1690         ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1691         if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1692                                        s->current_picture_ptr)) < 0)
1695         s->picture_number = s->new_picture.f->display_picture_number;
/*
 * Post-encode bookkeeping for the just-coded frame: pad the reference
 * picture's borders (needed by unrestricted MV search for the next
 * frame), remember last picture type / lambda, and mirror state into
 * the deprecated coded_frame / error fields where still compiled in.
 */
1700 static void frame_end(MpegEncContext *s)
     /* Edge padding is only needed for pictures that will serve as
      * references (condition partially elided in this chunk). */
1702     if (s->unrestricted_mv &&
1703         s->current_picture.reference &&
1705         const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1706         int hshift = desc->log2_chroma_w;
1707         int vshift = desc->log2_chroma_h;
1708         s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1709                                 s->current_picture.f->linesize[0],
1710                                 s->h_edge_pos, s->v_edge_pos,
1711                                 EDGE_WIDTH, EDGE_WIDTH,
1712                                 EDGE_TOP | EDGE_BOTTOM);
         /* Chroma planes: same padding scaled by the subsampling shifts. */
1713         s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1714                                 s->current_picture.f->linesize[1],
1715                                 s->h_edge_pos >> hshift,
1716                                 s->v_edge_pos >> vshift,
1717                                 EDGE_WIDTH >> hshift,
1718                                 EDGE_WIDTH >> vshift,
1719                                 EDGE_TOP | EDGE_BOTTOM);
1720         s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1721                                 s->current_picture.f->linesize[2],
1722                                 s->h_edge_pos >> hshift,
1723                                 s->v_edge_pos >> vshift,
1724                                 EDGE_WIDTH >> hshift,
1725                                 EDGE_WIDTH >> vshift,
1726                                 EDGE_TOP | EDGE_BOTTOM);
     /* Seed lambda for the next frame of the same type. */
1731     s->last_pict_type                 = s->pict_type;
1732     s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1733     if (s->pict_type!= AV_PICTURE_TYPE_B)
1734         s->last_non_b_pict_type = s->pict_type;
1736 #if FF_API_CODED_FRAME
1737 FF_DISABLE_DEPRECATION_WARNINGS
1738     av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1739 FF_ENABLE_DEPRECATION_WARNINGS
1741 #if FF_API_ERROR_FRAME
1742 FF_DISABLE_DEPRECATION_WARNINGS
1743     memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1744            sizeof(s->current_picture.encoding_error));
1745 FF_ENABLE_DEPRECATION_WARNINGS
/*
 * Refresh the per-coefficient DCT noise-reduction offsets from the
 * running error statistics, separately for intra and inter blocks.
 * Counters are halved once they exceed 2^16 so the statistics form a
 * decaying average rather than growing without bound.
 */
1749 static void update_noise_reduction(MpegEncContext *s)
1753     for (intra = 0; intra < 2; intra++) {
1754         if (s->dct_count[intra] > (1 << 16)) {
1755             for (i = 0; i < 64; i++) {
1756                 s->dct_error_sum[intra][i] >>= 1;
1758             s->dct_count[intra] >>= 1;
         /* offset = noise_reduction * count / error_sum, rounded;
          * +1 in the divisor avoids division by zero. */
1761         for (i = 0; i < 64; i++) {
1762             s->dct_offset[intra][i] = (s->noise_reduction *
1763                                        s->dct_count[intra] +
1764                                        s->dct_error_sum[intra][i] / 2) /
1765                                       (s->dct_error_sum[intra][i] + 1);
/*
 * Per-frame setup before encoding: rotate the last/next reference
 * pointers, take references on current/last/next pictures, adjust data
 * pointers and linesizes for field pictures, select the matching
 * dct_unquantize functions, and refresh noise-reduction tables.
 * Returns 0 or a negative error (return lines elided in this chunk).
 */
1770 static int frame_start(MpegEncContext *s)
1774     /* mark & release old frames */
1775     if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1776         s->last_picture_ptr != s->next_picture_ptr &&
1777         s->last_picture_ptr->f->buf[0]) {
1778         ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1781     s->current_picture_ptr->f->pict_type = s->pict_type;
1782     s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1784     ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1785     if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1786                                    s->current_picture_ptr)) < 0)
     /* Non-B frames become the new forward reference chain. */
1789     if (s->pict_type != AV_PICTURE_TYPE_B) {
1790         s->last_picture_ptr = s->next_picture_ptr;
1792             s->next_picture_ptr = s->current_picture_ptr;
1795     if (s->last_picture_ptr) {
1796         ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1797         if (s->last_picture_ptr->f->buf[0] &&
1798             (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1799                                        s->last_picture_ptr)) < 0)
1802     if (s->next_picture_ptr) {
1803         ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1804         if (s->next_picture_ptr->f->buf[0] &&
1805             (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1806                                        s->next_picture_ptr)) < 0)
     /* Field coding: double the strides and, for the bottom field,
      * start one line down. */
1810     if (s->picture_structure!= PICT_FRAME) {
1812         for (i = 0; i < 4; i++) {
1813             if (s->picture_structure == PICT_BOTTOM_FIELD) {
1814                 s->current_picture.f->data[i] +=
1815                     s->current_picture.f->linesize[i];
1817             s->current_picture.f->linesize[i] *= 2;
1818             s->last_picture.f->linesize[i]    *= 2;
1819             s->next_picture.f->linesize[i]    *= 2;
     /* Pick unquantizers matching the bitstream flavor. */
1823     if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1824         s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1825         s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1826     } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1827         s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1828         s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1830         s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1831         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1834     if (s->dct_error_sum) {
1835         av_assert2(s->noise_reduction && s->encoding);
1836         update_noise_reduction(s);
/*
 * Top-level encode entry point (legacy encode2 callback): queue the
 * input frame, select/reorder the next picture, encode it (re-encoding
 * at a higher lambda if VBV would overflow), write stuffing bits,
 * patch vbv_delay for CBR MPEG-1/2, and fill the output packet with
 * pts/dts/flags/side data.
 * NOTE(review): numerous lines (gotos, braces, else-branches, returns)
 * are elided in this chunk; code kept byte-identical.
 */
1842 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1843                           const AVFrame *pic_arg, int *got_packet)
1845     MpegEncContext *s = avctx->priv_data;
1846     int i, stuffing_count, ret;
1847     int context_count = s->slice_context_count;
1849     s->vbv_ignore_qmax = 0;
1851     s->picture_in_gop_number++;
1853     if (load_input_picture(s, pic_arg) < 0)
1856     if (select_input_picture(s) < 0) {
     /* output? */
1861     if (s->new_picture.f->data[0]) {
         /* With a single slice context and no caller buffer we can let the
          * internal byte buffer grow instead of preallocating worst case. */
1862         int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1863         int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1865                                               s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1866         if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
         /* H.263 macroblock-info side data (condition elided here). */
1869             s->mb_info_ptr = av_packet_new_side_data(pkt,
1870                                                      AV_PKT_DATA_H263_MB_INFO,
1871                                                      s->mb_width*s->mb_height*12);
1872             s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
         /* Split the output buffer proportionally among slice threads. */
1875         for (i = 0; i < context_count; i++) {
1876             int start_y = s->thread_context[i]->start_mb_y;
1877             int   end_y = s->thread_context[i]->  end_mb_y;
1878             int h       = s->mb_height;
1879             uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1880             uint8_t *end   = pkt->data + (size_t)(((int64_t) pkt->size) *   end_y / h);
1882             init_put_bits(&s->thread_context[i]->pb, start, end - start);
1885         s->pict_type = s->new_picture.f->pict_type;
1887         ret = frame_start(s);
1891         ret = encode_picture(s, s->picture_number);
1892         if (growing_buffer) {
             /* The internal buffer may have been reallocated during encode. */
1893             av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1894             pkt->data = s->pb.buf;
1895             pkt->size = avctx->internal->byte_buffer_size;
1900 #if FF_API_STAT_BITS
1901 FF_DISABLE_DEPRECATION_WARNINGS
1902         avctx->header_bits = s->header_bits;
1903         avctx->mv_bits     = s->mv_bits;
1904         avctx->misc_bits   = s->misc_bits;
1905         avctx->i_tex_bits  = s->i_tex_bits;
1906         avctx->p_tex_bits  = s->p_tex_bits;
1907         avctx->i_count     = s->i_count;
1908         // FIXME f/b_count in avctx
1909         avctx->p_count     = s->mb_num - s->i_count - s->skip_count;
1910         avctx->skip_count  = s->skip_count;
1911 FF_ENABLE_DEPRECATION_WARNINGS
1916         if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1917             ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
         /* VBV check: if the frame is too large, raise lambda and loop
          * back to re-encode (the goto itself is elided in this chunk). */
1919         if (avctx->rc_buffer_size) {
1920             RateControlContext *rcc = &s->rc_context;
1921             int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1922             int hq = (s->avctx->mb_decision == FF_MB_DECISION_RD || s->avctx->trellis);
1923             int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1925             if (put_bits_count(&s->pb) > max_size &&
1926                 s->lambda < s->lmax) {
1927                 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1928                                        (s->qscale + 1) / s->qscale);
1929                 if (s->adaptive_quant) {
1931                     for (i = 0; i < s->mb_height * s->mb_stride; i++)
1932                         s->lambda_table[i] =
1933                             FFMAX(s->lambda_table[i] + min_step,
1934                                   s->lambda_table[i] * (s->qscale + 1) /
1937                 s->mb_skipped = 0;        // done in frame_start()
1938                 // done in encode_picture() so we must undo it
1939                 if (s->pict_type == AV_PICTURE_TYPE_P) {
1940                     if (s->flipflop_rounding          ||
1941                         s->codec_id == AV_CODEC_ID_H263P ||
1942                         s->codec_id == AV_CODEC_ID_MPEG4)
1943                         s->no_rounding ^= 1;
1945                 if (s->pict_type != AV_PICTURE_TYPE_B) {
1946                     s->time_base       = s->last_time_base;
1947                     s->last_non_b_time = s->time - s->pp_time;
1949                 for (i = 0; i < context_count; i++) {
1950                     PutBitContext *pb = &s->thread_context[i]->pb;
1951                     init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1953                 s->vbv_ignore_qmax = 1;
1954                 av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1958             av_assert0(s->avctx->rc_max_rate);
1961         if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1962             ff_write_pass1_stats(s);
1964         for (i = 0; i < 4; i++) {
1965             s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1966             avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1968         ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1969                                        s->current_picture_ptr->encoding_error,
1970                                        (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1973         if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1974             assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1975                                              s->misc_bits + s->i_tex_bits +
1977         flush_put_bits(&s->pb);
1978         s->frame_bits  = put_bits_count(&s->pb);
1980         stuffing_count = ff_vbv_update(s, s->frame_bits);
1981         s->stuffing_bits = 8*stuffing_count;
1982         if (stuffing_count) {
1983             if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1984                     stuffing_count + 50) {
1985                 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
             /* Codec-specific stuffing patterns. */
1989             switch (s->codec_id) {
1990             case AV_CODEC_ID_MPEG1VIDEO:
1991             case AV_CODEC_ID_MPEG2VIDEO:
1992                 while (stuffing_count--) {
1993                     put_bits(&s->pb, 8, 0);
1996             case AV_CODEC_ID_MPEG4:
                 /* MPEG-4: stuffing start code 0x000001C3, then 0xFF bytes. */
1997                 put_bits(&s->pb, 16, 0);
1998                 put_bits(&s->pb, 16, 0x1C3);
1999                 stuffing_count -= 4;
2000                 while (stuffing_count--) {
2001                     put_bits(&s->pb, 8, 0xFF);
2005                 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2007             flush_put_bits(&s->pb);
2008             s->frame_bits  = put_bits_count(&s->pb);
2011         /* update mpeg1/2 vbv_delay for CBR */
2012         if (s->avctx->rc_max_rate                          &&
2013             s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
2014             s->out_format == FMT_MPEG1                     &&
2015             90000LL * (avctx->rc_buffer_size - 1) <=
2016                 s->avctx->rc_max_rate * 0xFFFFLL) {
2017             AVCPBProperties *props;
2020             int vbv_delay, min_delay;
2021             double inbits  = s->avctx->rc_max_rate *
2022                              av_q2d(s->avctx->time_base);
2023             int    minbits = s->frame_bits - 8 *
2024                              (s->vbv_delay_ptr - s->pb.buf - 1);
2025             double bits    = s->rc_context.buffer_index + minbits - inbits;
2028                 av_log(s->avctx, AV_LOG_ERROR,
2029                        "Internal error, negative bits\n");
2031             assert(s->repeat_first_field == 0);
2033             vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
2034             min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
2035                         s->avctx->rc_max_rate;
2037             vbv_delay = FFMAX(vbv_delay, min_delay);
2039             av_assert0(vbv_delay < 0xFFFF);
             /* Patch the 16-bit vbv_delay field in the already-written
              * picture header (spread across three bytes). */
2041             s->vbv_delay_ptr[0] &= 0xF8;
2042             s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2043             s->vbv_delay_ptr[1]  = vbv_delay >> 5;
2044             s->vbv_delay_ptr[2] &= 0x07;
2045             s->vbv_delay_ptr[2] |= vbv_delay << 3;
2047             props = av_cpb_properties_alloc(&props_size);
2049                 return AVERROR(ENOMEM);
2050             props->vbv_delay = vbv_delay * 300;
2052             ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
2053                                           (uint8_t*)props, props_size);
2059 #if FF_API_VBV_DELAY
2060 FF_DISABLE_DEPRECATION_WARNINGS
2061             avctx->vbv_delay = vbv_delay * 300;
2062 FF_ENABLE_DEPRECATION_WARNINGS
2065         s->total_bits     += s->frame_bits;
2066 #if FF_API_STAT_BITS
2067 FF_DISABLE_DEPRECATION_WARNINGS
2068         avctx->frame_bits  = s->frame_bits;
2069 FF_ENABLE_DEPRECATION_WARNINGS
         /* dts: first frame gets pts - dts_delta, later frames reuse the
          * previous frame's pts (classic one-frame B-delay scheme). */
2073         pkt->pts = s->current_picture.f->pts;
2074         if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2075             if (!s->current_picture.f->coded_picture_number)
2076                 pkt->dts = pkt->pts - s->dts_delta;
2078                 pkt->dts = s->reordered_pts;
2079             s->reordered_pts = pkt->pts;
2081             pkt->dts = pkt->pts;
2082         if (s->current_picture.f->key_frame)
2083             pkt->flags |= AV_PKT_FLAG_KEY;
2085             av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2090     /* release non-reference frames */
2091     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2092         if (!s->picture[i].reference)
2093             ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2096     av_assert1((s->frame_bits & 7) == 0);
2098     pkt->size = s->frame_bits / 8;
2099     *got_packet = !!pkt->size;
/*
 * Zero out block 'n' entirely when it contains only a few small
 * coefficients whose position-weighted score stays below 'threshold'
 * (cheaper to skip than to code). A negative threshold means the DC
 * coefficient is exempt ("skip_dc" mode); only the AC part is cleared
 * then. NOTE(review): score accumulation lines are elided in this
 * chunk; code kept byte-identical.
 */
2103 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2104                                                 int n, int threshold)
     /* Per-scan-position weight: early (low-frequency) coefficients
      * count more; positions >= 24 contribute nothing. */
2106     static const char tab[64] = {
2107         3, 2, 2, 1, 1, 1, 1, 1,
2108         1, 1, 1, 1, 1, 1, 1, 1,
2109         1, 1, 1, 1, 1, 1, 1, 1,
2110         0, 0, 0, 0, 0, 0, 0, 0,
2111         0, 0, 0, 0, 0, 0, 0, 0,
2112         0, 0, 0, 0, 0, 0, 0, 0,
2113         0, 0, 0, 0, 0, 0, 0, 0,
2114         0, 0, 0, 0, 0, 0, 0, 0
2119     int16_t *block = s->block[n];
2120     const int last_index = s->block_last_index[n];
2123     if (threshold < 0) {
2125         threshold = -threshold;
2129     /* Are all we could set to zero already zero? */
2130     if (last_index <= skip_dc - 1)
2133     for (i = 0; i <= last_index; i++) {
2134         const int j = s->intra_scantable.permutated[i];
2135         const int level = FFABS(block[j]);
2137             if (skip_dc && i == 0)
         /* Any coefficient with |level| > 1 disqualifies elimination
          * (the bail-out branch itself is elided here). */
2141         } else if (level > 1) {
2147     if (score >= threshold)
     /* Clear the (AC) coefficients and fix up block_last_index. */
2149     for (i = skip_dc; i <= last_index; i++) {
2150         const int j = s->intra_scantable.permutated[i];
2154         s->block_last_index[n] = 0;
2156         s->block_last_index[n] = -1;
/*
 * Clamp quantized coefficients to the codec's representable range
 * [min_qcoeff, max_qcoeff], counting how many were clipped; warn once
 * per block in simple MB-decision mode (RD modes handle overflow via
 * a larger lambda instead).
 */
2159 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2163     const int maxlevel = s->max_qcoeff;
2164     const int minlevel = s->min_qcoeff;
     /* Intra DC has its own coding path and must not be clipped
      * (the surrounding condition is elided in this chunk). */
2168         i = 1; // skip clipping of intra dc
2172     for (; i <= last_index; i++) {
2173         const int j = s->intra_scantable.permutated[i];
2174         int level = block[j];
2176         if (level > maxlevel) {
2179         } else if (level < minlevel) {
2187     if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2188         av_log(s->avctx, AV_LOG_INFO,
2189                "warning, clipping %d dct coefficients to %d..%d\n",
2190                overflow, minlevel, maxlevel);
/*
 * Build a perceptual weight for each pixel of an 8x8 block from the
 * local variance of its 3x3 neighbourhood (clamped at the block edges):
 * weight = 36 * stddev-like term. Used by the trellis refinement
 * (dct_quantize_refine). Accumulator setup lines are elided here.
 */
2193 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2197     for (y = 0; y < 8; y++) {
2198         for (x = 0; x < 8; x++) {
             /* 3x3 window clipped to the block. */
2204             for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2205                 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2206                     int v = ptr[x2 + y2 * stride];
             /* count*sqr - sum*sum is count^2 times the window variance. */
2212             weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2217 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2218 int motion_x, int motion_y,
2219 int mb_block_height,
2223 int16_t weight[12][64];
2224 int16_t orig[12][64];
2225 const int mb_x = s->mb_x;
2226 const int mb_y = s->mb_y;
2229 int dct_offset = s->linesize * 8; // default for progressive frames
2230 int uv_dct_offset = s->uvlinesize * 8;
2231 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2232 ptrdiff_t wrap_y, wrap_c;
2234 for (i = 0; i < mb_block_count; i++)
2235 skip_dct[i] = s->skipdct;
2237 if (s->adaptive_quant) {
2238 const int last_qp = s->qscale;
2239 const int mb_xy = mb_x + mb_y * s->mb_stride;
2241 s->lambda = s->lambda_table[mb_xy];
2244 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2245 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2246 s->dquant = s->qscale - last_qp;
2248 if (s->out_format == FMT_H263) {
2249 s->dquant = av_clip(s->dquant, -2, 2);
2251 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2253 if (s->pict_type == AV_PICTURE_TYPE_B) {
2254 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2257 if (s->mv_type == MV_TYPE_8X8)
2263 ff_set_qscale(s, last_qp + s->dquant);
2264 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2265 ff_set_qscale(s, s->qscale + s->dquant);
2267 wrap_y = s->linesize;
2268 wrap_c = s->uvlinesize;
2269 ptr_y = s->new_picture.f->data[0] +
2270 (mb_y * 16 * wrap_y) + mb_x * 16;
2271 ptr_cb = s->new_picture.f->data[1] +
2272 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2273 ptr_cr = s->new_picture.f->data[2] +
2274 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2276 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2277 uint8_t *ebuf = s->sc.edge_emu_buffer + 36 * wrap_y;
2278 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2279 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2280 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2282 16, 16, mb_x * 16, mb_y * 16,
2283 s->width, s->height);
2285 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2287 mb_block_width, mb_block_height,
2288 mb_x * mb_block_width, mb_y * mb_block_height,
2290 ptr_cb = ebuf + 16 * wrap_y;
2291 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2293 mb_block_width, mb_block_height,
2294 mb_x * mb_block_width, mb_y * mb_block_height,
2296 ptr_cr = ebuf + 16 * wrap_y + 16;
2300 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2301 int progressive_score, interlaced_score;
2303 s->interlaced_dct = 0;
2304 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2305 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2306 NULL, wrap_y, 8) - 400;
2308 if (progressive_score > 0) {
2309 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2310 NULL, wrap_y * 2, 8) +
2311 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2312 NULL, wrap_y * 2, 8);
2313 if (progressive_score > interlaced_score) {
2314 s->interlaced_dct = 1;
2316 dct_offset = wrap_y;
2317 uv_dct_offset = wrap_c;
2319 if (s->chroma_format == CHROMA_422 ||
2320 s->chroma_format == CHROMA_444)
2326 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2327 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2328 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2329 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2331 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2335 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2336 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2337 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2338 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2339 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2340 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2341 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2342 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2343 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2344 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2345 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2346 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2350 op_pixels_func (*op_pix)[4];
2351 qpel_mc_func (*op_qpix)[16];
2352 uint8_t *dest_y, *dest_cb, *dest_cr;
2354 dest_y = s->dest[0];
2355 dest_cb = s->dest[1];
2356 dest_cr = s->dest[2];
2358 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2359 op_pix = s->hdsp.put_pixels_tab;
2360 op_qpix = s->qdsp.put_qpel_pixels_tab;
2362 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2363 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2366 if (s->mv_dir & MV_DIR_FORWARD) {
2367 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2368 s->last_picture.f->data,
2370 op_pix = s->hdsp.avg_pixels_tab;
2371 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2373 if (s->mv_dir & MV_DIR_BACKWARD) {
2374 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2375 s->next_picture.f->data,
2379 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2380 int progressive_score, interlaced_score;
2382 s->interlaced_dct = 0;
2383 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2384 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2388 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2389 progressive_score -= 400;
2391 if (progressive_score > 0) {
2392 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2394 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2398 if (progressive_score > interlaced_score) {
2399 s->interlaced_dct = 1;
2401 dct_offset = wrap_y;
2402 uv_dct_offset = wrap_c;
2404 if (s->chroma_format == CHROMA_422)
2410 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2411 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2412 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2413 dest_y + dct_offset, wrap_y);
2414 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2415 dest_y + dct_offset + 8, wrap_y);
2417 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2421 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2422 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2423 if (!s->chroma_y_shift) { /* 422 */
2424 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2425 dest_cb + uv_dct_offset, wrap_c);
2426 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2427 dest_cr + uv_dct_offset, wrap_c);
2430 /* pre quantization */
2431 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2432 2 * s->qscale * s->qscale) {
2434 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2436 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2438 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2439 wrap_y, 8) < 20 * s->qscale)
2441 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2442 wrap_y, 8) < 20 * s->qscale)
2444 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2446 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2448 if (!s->chroma_y_shift) { /* 422 */
2449 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2450 dest_cb + uv_dct_offset,
2451 wrap_c, 8) < 20 * s->qscale)
2453 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2454 dest_cr + uv_dct_offset,
2455 wrap_c, 8) < 20 * s->qscale)
2461 if (s->quantizer_noise_shaping) {
2463 get_visual_weight(weight[0], ptr_y , wrap_y);
2465 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2467 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2469 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2471 get_visual_weight(weight[4], ptr_cb , wrap_c);
2473 get_visual_weight(weight[5], ptr_cr , wrap_c);
2474 if (!s->chroma_y_shift) { /* 422 */
2476 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2479 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2482 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2485 /* DCT & quantize */
2486 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2488 for (i = 0; i < mb_block_count; i++) {
2491 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2492 // FIXME we could decide to change to quantizer instead of
2494 // JS: I don't think that would be a good idea it could lower
2495 // quality instead of improve it. Just INTRADC clipping
2496 // deserves changes in quantizer
2498 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2500 s->block_last_index[i] = -1;
2502 if (s->quantizer_noise_shaping) {
2503 for (i = 0; i < mb_block_count; i++) {
2505 s->block_last_index[i] =
2506 dct_quantize_refine(s, s->block[i], weight[i],
2507 orig[i], i, s->qscale);
2512 if (s->luma_elim_threshold && !s->mb_intra)
2513 for (i = 0; i < 4; i++)
2514 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2515 if (s->chroma_elim_threshold && !s->mb_intra)
2516 for (i = 4; i < mb_block_count; i++)
2517 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2519 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2520 for (i = 0; i < mb_block_count; i++) {
2521 if (s->block_last_index[i] == -1)
2522 s->coded_score[i] = INT_MAX / 256;
2527 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2528 s->block_last_index[4] =
2529 s->block_last_index[5] = 0;
2531 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2532 if (!s->chroma_y_shift) { /* 422 / 444 */
2533 for (i=6; i<12; i++) {
2534 s->block_last_index[i] = 0;
2535 s->block[i][0] = s->block[4][0];
2540 // non c quantize code returns incorrect block_last_index FIXME
2541 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2542 for (i = 0; i < mb_block_count; i++) {
2544 if (s->block_last_index[i] > 0) {
2545 for (j = 63; j > 0; j--) {
2546 if (s->block[i][s->intra_scantable.permutated[j]])
2549 s->block_last_index[i] = j;
2554 /* huffman encode */
2555 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2556 case AV_CODEC_ID_MPEG1VIDEO:
2557 case AV_CODEC_ID_MPEG2VIDEO:
2558 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2559 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2561 case AV_CODEC_ID_MPEG4:
2562 if (CONFIG_MPEG4_ENCODER)
2563 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2565 case AV_CODEC_ID_MSMPEG4V2:
2566 case AV_CODEC_ID_MSMPEG4V3:
2567 case AV_CODEC_ID_WMV1:
2568 if (CONFIG_MSMPEG4_ENCODER)
2569 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2571 case AV_CODEC_ID_WMV2:
2572 if (CONFIG_WMV2_ENCODER)
2573 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2575 case AV_CODEC_ID_H261:
2576 if (CONFIG_H261_ENCODER)
2577 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2579 case AV_CODEC_ID_H263:
2580 case AV_CODEC_ID_H263P:
2581 case AV_CODEC_ID_FLV1:
2582 case AV_CODEC_ID_RV10:
2583 case AV_CODEC_ID_RV20:
2584 if (CONFIG_H263_ENCODER)
2585 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2587 case AV_CODEC_ID_MJPEG:
2588 case AV_CODEC_ID_AMV:
2589 if (CONFIG_MJPEG_ENCODER)
2590 ff_mjpeg_encode_mb(s, s->block);
/**
 * Encode one macroblock. Thin dispatcher that picks the block layout for
 * encode_mb_internal() from the chroma subsampling of the stream:
 *   4:2:0 -> 8x8 chroma area,  6 coded blocks per MB
 *   4:2:2 -> 16x8 chroma area, 8 coded blocks per MB
 *   else (4:4:4) -> 16x16 chroma area, 12 coded blocks per MB
 */
2597 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2599     if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2600     else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2601     else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/**
 * Save the parts of the encoder state that a trial macroblock encode may
 * modify: prediction state (last MVs, DC predictors, skip-run), the
 * bit-accounting counters, and the quantizer state. Used together with
 * copy_context_after_encode() to try several MB coding modes and keep the
 * best one (see encode_mb_hq()).
 *
 * @param d destination context (the backup)
 * @param s source context (live encoder state)
 * @param type candidate MB type being tried, or -1 for a full snapshot
 */
2604 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2607     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2610     d->mb_skip_run= s->mb_skip_run;
2612         d->last_dc[i] = s->last_dc[i];
     /* statistics counters (used for rate control / 2-pass logging) */
2615     d->mv_bits= s->mv_bits;
2616     d->i_tex_bits= s->i_tex_bits;
2617     d->p_tex_bits= s->p_tex_bits;
2618     d->i_count= s->i_count;
2619     d->f_count= s->f_count;
2620     d->b_count= s->b_count;
2621     d->skip_count= s->skip_count;
2622     d->misc_bits= s->misc_bits;
     /* quantizer state */
2626     d->qscale= s->qscale;
2627     d->dquant= s->dquant;
     /* MPEG-4 escape-code state — must survive mode retries */
2629     d->esc3_level_length= s->esc3_level_length;
/**
 * Copy back the encoder state produced by a (winning) trial macroblock
 * encode: motion vectors, prediction state, bit counters, the chosen MB
 * mode flags, the partitioned-bitstream writers and per-block last-index
 * values. Counterpart of copy_context_before_encode().
 *
 * @param d destination context (live encoder state to update)
 * @param s source context (state after the best trial encode)
 * @param type candidate MB type that was tried, or -1 for a full copy
 */
2632 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2635     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2636     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2639     d->mb_skip_run= s->mb_skip_run;
2641         d->last_dc[i] = s->last_dc[i];
     /* statistics counters */
2644     d->mv_bits= s->mv_bits;
2645     d->i_tex_bits= s->i_tex_bits;
2646     d->p_tex_bits= s->p_tex_bits;
2647     d->i_count= s->i_count;
2648     d->f_count= s->f_count;
2649     d->b_count= s->b_count;
2650     d->skip_count= s->skip_count;
2651     d->misc_bits= s->misc_bits;
     /* mode decision results of the trial encode */
2653     d->mb_intra= s->mb_intra;
2654     d->mb_skipped= s->mb_skipped;
2655     d->mv_type= s->mv_type;
2656     d->mv_dir= s->mv_dir;
2658     if(s->data_partitioning){
     /* with data partitioning, texture bits go to a separate PutBitContext */
2660         d->tex_pb= s->tex_pb;
2664         d->block_last_index[i]= s->block_last_index[i];
2665     d->interlaced_dct= s->interlaced_dct;
2666     d->qscale= s->qscale;
2668     d->esc3_level_length= s->esc3_level_length;
/**
 * Trial-encode the current macroblock with a given candidate mode and keep
 * it if it beats the best score so far.
 *
 * The MB is encoded into one of two scratch bitstream buffers (selected by
 * *next_block) so the previous best encoding is preserved in the other.
 * The score is the bit count of the encoding; with mb_decision ==
 * FF_MB_DECISION_RD the MB is additionally decoded into rd_scratchpad and
 * the score becomes a full rate-distortion cost: bits*lambda2 + SSE.
 *
 * @param s        live encoder context (modified by the trial encode)
 * @param backup   state snapshot to restore before encoding (see
 *                 copy_context_before_encode())
 * @param best     context receiving the winning state
 * @param type     candidate MB type (CANDIDATE_MB_TYPE_*)
 * @param pb,pb2,tex_pb  double-buffered bit writers (second pair used only
 *                 with data partitioning)
 * @param dmin     in/out: best score so far
 * @param next_block in/out: which scratch buffer set to encode into
 * @param motion_x,motion_y motion vector passed through to encode_mb()
 */
2671 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2672                            PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2673                            int *dmin, int *next_block, int motion_x, int motion_y)
2676     uint8_t *dest_backup[3];
2678     copy_context_before_encode(s, backup, type);
2680     s->block= s->blocks[*next_block];
2681     s->pb= pb[*next_block];
2682     if(s->data_partitioning){
2683         s->pb2   = pb2   [*next_block];
2684         s->tex_pb= tex_pb[*next_block];
     /* for RD mode decision, redirect reconstruction into the scratchpad so
        the real picture is not clobbered by losing candidates */
2688         memcpy(dest_backup, s->dest, sizeof(s->dest));
2689         s->dest[0] = s->sc.rd_scratchpad;
2690         s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2691         s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2692         av_assert0(s->linesize >= 32); //FIXME
2695     encode_mb(s, motion_x, motion_y);
2697     score= put_bits_count(&s->pb);
2698     if(s->data_partitioning){
2699         score+= put_bits_count(&s->pb2);
2700         score+= put_bits_count(&s->tex_pb);
2703     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2704         ff_mpv_decode_mb(s, s->block);
     /* RD cost: rate (bits * lambda2) + distortion (SSE, scaled to match) */
2706         score *= s->lambda2;
2707         score += sse_mb(s) << FF_LAMBDA_SHIFT;
2711         memcpy(s->dest, dest_backup, sizeof(s->dest));
2718         copy_context_after_encode(best, s, type);
/**
 * Sum of squared errors between two w x h pixel regions.
 * Uses the optimized mecc.sse functions for the common 16x16 and 8x8
 * sizes; otherwise falls back to a scalar loop driven by the shared
 * ff_square_tab lookup table (offset by 256 so negative differences
 * index correctly).
 */
2722 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2723     uint32_t *sq = ff_square_tab + 256;
2728         return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2729     else if(w==8 && h==8)
2730         return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2734             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/**
 * Distortion of the current macroblock: compares the reconstructed MB
 * (s->dest) against the source picture (s->new_picture) over luma and both
 * chroma planes. The MB size is clipped at the right/bottom picture edges.
 * With mb_cmp == FF_CMP_NSSE a noise-preserving SSE variant is used; full
 * 16x16 MBs take the fast mecc.sse path, edge MBs use the generic sse().
 */
2743 static int sse_mb(MpegEncContext *s){
2747     if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2748     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2751         if(s->avctx->mb_cmp == FF_CMP_NSSE){
2752             return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize   * 16, s->dest[0], s->linesize,   16) +
2753                    s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[1], s->uvlinesize,  8) +
2754                    s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[2], s->uvlinesize,  8);
2756             return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize   * 16, s->dest[0], s->linesize,   16) +
2757                    s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[1], s->uvlinesize,  8) +
2758                    s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[2], s->uvlinesize,  8);
     /* edge macroblock: clipped size, chroma at half resolution */
2761         return  sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2762                +sse(s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2763                +sse(s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/**
 * Slice-thread worker: motion-estimation pre-pass over this thread's MB
 * rows using the pre_dia_size search diamond. Note the reverse scan order
 * (bottom-to-top, right-to-left), the opposite of the main ME pass.
 */
2766 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2767     MpegEncContext *s= *(void**)arg;
2771     s->me.dia_size= s->avctx->pre_dia_size;
2772     s->first_slice_line=1;
2773     for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2774         for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2775             ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2777         s->first_slice_line=0;
/**
 * Slice-thread worker: main motion-estimation pass over this thread's MB
 * rows (start_mb_y..end_mb_y). For each MB it advances the block indices
 * and runs B- or P-frame estimation depending on the picture type; results
 * (MVs and MB type candidates) are stored in the context tables.
 */
2785 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2786     MpegEncContext *s= *(void**)arg;
2788     ff_check_alignment();
2790     s->me.dia_size= s->avctx->dia_size;
2791     s->first_slice_line=1;
2792     for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2793         s->mb_x=0; //for block init below
2794         ff_init_block_index(s);
2795         for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
             /* step the 4 luma block indices one MB (2 blocks) to the right */
2796             s->block_index[0]+=2;
2797             s->block_index[1]+=2;
2798             s->block_index[2]+=2;
2799             s->block_index[3]+=2;
2801             /* compute motion vector & mb_type and store in context */
2802             if(s->pict_type==AV_PICTURE_TYPE_B)
2803                 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2805                 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2807         s->first_slice_line=0;
/**
 * Slice-thread worker: compute the spatial variance and mean of each 16x16
 * luma macroblock of the source picture. The results feed rate control /
 * mode decision (mb_var, mb_mean tables, and the summed variance in
 * me.mb_var_sum_temp).
 */
2812 static int mb_var_thread(AVCodecContext *c, void *arg){
2813     MpegEncContext *s= *(void**)arg;
2816     ff_check_alignment();
2818     for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2819         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2822             uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2824             int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
             /* variance = E[x^2] - E[x]^2, with rounding (+128, +500 bias) */
2826             varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2827                     (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2829             s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2830             s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2831             s->me.mb_var_sum_temp    += varc;
/**
 * Finalize the bitstream of the current slice: merge the MPEG-4 data
 * partitions (if partitioned) and write MPEG-4 stuffing, or MJPEG
 * stuffing, then byte-align and flush the bit writer. In two-pass mode the
 * alignment/stuffing bits are charged to misc_bits.
 */
2837 static void write_slice_end(MpegEncContext *s){
2838     if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2839         if(s->partitioned_frame){
2840             ff_mpeg4_merge_partitions(s);
2843         ff_mpeg4_stuffing(&s->pb);
2844     }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2845         ff_mjpeg_encode_stuffing(s);
2848     avpriv_align_put_bits(&s->pb);
2849     flush_put_bits(&s->pb);
2851     if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2852         s->misc_bits+= get_bits_diff(s);
/**
 * Fill in the most recently reserved 12-byte macroblock-info record
 * (written at mb_info_ptr + mb_info_size - 12): bit offset of the MB in
 * the stream, quantizer, GOB number, MB address within the GOB, and the
 * predicted motion vector components (4MV records are left as 0).
 * This side information lets an RTP packetizer split the stream at MB
 * boundaries.
 */
2855 static void write_mb_info(MpegEncContext *s)
2857     uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2858     int offset = put_bits_count(&s->pb);
2859     int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2860     int gobn = s->mb_y / s->gob_index;
2862     if (CONFIG_H263_ENCODER)
2863         ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2864     bytestream_put_le32(&ptr, offset);
2865     bytestream_put_byte(&ptr, s->qscale);
2866     bytestream_put_byte(&ptr, gobn);
2867     bytestream_put_le16(&ptr, mba);
2868     bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2869     bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2870     /* 4MV not implemented */
2871     bytestream_put_byte(&ptr, 0); /* hmv2 */
2872     bytestream_put_byte(&ptr, 0); /* vmv2 */
/**
 * Bookkeeping for the per-macroblock info side data: once every s->mb_info
 * bytes of output a new 12-byte record slot is reserved; on a startcode
 * (startcode != 0) the current byte position is recorded so the next
 * record points at the resync point.
 */
2875 static void update_mb_info(MpegEncContext *s, int startcode)
2879     if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2880         s->mb_info_size += 12;
2881         s->prev_mb_info = s->last_mb_info;
2884         s->prev_mb_info = put_bits_count(&s->pb)/8;
2885         /* This might have incremented mb_info_size above, and we return without
2886          * actually writing any info into that slot yet. But in that case,
2887          * this will be called again at the start of the after writing the
2888          * start code, actually writing the mb info. */
2892     s->last_mb_info = put_bits_count(&s->pb)/8;
2893     if (!s->mb_info_size)
2894         s->mb_info_size += 12;
/**
 * Grow the shared output bitstream buffer when fewer than `threshold`
 * bytes remain. Only possible when this is the sole slice context and the
 * PutBitContext writes into avctx's internal byte buffer. After the
 * reallocation, the PutBitContext is rebased onto the new buffer and the
 * pointers that reference positions inside it (ptr_lastgob,
 * vbv_delay_ptr) are recomputed from their saved offsets.
 *
 * @param threshold     minimum number of free bytes required
 * @param size_increase how many extra bytes to grow the buffer by
 * @return 0 on success (or if no growth was needed), AVERROR(ENOMEM) on
 *         allocation failure / overflow, AVERROR(EINVAL) if still too small
 */
2898 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2900     if (   s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2901         && s->slice_context_count == 1
2902         && s->pb.buf == s->avctx->internal->byte_buffer) {
         /* remember positions as offsets; raw pointers die with the old buffer */
2903         int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2904         int vbv_pos     = s->vbv_delay_ptr - s->pb.buf;
2906         uint8_t *new_buffer = NULL;
2907         int new_buffer_size = 0;
2909         if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2910             av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2911             return AVERROR(ENOMEM);
2914         av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2915                               s->avctx->internal->byte_buffer_size + size_increase);
2917             return AVERROR(ENOMEM);
2919         memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2920         av_free(s->avctx->internal->byte_buffer);
2921         s->avctx->internal->byte_buffer      = new_buffer;
2922         s->avctx->internal->byte_buffer_size = new_buffer_size;
2923         rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2924         s->ptr_lastgob   = s->pb.buf + lastgob_pos;
2925         s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2927     if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2928         return AVERROR(EINVAL);
2932 static int encode_thread(AVCodecContext *c, void *arg){
2933 MpegEncContext *s= *(void**)arg;
2934 int mb_x, mb_y, pdif = 0;
2935 int chr_h= 16>>s->chroma_y_shift;
2937 MpegEncContext best_s = { 0 }, backup_s;
2938 uint8_t bit_buf[2][MAX_MB_BYTES];
2939 uint8_t bit_buf2[2][MAX_MB_BYTES];
2940 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2941 PutBitContext pb[2], pb2[2], tex_pb[2];
2943 ff_check_alignment();
2946 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2947 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2948 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2951 s->last_bits= put_bits_count(&s->pb);
2962 /* init last dc values */
2963 /* note: quant matrix value (8) is implied here */
2964 s->last_dc[i] = 128 << s->intra_dc_precision;
2966 s->current_picture.encoding_error[i] = 0;
2968 if(s->codec_id==AV_CODEC_ID_AMV){
2969 s->last_dc[0] = 128*8/13;
2970 s->last_dc[1] = 128*8/14;
2971 s->last_dc[2] = 128*8/14;
2974 memset(s->last_mv, 0, sizeof(s->last_mv));
2978 switch(s->codec_id){
2979 case AV_CODEC_ID_H263:
2980 case AV_CODEC_ID_H263P:
2981 case AV_CODEC_ID_FLV1:
2982 if (CONFIG_H263_ENCODER)
2983 s->gob_index = H263_GOB_HEIGHT(s->height);
2985 case AV_CODEC_ID_MPEG4:
2986 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2987 ff_mpeg4_init_partitions(s);
2993 s->first_slice_line = 1;
2994 s->ptr_lastgob = s->pb.buf;
2995 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2999 ff_set_qscale(s, s->qscale);
3000 ff_init_block_index(s);
3002 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3003 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
3004 int mb_type= s->mb_type[xy];
3008 int size_increase = s->avctx->internal->byte_buffer_size/4
3009 + s->mb_width*MAX_MB_BYTES;
3011 ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
3012 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
3013 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
3016 if(s->data_partitioning){
3017 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
3018 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
3019 av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3025 s->mb_y = mb_y; // moved into loop, can get changed by H.261
3026 ff_update_block_index(s);
3028 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3029 ff_h261_reorder_mb_index(s);
3030 xy= s->mb_y*s->mb_stride + s->mb_x;
3031 mb_type= s->mb_type[xy];
3034 /* write gob / video packet header */
3036 int current_packet_size, is_gob_start;
3038 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
3040 is_gob_start = s->rtp_payload_size &&
3041 current_packet_size >= s->rtp_payload_size &&
3044 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3046 switch(s->codec_id){
3047 case AV_CODEC_ID_H263:
3048 case AV_CODEC_ID_H263P:
3049 if(!s->h263_slice_structured)
3050 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3052 case AV_CODEC_ID_MPEG2VIDEO:
3053 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3054 case AV_CODEC_ID_MPEG1VIDEO:
3055 if(s->mb_skip_run) is_gob_start=0;
3057 case AV_CODEC_ID_MJPEG:
3058 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3063 if(s->start_mb_y != mb_y || mb_x!=0){
3066 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3067 ff_mpeg4_init_partitions(s);
3071 av_assert2((put_bits_count(&s->pb)&7) == 0);
3072 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3074 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3075 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3076 int d = 100 / s->error_rate;
3078 current_packet_size=0;
3079 s->pb.buf_ptr= s->ptr_lastgob;
3080 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3084 #if FF_API_RTP_CALLBACK
3085 FF_DISABLE_DEPRECATION_WARNINGS
3086 if (s->avctx->rtp_callback){
3087 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3088 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3090 FF_ENABLE_DEPRECATION_WARNINGS
3092 update_mb_info(s, 1);
3094 switch(s->codec_id){
3095 case AV_CODEC_ID_MPEG4:
3096 if (CONFIG_MPEG4_ENCODER) {
3097 ff_mpeg4_encode_video_packet_header(s);
3098 ff_mpeg4_clean_buffers(s);
3101 case AV_CODEC_ID_MPEG1VIDEO:
3102 case AV_CODEC_ID_MPEG2VIDEO:
3103 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3104 ff_mpeg1_encode_slice_header(s);
3105 ff_mpeg1_clean_buffers(s);
3108 case AV_CODEC_ID_H263:
3109 case AV_CODEC_ID_H263P:
3110 if (CONFIG_H263_ENCODER)
3111 ff_h263_encode_gob_header(s, mb_y);
3115 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3116 int bits= put_bits_count(&s->pb);
3117 s->misc_bits+= bits - s->last_bits;
3121 s->ptr_lastgob += current_packet_size;
3122 s->first_slice_line=1;
3123 s->resync_mb_x=mb_x;
3124 s->resync_mb_y=mb_y;
3128 if( (s->resync_mb_x == s->mb_x)
3129 && s->resync_mb_y+1 == s->mb_y){
3130 s->first_slice_line=0;
3134 s->dquant=0; //only for QP_RD
3136 update_mb_info(s, 0);
3138 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3140 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3142 copy_context_before_encode(&backup_s, s, -1);
3144 best_s.data_partitioning= s->data_partitioning;
3145 best_s.partitioned_frame= s->partitioned_frame;
3146 if(s->data_partitioning){
3147 backup_s.pb2= s->pb2;
3148 backup_s.tex_pb= s->tex_pb;
3151 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3152 s->mv_dir = MV_DIR_FORWARD;
3153 s->mv_type = MV_TYPE_16X16;
3155 s->mv[0][0][0] = s->p_mv_table[xy][0];
3156 s->mv[0][0][1] = s->p_mv_table[xy][1];
3157 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3158 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3160 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3161 s->mv_dir = MV_DIR_FORWARD;
3162 s->mv_type = MV_TYPE_FIELD;
3165 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3166 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3167 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3169 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3170 &dmin, &next_block, 0, 0);
3172 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3173 s->mv_dir = MV_DIR_FORWARD;
3174 s->mv_type = MV_TYPE_16X16;
3178 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3179 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3181 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3182 s->mv_dir = MV_DIR_FORWARD;
3183 s->mv_type = MV_TYPE_8X8;
3186 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3187 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3189 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3190 &dmin, &next_block, 0, 0);
3192 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3193 s->mv_dir = MV_DIR_FORWARD;
3194 s->mv_type = MV_TYPE_16X16;
3196 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3197 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3198 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3199 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3201 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3202 s->mv_dir = MV_DIR_BACKWARD;
3203 s->mv_type = MV_TYPE_16X16;
3205 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3206 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3207 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3208 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3210 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3211 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3212 s->mv_type = MV_TYPE_16X16;
3214 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3215 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3216 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3217 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3218 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3219 &dmin, &next_block, 0, 0);
3221 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3222 s->mv_dir = MV_DIR_FORWARD;
3223 s->mv_type = MV_TYPE_FIELD;
3226 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3227 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3228 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3230 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3231 &dmin, &next_block, 0, 0);
3233 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3234 s->mv_dir = MV_DIR_BACKWARD;
3235 s->mv_type = MV_TYPE_FIELD;
3238 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3239 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3240 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3242 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3243 &dmin, &next_block, 0, 0);
3245 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3246 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3247 s->mv_type = MV_TYPE_FIELD;
3249 for(dir=0; dir<2; dir++){
3251 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3252 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3253 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3256 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3257 &dmin, &next_block, 0, 0);
3259 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3261 s->mv_type = MV_TYPE_16X16;
3265 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3266 &dmin, &next_block, 0, 0);
3267 if(s->h263_pred || s->h263_aic){
3269 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3271 ff_clean_intra_table_entries(s); //old mode?
3275 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3276 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3277 const int last_qp= backup_s.qscale;
3280 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3281 static const int dquant_tab[4]={-1,1,-2,2};
3282 int storecoefs = s->mb_intra && s->dc_val[0];
3284 av_assert2(backup_s.dquant == 0);
3287 s->mv_dir= best_s.mv_dir;
3288 s->mv_type = MV_TYPE_16X16;
3289 s->mb_intra= best_s.mb_intra;
3290 s->mv[0][0][0] = best_s.mv[0][0][0];
3291 s->mv[0][0][1] = best_s.mv[0][0][1];
3292 s->mv[1][0][0] = best_s.mv[1][0][0];
3293 s->mv[1][0][1] = best_s.mv[1][0][1];
3295 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3296 for(; qpi<4; qpi++){
3297 int dquant= dquant_tab[qpi];
3298 qp= last_qp + dquant;
3299 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3301 backup_s.dquant= dquant;
3304 dc[i]= s->dc_val[0][ s->block_index[i] ];
3305 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3309 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3310 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3311 if(best_s.qscale != qp){
3314 s->dc_val[0][ s->block_index[i] ]= dc[i];
3315 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3322 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3323 int mx= s->b_direct_mv_table[xy][0];
3324 int my= s->b_direct_mv_table[xy][1];
3326 backup_s.dquant = 0;
3327 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3329 ff_mpeg4_set_direct_mv(s, mx, my);
3330 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3331 &dmin, &next_block, mx, my);
3333 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3334 backup_s.dquant = 0;
3335 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3337 ff_mpeg4_set_direct_mv(s, 0, 0);
3338 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3339 &dmin, &next_block, 0, 0);
3341 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3344 coded |= s->block_last_index[i];
3347 memcpy(s->mv, best_s.mv, sizeof(s->mv));
3348 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3349 mx=my=0; //FIXME find the one we actually used
3350 ff_mpeg4_set_direct_mv(s, mx, my);
3351 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3359 s->mv_dir= best_s.mv_dir;
3360 s->mv_type = best_s.mv_type;
3362 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3363 s->mv[0][0][1] = best_s.mv[0][0][1];
3364 s->mv[1][0][0] = best_s.mv[1][0][0];
3365 s->mv[1][0][1] = best_s.mv[1][0][1];*/
3368 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3369 &dmin, &next_block, mx, my);
3374 s->current_picture.qscale_table[xy] = best_s.qscale;
3376 copy_context_after_encode(s, &best_s, -1);
3378 pb_bits_count= put_bits_count(&s->pb);
3379 flush_put_bits(&s->pb);
3380 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3383 if(s->data_partitioning){
3384 pb2_bits_count= put_bits_count(&s->pb2);
3385 flush_put_bits(&s->pb2);
3386 avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3387 s->pb2= backup_s.pb2;
3389 tex_pb_bits_count= put_bits_count(&s->tex_pb);
3390 flush_put_bits(&s->tex_pb);
3391 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3392 s->tex_pb= backup_s.tex_pb;
3394 s->last_bits= put_bits_count(&s->pb);
3396 if (CONFIG_H263_ENCODER &&
3397 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3398 ff_h263_update_motion_val(s);
3400 if(next_block==0){ //FIXME 16 vs linesize16
3401 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3402 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3403 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3406 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3407 ff_mpv_decode_mb(s, s->block);
3409 int motion_x = 0, motion_y = 0;
3410 s->mv_type=MV_TYPE_16X16;
3411 // only one MB-Type possible
3414 case CANDIDATE_MB_TYPE_INTRA:
3417 motion_x= s->mv[0][0][0] = 0;
3418 motion_y= s->mv[0][0][1] = 0;
3420 case CANDIDATE_MB_TYPE_INTER:
3421 s->mv_dir = MV_DIR_FORWARD;
3423 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3424 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3426 case CANDIDATE_MB_TYPE_INTER_I:
3427 s->mv_dir = MV_DIR_FORWARD;
3428 s->mv_type = MV_TYPE_FIELD;
3431 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3432 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3433 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3436 case CANDIDATE_MB_TYPE_INTER4V:
3437 s->mv_dir = MV_DIR_FORWARD;
3438 s->mv_type = MV_TYPE_8X8;
3441 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3442 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3445 case CANDIDATE_MB_TYPE_DIRECT:
3446 if (CONFIG_MPEG4_ENCODER) {
3447 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3449 motion_x=s->b_direct_mv_table[xy][0];
3450 motion_y=s->b_direct_mv_table[xy][1];
3451 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3454 case CANDIDATE_MB_TYPE_DIRECT0:
3455 if (CONFIG_MPEG4_ENCODER) {
3456 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3458 ff_mpeg4_set_direct_mv(s, 0, 0);
3461 case CANDIDATE_MB_TYPE_BIDIR:
3462 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3464 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3465 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3466 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3467 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3469 case CANDIDATE_MB_TYPE_BACKWARD:
3470 s->mv_dir = MV_DIR_BACKWARD;
3472 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3473 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3475 case CANDIDATE_MB_TYPE_FORWARD:
3476 s->mv_dir = MV_DIR_FORWARD;
3478 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3479 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3481 case CANDIDATE_MB_TYPE_FORWARD_I:
3482 s->mv_dir = MV_DIR_FORWARD;
3483 s->mv_type = MV_TYPE_FIELD;
3486 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3487 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3488 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3491 case CANDIDATE_MB_TYPE_BACKWARD_I:
3492 s->mv_dir = MV_DIR_BACKWARD;
3493 s->mv_type = MV_TYPE_FIELD;
3496 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3497 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3498 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3501 case CANDIDATE_MB_TYPE_BIDIR_I:
3502 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3503 s->mv_type = MV_TYPE_FIELD;
3505 for(dir=0; dir<2; dir++){
3507 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3508 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3509 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3514 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3517 encode_mb(s, motion_x, motion_y);
3519 // RAL: Update last macroblock type
3520 s->last_mv_dir = s->mv_dir;
3522 if (CONFIG_H263_ENCODER &&
3523 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3524 ff_h263_update_motion_val(s);
3526 ff_mpv_decode_mb(s, s->block);
3529 /* clean the MV table in IPS frames for direct mode in B frames */
3530 if(s->mb_intra /* && I,P,S_TYPE */){
3531 s->p_mv_table[xy][0]=0;
3532 s->p_mv_table[xy][1]=0;
3535 if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3539 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3540 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3542 s->current_picture.encoding_error[0] += sse(
3543 s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3544 s->dest[0], w, h, s->linesize);
3545 s->current_picture.encoding_error[1] += sse(
3546 s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3547 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3548 s->current_picture.encoding_error[2] += sse(
3549 s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3550 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3553 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3554 ff_h263_loop_filter(s);
3556 ff_dlog(s->avctx, "MB %d %d bits\n",
3557 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3561 //not beautiful here but we must write it before flushing so it has to be here
3562 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3563 ff_msmpeg4_encode_ext_header(s);
3567 #if FF_API_RTP_CALLBACK
3568 FF_DISABLE_DEPRECATION_WARNINGS
3569 /* Send the last GOB if RTP */
3570 if (s->avctx->rtp_callback) {
3571 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3572 pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3573 /* Call the RTP callback to send the last GOB */
3575 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3577 FF_ENABLE_DEPRECATION_WARNINGS
/* Accumulate src->field into dst->field and zero the source copy.
 * NOTE(review): multi-statement macro without do{}while(0) — unsafe in an
 * unbraced if/else; all visible call sites are plain statements. */
3583 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold per-slice motion-estimation statistics from a worker slice context
 * (src) back into the main context (dst) after the ME pass. */
3584 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3585 MERGE(me.scene_change_score);
3586 MERGE(me.mc_mb_var_sum_temp);
3587 MERGE(me.mb_var_sum_temp);
/* Merge per-slice encoding state back into the main context after the encode
 * pass: DCT/noise-reduction statistics, error-resilience counters, PSNR error
 * sums, and — crucially — the slice's bitstream, which is appended to the
 * master PutBitContext.  NOTE(review): original file lines are elided here
 * (non-contiguous numbering); some MERGE()d fields are not visible. */
3590 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3593 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3594 MERGE(dct_count[1]);
3603 MERGE(er.error_count);
3604 MERGE(padding_bug_score);
3605 MERGE(current_picture.encoding_error[0]);
3606 MERGE(current_picture.encoding_error[1]);
3607 MERGE(current_picture.encoding_error[2]);
/* Denoise error sums are only tracked when noise reduction is enabled. */
3609 if (dst->noise_reduction){
3610 for(i=0; i<64; i++){
3611 MERGE(dct_error_sum[0][i]);
3612 MERGE(dct_error_sum[1][i]);
/* Slices must end byte-aligned before their bitstreams can be concatenated. */
3616 assert(put_bits_count(&src->pb) % 8 ==0);
3617 assert(put_bits_count(&dst->pb) % 8 ==0);
3618 avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3619 flush_put_bits(&dst->pb);
/* Pick the quantiser (lambda / per-frame quality) for the current picture.
 * Priority: an explicitly queued next_lambda, then the rate controller
 * (ff_rate_estimate_qscale) unless fixed qscale is set.  With dry_run the
 * decision is computed but next_lambda is not consumed.  When adaptive
 * quantisation is on, per-MB qscales are sanitised codec-specifically and
 * the qscale table initialised.  Returns <0 on rate-control failure
 * (error path elided from this view). */
3622 static int estimate_qp(MpegEncContext *s, int dry_run){
3623 if (s->next_lambda){
3624 s->current_picture_ptr->f->quality =
3625 s->current_picture.f->quality = s->next_lambda;
3626 if(!dry_run) s->next_lambda= 0;
3627 } else if (!s->fixed_qscale) {
3628 s->current_picture_ptr->f->quality =
3629 s->current_picture.f->quality = ff_rate_estimate_qscale(s, dry_run);
3630 if (s->current_picture.f->quality < 0)
3634 if(s->adaptive_quant){
3635 switch(s->codec_id){
3636 case AV_CODEC_ID_MPEG4:
3637 if (CONFIG_MPEG4_ENCODER)
3638 ff_clean_mpeg4_qscales(s);
3640 case AV_CODEC_ID_H263:
3641 case AV_CODEC_ID_H263P:
3642 case AV_CODEC_ID_FLV1:
3643 if (CONFIG_H263_ENCODER)
3644 ff_clean_h263_qscales(s);
3647 ff_init_qscale_tab(s);
3650 s->lambda= s->lambda_table[0];
/* Non-adaptive path (else-branch; brace elided): lambda comes straight from
 * the chosen frame quality. */
3653 s->lambda = s->current_picture.f->quality;
/* Compute temporal distances used for B-frame prediction:
 *  pp_time — distance between the two surrounding non-B pictures,
 *  pb_time — distance from the previous non-B picture to this B picture.
 * Must be called before writing the picture header (pb_time/pp_time are
 * coded there for some formats). */
3658 /* must be called before writing the header */
3659 static void set_frame_distances(MpegEncContext * s){
3660 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
/* Frame time in time_base.num units derived from the frame's pts. */
3661 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3663 if(s->pict_type==AV_PICTURE_TYPE_B){
3664 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3665 assert(s->pb_time > 0 && s->pb_time < s->pp_time);
/* Non-B picture (else-branch; brace elided): advance the reference times. */
3667 s->pp_time= s->time - s->last_non_b_time;
3668 s->last_non_b_time= s->time;
3669 assert(s->picture_number==0 || s->pp_time > 0);
/* Encode one picture: set up timing/rounding/quantiser state, run motion
 * estimation across slice threads, possibly promote P->I on scene change,
 * choose f_code/b_code, build MJPEG/AMV quant matrices, write the picture
 * header, then run encode_thread over all slice contexts and merge results.
 * Returns 0 on success, <0 on error.
 * NOTE(review): many original lines are elided in this view (non-contiguous
 * numbering) — several braces/returns are not visible; verify against
 * upstream FFmpeg before relying on exact control flow. */
3673 static int encode_picture(MpegEncContext *s, int picture_number)
3677 int context_count = s->slice_context_count;
3679 s->picture_number = picture_number;
3681 /* Reset the average MB variance */
3682 s->me.mb_var_sum_temp =
3683 s->me.mc_mb_var_sum_temp = 0;
3685 /* we need to initialize some time vars before we can encode b-frames */
3686 // RAL: Condition added for MPEG1VIDEO
3687 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3688 set_frame_distances(s);
3689 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3690 ff_set_mpeg4_time(s);
3692 s->me.scene_change_score=0;
3694 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* Rounding control: I frames reset it; non-B frames toggle it for codecs
 * with flip-flop rounding (H.263+, MPEG-4, late MSMPEG4 versions). */
3696 if(s->pict_type==AV_PICTURE_TYPE_I){
3697 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3698 else s->no_rounding=0;
3699 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3700 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3701 s->no_rounding ^= 1;
/* Two-pass: take lambda/fcode from the stats file; otherwise reuse the last
 * lambda of the matching picture type for the ME phase. */
3704 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3705 if (estimate_qp(s,1) < 0)
3707 ff_get_2pass_fcode(s);
3708 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3709 if(s->pict_type==AV_PICTURE_TYPE_B)
3710 s->lambda= s->last_lambda_for[s->pict_type];
3712 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* Outside AMV/MJPEG, chroma shares the luma intra quant matrices; free any
 * separate chroma copies first to avoid leaking them. */
3716 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3717 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3718 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3719 s->q_chroma_intra_matrix = s->q_intra_matrix;
3720 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3723 s->mb_intra=0; //for the rate distortion & bit compare functions
/* Clone the main context into each slice-thread context. */
3724 for(i=1; i<context_count; i++){
3725 ret = ff_update_duplicate_context(s->thread_context[i], s);
3733 /* Estimate motion for every MB */
3734 if(s->pict_type != AV_PICTURE_TYPE_I){
/* Scale lambda by the ME penalty compensation (Q8 fixed point). */
3735 s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3736 s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3737 if (s->pict_type != AV_PICTURE_TYPE_B) {
3738 if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3740 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3744 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3745 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3747 for(i=0; i<s->mb_stride*s->mb_height; i++)
3748 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3750 if(!s->fixed_qscale){
3751 /* finding spatial complexity for I-frame rate control */
3752 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3755 for(i=1; i<context_count; i++){
3756 merge_context_after_me(s, s->thread_context[i]);
3758 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3759 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* Scene-change handling: re-code a P frame as all-intra I frame. */
3762 if (s->me.scene_change_score > s->scenechange_threshold &&
3763 s->pict_type == AV_PICTURE_TYPE_P) {
3764 s->pict_type= AV_PICTURE_TYPE_I;
3765 for(i=0; i<s->mb_stride*s->mb_height; i++)
3766 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3767 if(s->msmpeg4_version >= 3)
3769 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3770 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* Choose f_code for P/S frames and clip over-long MVs to the coded range. */
3774 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3775 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3777 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3779 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3780 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3781 s->f_code= FFMAX3(s->f_code, a, b);
3784 ff_fix_long_p_mvs(s);
3785 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3786 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3790 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3791 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
/* B frames: pick forward (f_code) and backward (b_code) ranges separately. */
3796 if(s->pict_type==AV_PICTURE_TYPE_B){
3799 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3800 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3801 s->f_code = FFMAX(a, b);
3803 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3804 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3805 s->b_code = FFMAX(a, b);
3807 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3808 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3809 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3810 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3811 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3813 for(dir=0; dir<2; dir++){
3816 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3817 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3818 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3819 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* Final (non-dry-run) quantiser decision for the coding pass. */
3827 if (estimate_qp(s, 0) < 0)
3830 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3831 s->pict_type == AV_PICTURE_TYPE_I &&
3832 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3833 s->qscale= 3; //reduce clipping problems
/* MJPEG: bake qscale into the quant matrices (user-supplied or MPEG-1
 * defaults) and rebuild the 16-bit quant tables. */
3835 if (s->out_format == FMT_MJPEG) {
3836 const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3837 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3839 if (s->avctx->intra_matrix) {
3841 luma_matrix = s->avctx->intra_matrix;
3843 if (s->avctx->chroma_intra_matrix)
3844 chroma_matrix = s->avctx->chroma_intra_matrix;
3846 /* for mjpeg, we do include qscale in the matrix */
3848 int j = s->idsp.idct_permutation[i];
3850 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3851 s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3853 s->y_dc_scale_table=
3854 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3855 s->chroma_intra_matrix[0] =
3856 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3857 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3858 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3859 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3860 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV: fixed SP5X quant tables with constant DC scale (13 luma, 14 chroma). */
3863 if(s->codec_id == AV_CODEC_ID_AMV){
3864 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3865 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3867 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3869 s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3870 s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3872 s->y_dc_scale_table= y;
3873 s->c_dc_scale_table= c;
3874 s->intra_matrix[0] = 13;
3875 s->chroma_intra_matrix[0] = 14;
3876 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3877 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3878 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3879 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3883 //FIXME var duplication
3884 s->current_picture_ptr->f->key_frame =
3885 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3886 s->current_picture_ptr->f->pict_type =
3887 s->current_picture.f->pict_type = s->pict_type;
3889 if (s->current_picture.f->key_frame)
3890 s->picture_in_gop_number=0;
/* Write the format-specific picture header and count its bits. */
3892 s->mb_x = s->mb_y = 0;
3893 s->last_bits= put_bits_count(&s->pb);
3894 switch(s->out_format) {
3896 if (CONFIG_MJPEG_ENCODER)
3897 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3898 s->pred, s->intra_matrix, s->chroma_intra_matrix);
3901 if (CONFIG_H261_ENCODER)
3902 ff_h261_encode_picture_header(s, picture_number);
3905 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3906 ff_wmv2_encode_picture_header(s, picture_number);
3907 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3908 ff_msmpeg4_encode_picture_header(s, picture_number);
3909 else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3910 ret = ff_mpeg4_encode_picture_header(s, picture_number);
3913 } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3914 ret = ff_rv10_encode_picture_header(s, picture_number);
3918 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3919 ff_rv20_encode_picture_header(s, picture_number);
3920 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3921 ff_flv_encode_picture_header(s, picture_number);
3922 else if (CONFIG_H263_ENCODER)
3923 ff_h263_encode_picture_header(s, picture_number);
3926 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3927 ff_mpeg1_encode_picture_header(s, picture_number);
3932 bits= put_bits_count(&s->pb);
3933 s->header_bits= bits - s->last_bits;
/* Run the per-slice encode threads and merge their output; grow the main
 * bitstream buffer if a slice buffer directly follows it in memory. */
3935 for(i=1; i<context_count; i++){
3936 update_duplicate_context_after_me(s->thread_context[i], s);
3938 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3939 for(i=1; i<context_count; i++){
3940 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3941 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
3942 merge_context_after_encode(s, s->thread_context[i]);
/* DCT-domain noise reduction (C reference): for each coefficient, track the
 * running error sum per position (separately for intra/inter blocks) and pull
 * the coefficient towards zero by the learned per-position offset, clamping
 * at zero so the sign never flips.  NOTE(review): several lines (the sign
 * branches and coefficient write-back) are elided in this view. */
3948 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3949 const int intra= s->mb_intra;
3952 s->dct_count[intra]++;
3954 for(i=0; i<64; i++){
3955 int level= block[i];
/* Positive-coefficient path: accumulate error, subtract offset, clamp at 0. */
3959 s->dct_error_sum[intra][i] += level;
3960 level -= s->dct_offset[intra][i];
3961 if(level<0) level=0;
/* Negative-coefficient path (mirror of the above). */
3963 s->dct_error_sum[intra][i] -= level;
3964 level += s->dct_offset[intra][i];
3965 if(level>0) level=0;
/* Trellis (rate-distortion optimal) quantisation of one 8x8 block.
 * Forward-DCTs the block, builds up to two candidate quantised levels per
 * coefficient, then runs a Viterbi-style search over (run, level) survivors
 * minimising distortion + lambda * bits.  Writes the chosen levels back into
 * block[] in IDCT-permuted order and returns the last nonzero index (or -1).
 * *overflow is set when a level exceeds s->max_qcoeff.
 * NOTE(review): this view elides many original lines (declarations, braces,
 * the intra/inter setup if/else) — verify control flow against upstream. */
3972 static int dct_quantize_trellis_c(MpegEncContext *s,
3973 int16_t *block, int n,
3974 int qscale, int *overflow){
3976 const uint16_t *matrix;
3977 const uint8_t *scantable= s->intra_scantable.scantable;
3978 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3980 unsigned int threshold1, threshold2;
3992 int coeff_count[64];
3993 int qmul, qadd, start_i, last_non_zero, i, dc;
3994 const int esc_length= s->ac_esc_length;
3996 uint8_t * last_length;
/* Lagrange multiplier scaled to the distortion units used below. */
3997 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
4000 s->fdsp.fdct(block);
4002 if(s->dct_error_sum)
4003 s->denoise_dct(s, block);
4005 qadd= ((qscale-1)|1)*8;
/* MPEG-2 non-linear quantiser scale vs. linear 2*qscale. */
4007 if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
4008 else mpeg2_qscale = qscale << 1;
4019 /* For AIC we skip quant/dequant of INTRADC */
4024 /* note: block[0] is assumed to be positive */
4025 block[0] = (block[0] + (q >> 1)) / q;
/* Intra path: luma (n<4) vs. chroma matrices; MPEG-style rounding bias. */
4028 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4029 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4030 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4031 bias= 1<<(QMAT_SHIFT-1);
4033 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4034 length = s->intra_chroma_ac_vlc_length;
4035 last_length= s->intra_chroma_ac_vlc_last_length;
4037 length = s->intra_ac_vlc_length;
4038 last_length= s->intra_ac_vlc_last_length;
/* Inter path (else-branch; brace elided). */
4043 qmat = s->q_inter_matrix[qscale];
4044 matrix = s->inter_matrix;
4045 length = s->inter_ac_vlc_length;
4046 last_length= s->inter_ac_vlc_last_length;
4050 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4051 threshold2= (threshold1<<1);
/* Find the last coefficient surviving the dead-zone threshold. */
4053 for(i=63; i>=start_i; i--) {
4054 const int j = scantable[i];
4055 int level = block[j] * qmat[j];
4057 if(((unsigned)(level+threshold1))>threshold2){
/* Build candidate levels: the rounded level and level-1 (mirrored for
 * negative coefficients); below threshold only +/-1 is considered. */
4063 for(i=start_i; i<=last_non_zero; i++) {
4064 const int j = scantable[i];
4065 int level = block[j] * qmat[j];
4067 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4068 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4069 if(((unsigned)(level+threshold1))>threshold2){
4071 level= (bias + level)>>QMAT_SHIFT;
4073 coeff[1][i]= level-1;
4074 // coeff[2][k]= level-2;
4076 level= (bias - level)>>QMAT_SHIFT;
4077 coeff[0][i]= -level;
4078 coeff[1][i]= -level+1;
4079 // coeff[2][k]= -level+2;
4081 coeff_count[i]= FFMIN(level, 2);
4082 av_assert2(coeff_count[i]);
4085 coeff[0][i]= (level>>31)|1;
4090 *overflow= s->max_qcoeff < max; //overflow might have happened
/* All coefficients quantised to zero: clear the block and bail out. */
4092 if(last_non_zero < start_i){
4093 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4094 return last_non_zero;
4097 score_tab[start_i]= 0;
4098 survivor[0]= start_i;
/* Dynamic programming over scan positions: for each candidate level, try
 * extending every surviving predecessor run and keep the cheapest path. */
4101 for(i=start_i; i<=last_non_zero; i++){
4102 int level_index, j, zero_distortion;
4103 int dct_coeff= FFABS(block[ scantable[i] ]);
4104 int best_score=256*256*256*120;
/* ifast FDCT output is AAN-prescaled; undo the scale for distortion calc. */
4106 if (s->fdsp.fdct == ff_fdct_ifast)
4107 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4108 zero_distortion= dct_coeff*dct_coeff;
4110 for(level_index=0; level_index < coeff_count[i]; level_index++){
4112 int level= coeff[level_index][i];
4113 const int alevel= FFABS(level);
/* Reconstruct the dequantised value exactly as the decoder would. */
4118 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4119 unquant_coeff= alevel*qmul + qadd;
4120 } else if(s->out_format == FMT_MJPEG) {
4121 j = s->idsp.idct_permutation[scantable[i]];
4122 unquant_coeff = alevel * matrix[j] * 8;
4124 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
/* MPEG intra vs. inter dequant with mismatch-control oddification. */
4126 unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4127 unquant_coeff = (unquant_coeff - 1) | 1;
4129 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4130 unquant_coeff = (unquant_coeff - 1) | 1;
4135 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* Levels representable by the VLC table (|level|<64 after +64 bias). */
4137 if((level&(~127)) == 0){
4138 for(j=survivor_count-1; j>=0; j--){
4139 int run= i - survivor[j];
4140 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4141 score += score_tab[i-run];
4143 if(score < best_score){
4146 level_tab[i+1]= level-64;
/* H.263/H.261 also need the "last coefficient" VLC cost tracked. */
4150 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4151 for(j=survivor_count-1; j>=0; j--){
4152 int run= i - survivor[j];
4153 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4154 score += score_tab[i-run];
4155 if(score < last_score){
4158 last_level= level-64;
/* Escape-coded levels: flat escape cost instead of table lookup. */
4164 distortion += esc_length*lambda;
4165 for(j=survivor_count-1; j>=0; j--){
4166 int run= i - survivor[j];
4167 int score= distortion + score_tab[i-run];
4169 if(score < best_score){
4172 level_tab[i+1]= level-64;
4176 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4177 for(j=survivor_count-1; j>=0; j--){
4178 int run= i - survivor[j];
4179 int score= distortion + score_tab[i-run];
4180 if(score < last_score){
4183 last_level= level-64;
4191 score_tab[i+1]= best_score;
4193 //Note: there is a vlc code in mpeg4 which is 1 bit shorter than another one with a shorter run and the same level
/* Prune survivors that can no longer lead to a better path. */
4194 if(last_non_zero <= 27){
4195 for(; survivor_count; survivor_count--){
4196 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4200 for(; survivor_count; survivor_count--){
4201 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4206 survivor[ survivor_count++ ]= i+1;
/* Non-H.263 formats: choose the best truncation point after the fact. */
4209 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4210 last_score= 256*256*256*120;
4211 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4212 int score= score_tab[i];
4213 if(i) score += lambda*2; //FIXME exacter?
4215 if(score < last_score){
4218 last_level= level_tab[i];
4219 last_run= run_tab[i];
4224 s->coded_score[n] = last_score;
4226 dc= FFABS(block[0]);
4227 last_non_zero= last_i - 1;
4228 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4230 if(last_non_zero < start_i)
4231 return last_non_zero;
/* Special case: only the DC-adjacent coefficient survives — re-evaluate it
 * against the DC distortion to decide whether to keep it at all. */
4233 if(last_non_zero == 0 && start_i == 0){
4235 int best_score= dc * dc;
4237 for(i=0; i<coeff_count[0]; i++){
4238 int level= coeff[i][0];
4239 int alevel= FFABS(level);
4240 int unquant_coeff, score, distortion;
4242 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4243 unquant_coeff= (alevel*qmul + qadd)>>3;
4245 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4246 unquant_coeff = (unquant_coeff - 1) | 1;
4248 unquant_coeff = (unquant_coeff + 4) >> 3;
4249 unquant_coeff<<= 3 + 3;
4251 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4253 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4254 else score= distortion + esc_length*lambda;
4256 if(score < best_score){
4258 best_level= level - 64;
4261 block[0]= best_level;
4262 s->coded_score[n] = best_score - dc*dc;
4263 if(best_level == 0) return -1;
4264 else return last_non_zero;
/* Back-track the winning path, writing levels in IDCT-permuted positions. */
4268 av_assert2(last_level);
4270 block[ perm_scantable[last_non_zero] ]= last_level;
4273 for(; i>start_i; i -= run_tab[i] + 1){
4274 block[ perm_scantable[i-1] ]= level_tab[i];
4277 return last_non_zero;
4280 //#define REFINE_STATS 1
/* 64 IDCT basis images (one per coefficient), permuted to IDCT order;
 * filled lazily by build_basis() and used by dct_quantize_refine(). */
4281 static int16_t basis[64][64];
/* Precompute the 2-D DCT basis functions scaled by 1<<BASIS_SHIFT/4, with
 * the standard 1/sqrt(2) normalisation for the DC row/column.  Loop headers
 * over (i,j,x,y) are elided in this view. */
4283 static void build_basis(uint8_t *perm){
4290 double s= 0.25*(1<<BASIS_SHIFT);
4292 int perm_index= perm[index];
4293 if(i==0) s*= sqrt(0.5);
4294 if(j==0) s*= sqrt(0.5);
4295 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Iterative quantisation refinement (quantizer_noise_shaping): starting from
 * an already-quantised block, repeatedly try +/-1 changes to individual
 * coefficients, scoring each change as reconstruction-error delta (via
 * try_8x8basis on the residual rem[]) plus the VLC bit-cost delta, and apply
 * the best change until none improves.  Returns the new last nonzero index.
 * NOTE(review): heavily elided view (loop headers, several branches and the
 * outer iteration loop are missing) — verify against upstream FFmpeg. */
4302 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4303 int16_t *block, int16_t *weight, int16_t *orig,
4306 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4307 const uint8_t *scantable= s->intra_scantable.scantable;
4308 const uint8_t *perm_scantable= s->intra_scantable.permutated;
4309 // unsigned int threshold1, threshold2;
4314 int qmul, qadd, start_i, last_non_zero, i, dc;
4316 uint8_t * last_length;
4318 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* REFINE_STATS counters (debug only; static, not thread safe). */
4321 static int after_last=0;
4322 static int to_zero=0;
4323 static int from_zero=0;
4326 static int messed_sign=0;
/* Lazily build the IDCT basis tables on first use. */
4329 if(basis[0][0] == 0)
4330 build_basis(s->idsp.idct_permutation);
4341 /* For AIC we skip quant/dequant of INTRADC */
4345 q <<= RECON_SHIFT-3;
4346 /* note: block[0] is assumed to be positive */
4348 // block[0] = (block[0] + (q >> 1)) / q;
4350 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4351 // bias= 1<<(QMAT_SHIFT-1);
4352 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4353 length = s->intra_chroma_ac_vlc_length;
4354 last_length= s->intra_chroma_ac_vlc_last_length;
4356 length = s->intra_ac_vlc_length;
4357 last_length= s->intra_ac_vlc_last_length;
/* Inter path (else-branch; brace elided). */
4362 length = s->inter_ac_vlc_length;
4363 last_length= s->inter_ac_vlc_last_length;
4365 last_non_zero = s->block_last_index[n];
/* Build rem[] = reconstructed - original, in RECON_SHIFT fixed point. */
4370 dc += (1<<(RECON_SHIFT-1));
4371 for(i=0; i<64; i++){
4372 rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig dirrectly instead of copying to rem[]
4375 STOP_TIMER("memset rem[]")}
/* Convert the perceptual weights to the 16..63 range used by try_8x8basis. */
4378 for(i=0; i<64; i++){
4383 w= FFABS(weight[i]) + qns*one;
4384 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4387 // w=weight[i] = (63*qns + (w/2)) / w;
4390 av_assert2(w<(1<<6));
4393 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* Record the current run-length structure and add the dequantised
 * coefficients' contribution into rem[]. */
4399 for(i=start_i; i<=last_non_zero; i++){
4400 int j= perm_scantable[i];
4401 const int level= block[j];
4405 if(level<0) coeff= qmul*level - qadd;
4406 else coeff= qmul*level + qadd;
4407 run_tab[rle_index++]=run;
4410 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
4416 if(last_non_zero>0){
4417 STOP_TIMER("init rem[]")
/* Main refinement iteration: baseline score is "change nothing". */
4424 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4427 int run2, best_unquant_change=0, analyze_gradient;
4431 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
/* Gradient d1[] = weighted residual, used to reject sign-worsening changes. */
4433 if(analyze_gradient){
4437 for(i=0; i<64; i++){
4440 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
4443 STOP_TIMER("rem*w*w")}
/* Try +/-1 on the intra DC coefficient. */
4453 const int level= block[0];
4454 int change, old_coeff;
4456 av_assert2(s->mb_intra);
4460 for(change=-1; change<=1; change+=2){
4461 int new_level= level + change;
4462 int score, new_coeff;
4464 new_coeff= q*new_level;
4465 if(new_coeff >= 2048 || new_coeff < 0)
4468 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4469 new_coeff - old_coeff);
4470 if(score<best_score){
4473 best_change= change;
4474 best_unquant_change= new_coeff - old_coeff;
4481 run2= run_tab[rle_index++];
/* Try +/-1 on every AC coefficient (and one position past the last). */
4485 for(i=start_i; i<64; i++){
4486 int j= perm_scantable[i];
4487 const int level= block[j];
4488 int change, old_coeff;
4490 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4494 if(level<0) old_coeff= qmul*level - qadd;
4495 else old_coeff= qmul*level + qadd;
4496 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4500 av_assert2(run2>=0 || i >= last_non_zero );
4503 for(change=-1; change<=1; change+=2){
4504 int new_level= level + change;
4505 int score, new_coeff, unquant_change;
/* Level-1 noise shaping never increases coefficient magnitudes. */
4508 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4512 if(new_level<0) new_coeff= qmul*new_level - qadd;
4513 else new_coeff= qmul*new_level + qadd;
4514 if(new_coeff >= 2048 || new_coeff <= -2048)
4516 //FIXME check for overflow
/* Existing nonzero coefficient changing magnitude: pure VLC delta. */
4519 if(level < 63 && level > -63){
4520 if(i < last_non_zero)
4521 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4522 - length[UNI_AC_ENC_INDEX(run, level+64)];
4524 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4525 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* Coefficient going 0 -> +/-1: runs split, recompute both VLC costs. */
4528 av_assert2(FFABS(new_level)==1);
4530 if(analyze_gradient){
4531 int g= d1[ scantable[i] ];
4532 if(g && (g^new_level) >= 0)
4536 if(i < last_non_zero){
4537 int next_i= i + run2 + 1;
4538 int next_level= block[ perm_scantable[next_i] ] + 64;
4540 if(next_level&(~127))
4543 if(next_i < last_non_zero)
4544 score += length[UNI_AC_ENC_INDEX(run, 65)]
4545 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4546 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4548 score += length[UNI_AC_ENC_INDEX(run, 65)]
4549 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4550 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4552 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4554 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4555 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* Coefficient going +/-1 -> 0: runs merge, inverse of the case above. */
4561 av_assert2(FFABS(level)==1);
4563 if(i < last_non_zero){
4564 int next_i= i + run2 + 1;
4565 int next_level= block[ perm_scantable[next_i] ] + 64;
4567 if(next_level&(~127))
4570 if(next_i < last_non_zero)
4571 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4572 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4573 - length[UNI_AC_ENC_INDEX(run, 65)];
4575 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4576 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4577 - length[UNI_AC_ENC_INDEX(run, 65)];
4579 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4581 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4582 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* Combine bit-cost delta (scaled by lambda) with distortion delta. */
4589 unquant_change= new_coeff - old_coeff;
4590 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4592 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4594 if(score<best_score){
4597 best_change= change;
4598 best_unquant_change= unquant_change;
4602 prev_level= level + 64;
4603 if(prev_level&(~127))
4612 STOP_TIMER("iterative step")}
/* Apply the winning change and update last_non_zero accordingly. */
4616 int j= perm_scantable[ best_coeff ];
4618 block[j] += best_change;
4620 if(best_coeff > last_non_zero){
4621 last_non_zero= best_coeff;
4622 av_assert2(block[j]);
4629 if(block[j] - best_change){
4630 if(FFABS(block[j]) > FFABS(block[j] - best_change)){
/* A coefficient was zeroed: rescan backwards for the new last nonzero. */
4642 for(; last_non_zero>=start_i; last_non_zero--){
4643 if(block[perm_scantable[last_non_zero]])
4649 if(256*256*256*64 % count == 0){
4650 av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
/* Rebuild run_tab[] and fold the applied change into rem[]. */
4655 for(i=start_i; i<=last_non_zero; i++){
4656 int j= perm_scantable[i];
4657 const int level= block[j];
4660 run_tab[rle_index++]=run;
4667 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4673 if(last_non_zero>0){
4674 STOP_TIMER("iterative search")
4679 return last_non_zero;
4683  * Permute an 8x8 block according to a permutation vector.
4684  * @param block the block which will be permuted according to
4685  * the given permutation vector
4686  * @param permutation the permutation vector
4687  * @param last the last non-zero coefficient in scantable order, used to
4688  * speed the permutation up
4689  * @param scantable the used scantable; this is only used to speed the
4690  * permutation up, the block is not (inverse) permuted
4691  * to scantable order!
/* Apply permutation[] to the nonzero elements of block[] (copy-out to a
 * temp, then scatter back through the permutation), touching only positions
 * reachable via scantable[0..last] for speed.  NOTE(review): the temp[]
 * declaration and the early-out path are elided in this view. */
4693 void ff_block_permute(int16_t *block, uint8_t *permutation,
4694 const uint8_t *scantable, int last)
4701 //FIXME it is ok but not clean and might fail for some permutations
4702 // if (permutation[1] == 1)
/* First pass: copy the relevant coefficients out (and clear them —
 * clearing lines are elided here). */
4705 for (i = 0; i <= last; i++) {
4706 const int j = scantable[i];
/* Second pass: write each saved coefficient to its permuted position. */
4711 for (i = 0; i <= last; i++) {
4712 const int j = scantable[i];
4713 const int perm_j = permutation[j];
4714 block[perm_j] = temp[j];
/* Plain (non-trellis) quantisation of one 8x8 block, C reference.
 * FDCT + optional denoise, intra DC handled by straight division, AC via the
 * precomputed qmat with a dead-zone threshold, then an IDCT-order permute of
 * the surviving coefficients.  Returns the last nonzero scan index; sets
 * *overflow when a level exceeds s->max_qcoeff.
 * NOTE(review): elided lines include the intra/inter branch, the sign
 * branches of the quantise loop and the write-back — verify upstream. */
4718 int ff_dct_quantize_c(MpegEncContext *s,
4719 int16_t *block, int n,
4720 int qscale, int *overflow)
4722 int i, j, level, last_non_zero, q, start_i;
4724 const uint8_t *scantable= s->intra_scantable.scantable;
4727 unsigned int threshold1, threshold2;
4729 s->fdsp.fdct(block);
4731 if(s->dct_error_sum)
4732 s->denoise_dct(s, block);
4742 /* For AIC we skip quant/dequant of INTRADC */
4745 /* note: block[0] is assumed to be positive */
4746 block[0] = (block[0] + (q >> 1)) / q;
/* Intra: luma (n<4) vs. chroma quant matrix; bias scaled to QMAT_SHIFT. */
4749 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4750 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
/* Inter path (else-branch; brace elided). */
4754 qmat = s->q_inter_matrix[qscale];
4755 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4757 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4758 threshold2= (threshold1<<1);
/* Scan backwards for the last coefficient above the dead zone. */
4759 for(i=63;i>=start_i;i--) {
4761 level = block[j] * qmat[j];
4763 if(((unsigned)(level+threshold1))>threshold2){
/* Quantise every coefficient up to last_non_zero (others become zero). */
4770 for(i=start_i; i<=last_non_zero; i++) {
4772 level = block[j] * qmat[j];
4774 // if( bias+level >= (1<<QMAT_SHIFT)
4775 // || bias-level >= (1<<QMAT_SHIFT)){
4776 if(((unsigned)(level+threshold1))>threshold2){
4778 level= (bias + level)>>QMAT_SHIFT;
4781 level= (bias - level)>>QMAT_SHIFT;
4789 *overflow= s->max_qcoeff < max; //overflow might have happened
4791 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4792 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4793 ff_block_permute(block, s->idsp.idct_permutation,
4794 scantable, last_non_zero);
4796 return last_non_zero;
/* AVOption plumbing: OFFSET gives a field's byte offset inside
 * MpegEncContext; VE marks options as video+encoding parameters. */
4799 #define OFFSET(x) offsetof(MpegEncContext, x)
4800 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options of the H.263 encoder (terminator entry elided in view). */
4801 static const AVOption h263_options[] = {
4802 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4803 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass exposing h263_options through the AVOptions API. */
4808 static const AVClass h263_class = {
4809 .class_name = "H.263 encoder",
4810 .item_name = av_default_item_name,
4811 .option = h263_options,
4812 .version = LIBAVUTIL_VERSION_INT,
/* H.263 encoder registration: routes init/encode/close to the shared
 * mpegvideo encoder entry points; 4:2:0 only.
 * NOTE(review): the .name field is on a line not visible in this chunk. */
4815 AVCodec ff_h263_encoder = {
4817 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4818 .type = AVMEDIA_TYPE_VIDEO,
4819 .id = AV_CODEC_ID_H263,
4820 .priv_data_size = sizeof(MpegEncContext),
4821 .init = ff_mpv_encode_init,
4822 .encode2 = ff_mpv_encode_picture,
4823 .close = ff_mpv_encode_end,
4824 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4825 .priv_class = &h263_class,
/* Private AVOptions for the H.263+ encoder (unrestricted MVs, alternative
 * inter VLC, OBMC, structured slices).
 * NOTE(review): the terminating sentinel entry is not visible in this chunk. */
4828 static const AVOption h263p_options[] = {
4829 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4830 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4831 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4832 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
/* AVClass exposing h263p_options through the AVOptions API. */
4836 static const AVClass h263p_class = {
4837 .class_name = "H.263p encoder",
4838 .item_name = av_default_item_name,
4839 .option = h263p_options,
4840 .version = LIBAVUTIL_VERSION_INT,
/* H.263+ (H.263 version 2) encoder registration; same shared mpegvideo
 * entry points as the H.263 encoder, plus slice-threading capability.
 * NOTE(review): the .name field is on a line not visible in this chunk. */
4843 AVCodec ff_h263p_encoder = {
4845 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4846 .type = AVMEDIA_TYPE_VIDEO,
4847 .id = AV_CODEC_ID_H263P,
4848 .priv_data_size = sizeof(MpegEncContext),
4849 .init = ff_mpv_encode_init,
4850 .encode2 = ff_mpv_encode_picture,
4851 .close = ff_mpv_encode_end,
4852 .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4853 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4854 .priv_class = &h263p_class,
/* AVClass for msmpeg4v2; reuses the generic mpegvideo option table. */
4857 static const AVClass msmpeg4v2_class = {
4858 .class_name = "msmpeg4v2 encoder",
4859 .item_name = av_default_item_name,
4860 .option = ff_mpv_generic_options,
4861 .version = LIBAVUTIL_VERSION_INT,
/* MS-MPEG4 v2 encoder registration; shared mpegvideo entry points, 4:2:0. */
4864 AVCodec ff_msmpeg4v2_encoder = {
4865 .name = "msmpeg4v2",
4866 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4867 .type = AVMEDIA_TYPE_VIDEO,
4868 .id = AV_CODEC_ID_MSMPEG4V2,
4869 .priv_data_size = sizeof(MpegEncContext),
4870 .init = ff_mpv_encode_init,
4871 .encode2 = ff_mpv_encode_picture,
4872 .close = ff_mpv_encode_end,
4873 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4874 .priv_class = &msmpeg4v2_class,
/* AVClass for msmpeg4v3; reuses the generic mpegvideo option table. */
4877 static const AVClass msmpeg4v3_class = {
4878 .class_name = "msmpeg4v3 encoder",
4879 .item_name = av_default_item_name,
4880 .option = ff_mpv_generic_options,
4881 .version = LIBAVUTIL_VERSION_INT,
/* MS-MPEG4 v3 encoder registration; shared mpegvideo entry points, 4:2:0.
 * NOTE(review): the .name field is on a line not visible in this chunk. */
4884 AVCodec ff_msmpeg4v3_encoder = {
4886 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4887 .type = AVMEDIA_TYPE_VIDEO,
4888 .id = AV_CODEC_ID_MSMPEG4V3,
4889 .priv_data_size = sizeof(MpegEncContext),
4890 .init = ff_mpv_encode_init,
4891 .encode2 = ff_mpv_encode_picture,
4892 .close = ff_mpv_encode_end,
4893 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4894 .priv_class = &msmpeg4v3_class,
/* AVClass for wmv1; reuses the generic mpegvideo option table. */
4897 static const AVClass wmv1_class = {
4898 .class_name = "wmv1 encoder",
4899 .item_name = av_default_item_name,
4900 .option = ff_mpv_generic_options,
4901 .version = LIBAVUTIL_VERSION_INT,
4904 AVCodec ff_wmv1_encoder = {
4906 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4907 .type = AVMEDIA_TYPE_VIDEO,
4908 .id = AV_CODEC_ID_WMV1,
4909 .priv_data_size = sizeof(MpegEncContext),
4910 .init = ff_mpv_encode_init,
4911 .encode2 = ff_mpv_encode_picture,
4912 .close = ff_mpv_encode_end,
4913 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4914 .priv_class = &wmv1_class,