2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/pixdesc.h"
40 #include "libavutil/opt.h"
41 #include "libavutil/timer.h"
46 #include "mpegvideo.h"
47 #include "mpegvideodata.h"
51 #include "mjpegenc_common.h"
53 #include "mpegutils.h"
56 #include "pixblockdsp.h"
60 #include "aandcttab.h"
62 #include "mpeg4video.h"
64 #include "bytestream.h"
/* Fixed-point precision (in bits) of the quantizer bias values. */
70 #define QUANT_BIAS_SHIFT 8
/* Shift used for the 16-bit (MMX-friendly) quantization matrices. */
72 #define QMAT_SHIFT_MMX 16
/* Forward declarations for helpers defined later in this file. */
75 static int encode_picture(MpegEncContext *s, int picture_number);
76 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
77 static int sse_mb(MpegEncContext *s);
78 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
79 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
/* Shared default motion-vector penalty / f_code tables, filled once in
 * mpv_encode_defaults() and then referenced by every encoder context. */
81 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
82 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
/* Generic AVOption table shared by all mpegvideo-based encoders.
 * NOTE(review): the option entries themselves are missing from this
 * extraction — only the array's opening line survives. */
84 const AVOption ff_mpv_generic_options[] = {
/**
 * Precompute fixed-point reciprocal quantization matrices for every
 * qscale in [qmin, qmax], choosing the scaling that matches the FDCT
 * implementation in use (islow vs. AAN/ifast), and derive the 16-bit
 * qmat16 tables (value + bias) used by the fast quantizer paths.
 *
 * NOTE(review): this extraction is missing lines (the opening brace,
 * local declarations such as qscale/qscale2/i/shift, some else/closing
 * braces and the tail of the overflow warning) — comments describe only
 * the visible code.
 */
89 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
90 uint16_t (*qmat16)[2][64],
91 const uint16_t *quant_matrix,
92 int bias, int qmin, int qmax, int intra)
94 FDCTDSPContext *fdsp = &s->fdsp;
98 for (qscale = qmin; qscale <= qmax; qscale++) {
/* MPEG-2 non-linear qscale maps the code through a lookup table;
 * otherwise the effective quantizer is simply 2*qscale. */
102 if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
103 else qscale2 = qscale << 1;
105 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
107 fdsp->fdct == ff_faandct ||
108 #endif /* CONFIG_FAANDCT */
109 fdsp->fdct == ff_jpeg_fdct_islow_10) {
110 for (i = 0; i < 64; i++) {
/* idct_permutation maps natural order to the IDCT's coefficient order. */
111 const int j = s->idsp.idct_permutation[i];
112 int64_t den = (int64_t) qscale2 * quant_matrix[j];
113 /* 16 <= qscale * quant_matrix[i] <= 7905
114 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
115 * 19952 <= x <= 249205026
116 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
117 * 3444240 >= (1 << 36) / (x) >= 275 */
119 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
121 } else if (fdsp->fdct == ff_fdct_ifast) {
122 for (i = 0; i < 64; i++) {
123 const int j = s->idsp.idct_permutation[i];
/* The AAN FDCT leaves per-coefficient scale factors in the output,
 * so they are folded into the denominator here. */
124 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
125 /* 16 <= qscale * quant_matrix[i] <= 7905
126 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
127 * 19952 <= x <= 249205026
128 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
129 * 3444240 >= (1 << 36) / (x) >= 275 */
131 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
134 for (i = 0; i < 64; i++) {
135 const int j = s->idsp.idct_permutation[i];
136 int64_t den = (int64_t) qscale2 * quant_matrix[j];
137 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
138 * Assume x = qscale * quant_matrix[i]
140 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
141 * so 32768 >= (1 << 19) / (x) >= 67 */
142 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
143 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
144 // (qscale * quant_matrix[i]);
145 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
/* Clamp so the 16-bit multiplier is never 0 and never the
 * out-of-range value 128*256 (= 1 << 15 doubled). */
147 if (qmat16[qscale][0][i] == 0 ||
148 qmat16[qscale][0][i] == 128 * 256)
149 qmat16[qscale][0][i] = 128 * 256 - 1;
150 qmat16[qscale][1][i] =
151 ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
152 qmat16[qscale][0][i]);
/* Overflow check: for intra matrices the DC coefficient (i==0) is
 * skipped since it is quantized separately. */
156 for (i = intra; i < 64; i++) {
158 if (fdsp->fdct == ff_fdct_ifast) {
159 max = (8191LL * ff_aanscales[i]) >> 14;
161 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
167 av_log(NULL, AV_LOG_INFO,
168 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/**
 * Derive s->qscale (and s->lambda2) from the current lambda.
 * NOTE(review): closing braces and some statements are missing from this
 * extraction.
 */
173 static inline void update_qscale(MpegEncContext *s)
/* NOTE(review): the "&& 0" makes this non-linear search branch dead
 * code as written — the linear mapping below is always taken. */
175 if (s->q_scale_type == 1 && 0) {
177 int bestdiff=INT_MAX;
/* Search the MPEG-2 non-linear qscale table for the entry closest to
 * lambda*139 (in FF_LAMBDA fixed point), honoring qmin/qmax. */
180 for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
181 int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
182 if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
183 (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
185 if (diff < bestdiff) {
/* Linear mapping: qscale ~= lambda * 139 / 2^(LAMBDA_SHIFT+7), rounded. */
192 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
193 (FF_LAMBDA_SHIFT + 7);
/* When VBV forces us past qmax, allow up to 31 (the codec maximum). */
194 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
197 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/**
 * Write a 64-entry quantization matrix to the bitstream in zigzag order,
 * 8 bits per coefficient.
 * NOTE(review): braces/closing lines are missing from this extraction.
 */
201 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
207 for (i = 0; i < 64; i++) {
208 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
215 * init s->current_picture.qscale_table from s->lambda_table
217 void ff_init_qscale_tab(MpegEncContext *s)
219 int8_t * const qscale_table = s->current_picture.qscale_table;
/* Convert each macroblock's lambda to a qscale (same linear mapping as
 * update_qscale) and clamp it to the user's qmin..(qmax, truncated in
 * this extraction). */
222 for (i = 0; i < s->mb_num; i++) {
223 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
224 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
225 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/**
 * Copy the per-frame fields that motion estimation may have changed from
 * one slice context to another (dst <- src).
 * NOTE(review): the src parameter line, several COPY() entries and the
 * #undef/closing brace are missing from this extraction.
 */
230 static void update_duplicate_context_after_me(MpegEncContext *dst,
233 #define COPY(a) dst->a= src->a
235 COPY(current_picture);
241 COPY(picture_in_gop_number);
242 COPY(gop_picture_number);
243 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
244 COPY(progressive_frame); // FIXME don't set in encode_header
245 COPY(partitioned_frame); // FIXME don't set in encode_header
250 * Set the given MpegEncContext to defaults for encoding.
251 * the changed fields will not depend upon the prior state of the MpegEncContext.
253 static void mpv_encode_defaults(MpegEncContext *s)
256 ff_mpv_common_defaults(s);
/* One-time fill of the shared default f_code table; the penalty-table
 * initialization presumably sits in lines missing from this extraction. */
258 for (i = -16; i < 16; i++) {
259 default_fcode_tab[i + MAX_MV] = 1;
261 s->me.mv_penalty = default_mv_penalty;
262 s->fcode_tab = default_fcode_tab;
264 s->input_picture_number = 0;
265 s->picture_in_gop_number = 0;
/**
 * Select the DCT/quantization function pointers for this context:
 * arch-specific overrides first, then generic C fallbacks, and finally
 * the trellis quantizer when requested via avctx->trellis.
 * NOTE(review): the ARCH_X86 guard and return statement are missing
 * from this extraction.
 */
268 av_cold int ff_dct_encode_init(MpegEncContext *s) {
270 ff_dct_encode_init_x86(s);
272 if (CONFIG_H263_ENCODER)
273 ff_h263dsp_init(&s->h263dsp);
274 if (!s->dct_quantize)
275 s->dct_quantize = ff_dct_quantize_c;
277 s->denoise_dct = denoise_dct_c;
/* Keep the plain quantizer around; trellis replaces the default path
 * but fast_dct_quantize stays non-trellis. */
278 s->fast_dct_quantize = s->dct_quantize;
279 if (s->avctx->trellis)
280 s->dct_quantize = dct_quantize_trellis_c;
/**
 * Initialize an mpegvideo-family encoder from the user's AVCodecContext:
 * validate pixel format, dimensions and rate-control settings per codec,
 * migrate deprecated public options into private context fields, allocate
 * the quantization and picture tables, and set up the per-codec bitstream
 * writers and rate control.
 *
 * NOTE(review): this extraction is missing many lines (AVERROR returns,
 * 'break' statements, closing braces, the 'fail:' label body); comments
 * below describe only the visible code.
 */
285 /* init video encoder */
286 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
288 MpegEncContext *s = avctx->priv_data;
289 AVCPBProperties *cpb_props;
290 int i, ret, format_supported;
292 mpv_encode_defaults(s);
/* Reject pixel formats the chosen codec cannot encode. */
294 switch (avctx->codec_id) {
295 case AV_CODEC_ID_MPEG2VIDEO:
296 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
297 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
298 av_log(avctx, AV_LOG_ERROR,
299 "only YUV420 and YUV422 are supported\n");
303 case AV_CODEC_ID_MJPEG:
304 case AV_CODEC_ID_AMV:
305 format_supported = 0;
306 /* JPEG color space */
307 if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
308 avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
309 avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
310 (avctx->color_range == AVCOL_RANGE_JPEG &&
311 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
312 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
313 avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
314 format_supported = 1;
315 /* MPEG color space */
316 else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
317 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
318 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
319 avctx->pix_fmt == AV_PIX_FMT_YUV444P))
320 format_supported = 1;
322 if (!format_supported) {
323 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
328 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
329 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
/* Map the pixel format to the internal chroma subsampling mode. */
334 switch (avctx->pix_fmt) {
335 case AV_PIX_FMT_YUVJ444P:
336 case AV_PIX_FMT_YUV444P:
337 s->chroma_format = CHROMA_444;
339 case AV_PIX_FMT_YUVJ422P:
340 case AV_PIX_FMT_YUV422P:
341 s->chroma_format = CHROMA_422;
343 case AV_PIX_FMT_YUVJ420P:
344 case AV_PIX_FMT_YUV420P:
346 s->chroma_format = CHROMA_420;
350 avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
/* Migrate deprecated public AVCodecContext options into private fields. */
352 #if FF_API_PRIVATE_OPT
353 FF_DISABLE_DEPRECATION_WARNINGS
354 if (avctx->rtp_payload_size)
355 s->rtp_payload_size = avctx->rtp_payload_size;
356 if (avctx->me_penalty_compensation)
357 s->me_penalty_compensation = avctx->me_penalty_compensation;
359 s->me_pre = avctx->pre_me;
360 FF_ENABLE_DEPRECATION_WARNINGS
363 s->bit_rate = avctx->bit_rate;
364 s->width = avctx->width;
365 s->height = avctx->height;
366 if (avctx->gop_size > 600 &&
367 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
368 av_log(avctx, AV_LOG_WARNING,
369 "keyframe interval too large!, reducing it from %d to %d\n",
370 avctx->gop_size, 600);
371 avctx->gop_size = 600;
373 s->gop_size = avctx->gop_size;
375 if (avctx->max_b_frames > MAX_B_FRAMES) {
376 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
377 "is %d.\n", MAX_B_FRAMES);
378 avctx->max_b_frames = MAX_B_FRAMES;
380 s->max_b_frames = avctx->max_b_frames;
381 s->codec_id = avctx->codec->id;
382 s->strict_std_compliance = avctx->strict_std_compliance;
383 s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
384 s->rtp_mode = !!s->rtp_payload_size;
385 s->intra_dc_precision = avctx->intra_dc_precision;
387 // workaround some differences between how applications specify dc precision
388 if (s->intra_dc_precision < 0) {
389 s->intra_dc_precision += 8;
390 } else if (s->intra_dc_precision >= 8)
391 s->intra_dc_precision -= 8;
393 if (s->intra_dc_precision < 0) {
394 av_log(avctx, AV_LOG_ERROR,
395 "intra dc precision must be positive, note some applications use"
396 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
397 return AVERROR(EINVAL);
/* Only MPEG-2 supports raising DC precision (up to 11 bits, code 3). */
400 if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
401 av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
402 return AVERROR(EINVAL);
404 s->user_specified_pts = AV_NOPTS_VALUE;
406 if (s->gop_size <= 1) {
413 #if FF_API_MOTION_EST
414 FF_DISABLE_DEPRECATION_WARNINGS
415 s->me_method = avctx->me_method;
416 FF_ENABLE_DEPRECATION_WARNINGS
420 s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
423 FF_DISABLE_DEPRECATION_WARNINGS
424 if (avctx->border_masking != 0.0)
425 s->border_masking = avctx->border_masking;
426 FF_ENABLE_DEPRECATION_WARNINGS
/* Adaptive quantization is enabled whenever any masking/RD option asks
 * for per-MB qscale decisions. */
429 s->adaptive_quant = (s->avctx->lumi_masking ||
430 s->avctx->dark_masking ||
431 s->avctx->temporal_cplx_masking ||
432 s->avctx->spatial_cplx_masking ||
433 s->avctx->p_masking ||
435 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
438 s->loop_filter = !!(s->avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
/* Derive a default VBV buffer size when only the max rate was given. */
440 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
441 switch(avctx->codec_id) {
442 case AV_CODEC_ID_MPEG1VIDEO:
443 case AV_CODEC_ID_MPEG2VIDEO:
444 avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
446 case AV_CODEC_ID_MPEG4:
447 case AV_CODEC_ID_MSMPEG4V1:
448 case AV_CODEC_ID_MSMPEG4V2:
449 case AV_CODEC_ID_MSMPEG4V3:
/* Piecewise-linear interpolation between VBV sizes at reference rates. */
450 if (avctx->rc_max_rate >= 15000000) {
451 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
452 } else if(avctx->rc_max_rate >= 2000000) {
453 avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
454 } else if(avctx->rc_max_rate >= 384000) {
455 avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
457 avctx->rc_buffer_size = 40;
458 avctx->rc_buffer_size *= 16384;
461 if (avctx->rc_buffer_size) {
462 av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
/* Rate-control sanity checks: mismatched/contradictory user settings. */
466 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
467 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
471 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
472 av_log(avctx, AV_LOG_INFO,
473 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
476 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
477 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
481 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
482 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
486 if (avctx->rc_max_rate &&
487 avctx->rc_max_rate == avctx->bit_rate &&
488 avctx->rc_max_rate != avctx->rc_min_rate) {
489 av_log(avctx, AV_LOG_INFO,
490 "impossible bitrate constraints, this will fail\n");
493 if (avctx->rc_buffer_size &&
494 avctx->bit_rate * (int64_t)avctx->time_base.num >
495 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
496 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
500 if (!s->fixed_qscale &&
501 avctx->bit_rate * av_q2d(avctx->time_base) >
502 avctx->bit_rate_tolerance) {
503 av_log(avctx, AV_LOG_WARNING,
504 "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, (int64_t)avctx->bit_rate);
505 avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
/* vbv_delay is a 16-bit field in units of 1/90000 s; warn if the buffer
 * cannot be expressed in it at this rate. */
508 if (s->avctx->rc_max_rate &&
509 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
510 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
511 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
512 90000LL * (avctx->rc_buffer_size - 1) >
513 s->avctx->rc_max_rate * 0xFFFFLL) {
514 av_log(avctx, AV_LOG_INFO,
515 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
516 "specified vbv buffer is too large for the given bitrate!\n");
/* Per-codec feature validation (4MV, OBMC, qpel, B-frames, sizes...). */
519 if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
520 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
521 s->codec_id != AV_CODEC_ID_FLV1) {
522 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
526 if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
527 av_log(avctx, AV_LOG_ERROR,
528 "OBMC is only supported with simple mb decision\n");
532 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
533 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
537 if (s->max_b_frames &&
538 s->codec_id != AV_CODEC_ID_MPEG4 &&
539 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
540 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
541 av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
544 if (s->max_b_frames < 0) {
545 av_log(avctx, AV_LOG_ERROR,
546 "max b frames must be 0 or positive for mpegvideo based encoders\n");
550 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
551 s->codec_id == AV_CODEC_ID_H263 ||
552 s->codec_id == AV_CODEC_ID_H263P) &&
553 (avctx->sample_aspect_ratio.num > 255 ||
554 avctx->sample_aspect_ratio.den > 255)) {
555 av_log(avctx, AV_LOG_WARNING,
556 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
557 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
558 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
559 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
562 if ((s->codec_id == AV_CODEC_ID_H263 ||
563 s->codec_id == AV_CODEC_ID_H263P) &&
564 (avctx->width > 2048 ||
565 avctx->height > 1152 )) {
566 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
569 if ((s->codec_id == AV_CODEC_ID_H263 ||
570 s->codec_id == AV_CODEC_ID_H263P) &&
571 ((avctx->width &3) ||
572 (avctx->height&3) )) {
573 av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
577 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
578 (avctx->width > 4095 ||
579 avctx->height > 4095 )) {
580 av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
584 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
585 (avctx->width > 16383 ||
586 avctx->height > 16383 )) {
587 av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
591 if (s->codec_id == AV_CODEC_ID_RV10 &&
593 avctx->height&15 )) {
594 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
595 return AVERROR(EINVAL);
598 if (s->codec_id == AV_CODEC_ID_RV20 &&
601 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
602 return AVERROR(EINVAL);
605 if ((s->codec_id == AV_CODEC_ID_WMV1 ||
606 s->codec_id == AV_CODEC_ID_WMV2) &&
608 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
612 if ((s->avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
613 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
614 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
618 #if FF_API_PRIVATE_OPT
619 FF_DISABLE_DEPRECATION_WARNINGS
620 if (avctx->mpeg_quant)
621 s->mpeg_quant = avctx->mpeg_quant;
622 FF_ENABLE_DEPRECATION_WARNINGS
625 // FIXME mpeg2 uses that too
626 if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
627 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
628 av_log(avctx, AV_LOG_ERROR,
629 "mpeg2 style quantization not supported by codec\n");
633 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
634 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
638 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
639 s->avctx->mb_decision != FF_MB_DECISION_RD) {
640 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
644 #if FF_API_PRIVATE_OPT
645 FF_DISABLE_DEPRECATION_WARNINGS
646 if (avctx->scenechange_threshold)
647 s->scenechange_threshold = avctx->scenechange_threshold;
648 FF_ENABLE_DEPRECATION_WARNINGS
651 if (s->scenechange_threshold < 1000000000 &&
652 (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
653 av_log(avctx, AV_LOG_ERROR,
654 "closed gop with scene change detection are not supported yet, "
655 "set threshold to 1000000000\n");
659 if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
660 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
661 av_log(avctx, AV_LOG_ERROR,
662 "low delay forcing is only available for mpeg2\n");
665 if (s->max_b_frames != 0) {
666 av_log(avctx, AV_LOG_ERROR,
667 "B-frames cannot be used with low delay\n");
672 if (s->q_scale_type == 1) {
673 if (avctx->qmax > 28) {
674 av_log(avctx, AV_LOG_ERROR,
675 "non linear quant only supports qmax <= 28 currently\n");
680 if (avctx->slices > 1 &&
681 (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
682 av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
683 return AVERROR(EINVAL);
686 if (s->avctx->thread_count > 1 &&
687 s->codec_id != AV_CODEC_ID_MPEG4 &&
688 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
689 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
690 s->codec_id != AV_CODEC_ID_MJPEG &&
691 (s->codec_id != AV_CODEC_ID_H263P)) {
692 av_log(avctx, AV_LOG_ERROR,
693 "multi threaded encoding not supported by codec\n");
697 if (s->avctx->thread_count < 1) {
698 av_log(avctx, AV_LOG_ERROR,
699 "automatic thread number detection not supported by codec, "
704 if (!avctx->time_base.den || !avctx->time_base.num) {
705 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
709 #if FF_API_PRIVATE_OPT
710 FF_DISABLE_DEPRECATION_WARNINGS
711 if (avctx->b_frame_strategy)
712 s->b_frame_strategy = avctx->b_frame_strategy;
713 if (avctx->b_sensitivity != 40)
714 s->b_sensitivity = avctx->b_sensitivity;
715 FF_ENABLE_DEPRECATION_WARNINGS
718 if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
719 av_log(avctx, AV_LOG_INFO,
720 "notice: b_frame_strategy only affects the first pass\n");
721 s->b_frame_strategy = 0;
/* Reduce the timebase fraction so per-codec denominator limits apply. */
724 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
726 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
727 avctx->time_base.den /= i;
728 avctx->time_base.num /= i;
/* Default quantizer bias: larger intra bias (3/8) for MPEG/MJPEG-style
 * quantization, negative inter bias (-1/4) otherwise. */
732 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) {
733 // (a + x * 3 / 8) / x
734 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
735 s->inter_quant_bias = 0;
737 s->intra_quant_bias = 0;
739 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
742 if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
743 av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
744 return AVERROR(EINVAL);
747 #if FF_API_QUANT_BIAS
748 FF_DISABLE_DEPRECATION_WARNINGS
749 if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
750 s->intra_quant_bias = avctx->intra_quant_bias;
751 if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
752 s->inter_quant_bias = avctx->inter_quant_bias;
753 FF_ENABLE_DEPRECATION_WARNINGS
756 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
/* MPEG-4 stores the timebase denominator in a 16-bit field. */
758 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
759 s->avctx->time_base.den > (1 << 16) - 1) {
760 av_log(avctx, AV_LOG_ERROR,
761 "timebase %d/%d not supported by MPEG 4 standard, "
762 "the maximum admitted value for the timebase denominator "
763 "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
767 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
/* Per-codec output format, delay and feature flags. */
769 switch (avctx->codec->id) {
770 case AV_CODEC_ID_MPEG1VIDEO:
771 s->out_format = FMT_MPEG1;
772 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
773 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
775 case AV_CODEC_ID_MPEG2VIDEO:
776 s->out_format = FMT_MPEG1;
777 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
778 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
781 case AV_CODEC_ID_MJPEG:
782 case AV_CODEC_ID_AMV:
783 s->out_format = FMT_MJPEG;
784 s->intra_only = 1; /* force intra only for jpeg */
785 if (!CONFIG_MJPEG_ENCODER ||
786 ff_mjpeg_encode_init(s) < 0)
791 case AV_CODEC_ID_H261:
792 if (!CONFIG_H261_ENCODER)
794 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
795 av_log(avctx, AV_LOG_ERROR,
796 "The specified picture size of %dx%d is not valid for the "
797 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
798 s->width, s->height);
801 s->out_format = FMT_H261;
804 s->rtp_mode = 0; /* Sliced encoding not supported */
806 case AV_CODEC_ID_H263:
807 if (!CONFIG_H263_ENCODER)
809 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
810 s->width, s->height) == 8) {
811 av_log(avctx, AV_LOG_ERROR,
812 "The specified picture size of %dx%d is not valid for "
813 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
814 "352x288, 704x576, and 1408x1152. "
815 "Try H.263+.\n", s->width, s->height);
818 s->out_format = FMT_H263;
822 case AV_CODEC_ID_H263P:
823 s->out_format = FMT_H263;
826 s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
827 s->modified_quant = s->h263_aic;
828 s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
829 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
832 /* These are just to be sure */
836 case AV_CODEC_ID_FLV1:
837 s->out_format = FMT_H263;
838 s->h263_flv = 2; /* format = 1; 11-bit codes */
839 s->unrestricted_mv = 1;
840 s->rtp_mode = 0; /* don't allow GOB */
844 case AV_CODEC_ID_RV10:
845 s->out_format = FMT_H263;
849 case AV_CODEC_ID_RV20:
850 s->out_format = FMT_H263;
853 s->modified_quant = 1;
857 s->unrestricted_mv = 0;
859 case AV_CODEC_ID_MPEG4:
860 s->out_format = FMT_H263;
862 s->unrestricted_mv = 1;
863 s->low_delay = s->max_b_frames ? 0 : 1;
864 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
866 case AV_CODEC_ID_MSMPEG4V2:
867 s->out_format = FMT_H263;
869 s->unrestricted_mv = 1;
870 s->msmpeg4_version = 2;
874 case AV_CODEC_ID_MSMPEG4V3:
875 s->out_format = FMT_H263;
877 s->unrestricted_mv = 1;
878 s->msmpeg4_version = 3;
879 s->flipflop_rounding = 1;
883 case AV_CODEC_ID_WMV1:
884 s->out_format = FMT_H263;
886 s->unrestricted_mv = 1;
887 s->msmpeg4_version = 4;
888 s->flipflop_rounding = 1;
892 case AV_CODEC_ID_WMV2:
893 s->out_format = FMT_H263;
895 s->unrestricted_mv = 1;
896 s->msmpeg4_version = 5;
897 s->flipflop_rounding = 1;
905 #if FF_API_PRIVATE_OPT
906 FF_DISABLE_DEPRECATION_WARNINGS
907 if (avctx->noise_reduction)
908 s->noise_reduction = avctx->noise_reduction;
909 FF_ENABLE_DEPRECATION_WARNINGS
912 avctx->has_b_frames = !s->low_delay;
916 s->progressive_frame =
917 s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
918 AV_CODEC_FLAG_INTERLACED_ME) ||
/* Allocate common context state and initialize DSP helper tables. */
923 if (ff_mpv_common_init(s) < 0)
926 ff_fdctdsp_init(&s->fdsp, avctx);
927 ff_me_cmp_init(&s->mecc, avctx);
928 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
929 ff_pixblockdsp_init(&s->pdsp, avctx);
930 ff_qpeldsp_init(&s->qdsp);
932 if (s->msmpeg4_version) {
933 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
934 2 * 2 * (MAX_LEVEL + 1) *
935 (MAX_RUN + 1) * 2 * sizeof(int), fail);
937 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
/* 32 qscales * 64 coefficients per quantization matrix. */
939 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
940 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail);
941 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
942 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
943 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
944 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
945 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
946 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
947 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
948 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
951 if (s->noise_reduction) {
952 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
953 2 * 64 * sizeof(uint16_t), fail);
956 ff_dct_encode_init(s);
958 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
959 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
961 if (s->slice_context_count > 1) {
964 if (avctx->codec_id == AV_CODEC_ID_H263P)
965 s->h263_slice_structured = 1;
968 s->quant_precision = 5;
970 #if FF_API_PRIVATE_OPT
971 FF_DISABLE_DEPRECATION_WARNINGS
972 if (avctx->frame_skip_threshold)
973 s->frame_skip_threshold = avctx->frame_skip_threshold;
974 if (avctx->frame_skip_factor)
975 s->frame_skip_factor = avctx->frame_skip_factor;
976 if (avctx->frame_skip_exp)
977 s->frame_skip_exp = avctx->frame_skip_exp;
978 if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
979 s->frame_skip_cmp = avctx->frame_skip_cmp;
980 FF_ENABLE_DEPRECATION_WARNINGS
983 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, s->avctx->ildct_cmp);
984 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
/* Per-format bitstream writer initialization. */
986 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
987 ff_h261_encode_init(s);
988 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
989 ff_h263_encode_init(s);
990 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
991 if ((ret = ff_msmpeg4_encode_init(s)) < 0)
993 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
994 && s->out_format == FMT_MPEG1)
995 ff_mpeg1_encode_init(s);
/* Fill intra/inter matrices (codec defaults or user-supplied),
 * permuted into the IDCT's coefficient order. */
998 for (i = 0; i < 64; i++) {
999 int j = s->idsp.idct_permutation[i];
1000 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
1002 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
1003 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
1004 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1005 s->intra_matrix[j] =
1006 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1009 s->chroma_intra_matrix[j] =
1010 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1011 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1013 if (s->avctx->intra_matrix)
1014 s->intra_matrix[j] = s->avctx->intra_matrix[i];
1015 if (s->avctx->inter_matrix)
1016 s->inter_matrix[j] = s->avctx->inter_matrix[i];
1019 /* precompute matrix */
1020 /* for mjpeg, we do include qscale in the matrix */
1021 if (s->out_format != FMT_MJPEG) {
1022 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
1023 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1025 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
1026 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1030 if (ff_rate_control_init(s) < 0)
/* Migrate remaining deprecated rate-control options. */
1033 #if FF_API_ERROR_RATE
1034 FF_DISABLE_DEPRECATION_WARNINGS
1035 if (avctx->error_rate)
1036 s->error_rate = avctx->error_rate;
1037 FF_ENABLE_DEPRECATION_WARNINGS;
1040 #if FF_API_NORMALIZE_AQP
1041 FF_DISABLE_DEPRECATION_WARNINGS
1042 if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP)
1043 s->mpv_flags |= FF_MPV_FLAG_NAQ;
1044 FF_ENABLE_DEPRECATION_WARNINGS;
1048 FF_DISABLE_DEPRECATION_WARNINGS
1049 if (avctx->flags & CODEC_FLAG_MV0)
1050 s->mpv_flags |= FF_MPV_FLAG_MV0;
1051 FF_ENABLE_DEPRECATION_WARNINGS
1055 FF_DISABLE_DEPRECATION_WARNINGS
1056 if (avctx->rc_qsquish != 0.0)
1057 s->rc_qsquish = avctx->rc_qsquish;
1058 if (avctx->rc_qmod_amp != 0.0)
1059 s->rc_qmod_amp = avctx->rc_qmod_amp;
1060 if (avctx->rc_qmod_freq)
1061 s->rc_qmod_freq = avctx->rc_qmod_freq;
1062 if (avctx->rc_buffer_aggressivity != 1.0)
1063 s->rc_buffer_aggressivity = avctx->rc_buffer_aggressivity;
1064 if (avctx->rc_initial_cplx != 0.0)
1065 s->rc_initial_cplx = avctx->rc_initial_cplx;
1067 s->lmin = avctx->lmin;
1069 s->lmax = avctx->lmax;
1072 av_freep(&s->rc_eq);
1073 s->rc_eq = av_strdup(avctx->rc_eq);
1075 return AVERROR(ENOMEM);
1077 FF_ENABLE_DEPRECATION_WARNINGS
1080 #if FF_API_PRIVATE_OPT
1081 FF_DISABLE_DEPRECATION_WARNINGS
1082 if (avctx->brd_scale)
1083 s->brd_scale = avctx->brd_scale;
1085 if (avctx->prediction_method)
1086 s->pred = avctx->prediction_method + 1;
1087 FF_ENABLE_DEPRECATION_WARNINGS
/* b_frame_strategy==2: allocate downscaled scratch frames for lookahead. */
1090 if (s->b_frame_strategy == 2) {
1091 for (i = 0; i < s->max_b_frames + 2; i++) {
1092 s->tmp_frames[i] = av_frame_alloc();
1093 if (!s->tmp_frames[i])
1094 return AVERROR(ENOMEM);
1096 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1097 s->tmp_frames[i]->width = s->width >> s->brd_scale;
1098 s->tmp_frames[i]->height = s->height >> s->brd_scale;
1100 ret = av_frame_get_buffer(s->tmp_frames[i], 32);
/* Export coded picture buffer properties as stream side data. */
1106 cpb_props = ff_add_cpb_side_data(avctx);
1108 return AVERROR(ENOMEM);
1109 cpb_props->max_bitrate = avctx->rc_max_rate;
1110 cpb_props->min_bitrate = avctx->rc_min_rate;
1111 cpb_props->avg_bitrate = avctx->bit_rate;
1112 cpb_props->buffer_size = avctx->rc_buffer_size;
/* fail: undo partial initialization (label itself missing here). */
1116 ff_mpv_encode_end(avctx);
1117 return AVERROR_UNKNOWN;
/**
 * Free everything allocated by ff_mpv_encode_init(); safe to call on a
 * partially-initialized context (also used as the init failure path).
 * NOTE(review): braces and the final return are missing from this
 * extraction.
 */
1120 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1122 MpegEncContext *s = avctx->priv_data;
1125 ff_rate_control_uninit(s);
1127 ff_mpv_common_end(s);
1128 if (CONFIG_MJPEG_ENCODER &&
1129 s->out_format == FMT_MJPEG)
1130 ff_mjpeg_encode_close(s);
1132 av_freep(&avctx->extradata);
1134 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1135 av_frame_free(&s->tmp_frames[i]);
1137 ff_free_picture_tables(&s->new_picture);
1138 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1140 av_freep(&s->avctx->stats_out);
1141 av_freep(&s->ac_stats);
/* The chroma matrices may alias the luma ones; only free them when they
 * are separate allocations, then clear the (possibly dangling) aliases. */
1143 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1144 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1145 s->q_chroma_intra_matrix= NULL;
1146 s->q_chroma_intra_matrix16= NULL;
1147 av_freep(&s->q_intra_matrix);
1148 av_freep(&s->q_inter_matrix);
1149 av_freep(&s->q_intra_matrix16);
1150 av_freep(&s->q_inter_matrix16);
1151 av_freep(&s->input_picture);
1152 av_freep(&s->reordered_input_picture);
1153 av_freep(&s->dct_offset);
/**
 * Sum of absolute errors of a 16x16 block against a constant reference value.
 *
 * Used by get_intra_count() to estimate how well a block is predicted by its
 * own mean (a proxy for "should this macroblock be coded intra").
 *
 * NOTE(review): reconstructed from a truncated listing — the accumulator
 * declaration and the return were missing. abs() from <stdlib.h> replaces the
 * FFABS macro here; the operands are small ints so the results are identical.
 *
 * @param src    top-left of the 16x16 block
 * @param ref    constant value to compare every pixel against
 * @param stride distance in bytes between successive rows of src
 * @return sum over the 256 pixels of |src[x,y] - ref|
 */
static int get_sae(uint8_t *src, int ref, int stride)
{
    int x, y;
    int acc = 0;

    for (y = 0; y < 16; y++) {
        for (x = 0; x < 16; x++) {
            acc += abs(src[x + y * stride] - ref);
        }
    }

    return acc;
}
1172 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1173 uint8_t *ref, int stride)
1179 h = s->height & ~15;
1181 for (y = 0; y < h; y += 16) {
1182 for (x = 0; x < w; x += 16) {
1183 int offset = x + y * stride;
1184 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
1186 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1187 int sae = get_sae(src + offset, mean, stride);
1189 acc += sae + 500 < sad;
1195 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1197 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1198 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1199 s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1200 &s->linesize, &s->uvlinesize);
/* Queue one caller-supplied frame for encoding: validate timestamps, decide
 * whether the input buffer can be used in place ("direct") or must be copied
 * into a padded internal buffer, then insert it into the reorder queue.
 * NOTE(review): this region is an elided listing — the leading integers on
 * each line and several missing statements (e.g. the `direct` computation and
 * some error paths) are extraction damage; comments describe visible code only. */
1203 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1205 Picture *pic = NULL;
1207 int i, display_picture_number = 0, ret;
/* one frame of extra delay when B-frames are off but low_delay is not set */
1208 int encoding_delay = s->max_b_frames ? s->max_b_frames
1209 : (s->low_delay ? 0 : 1);
1210 int flush_offset = 1;
1215 display_picture_number = s->input_picture_number++;
/* user pts must be strictly increasing; remember the delta for dts */
1217 if (pts != AV_NOPTS_VALUE) {
1218 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1219 int64_t last = s->user_specified_pts;
1222 av_log(s->avctx, AV_LOG_ERROR,
1223 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1225 return AVERROR(EINVAL);
1228 if (!s->low_delay && display_picture_number == 1)
1229 s->dts_delta = pts - last;
1231 s->user_specified_pts = pts;
/* no pts given: extrapolate from the previous user pts, else fall back to
 * the display number — presumably an else-branch, elided here */
1233 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1234 s->user_specified_pts =
1235 pts = s->user_specified_pts + 1;
1236 av_log(s->avctx, AV_LOG_INFO,
1237 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1240 pts = display_picture_number;
/* direct-use checks: input must be refcounted and match our exact layout */
1244 if (!pic_arg->buf[0] ||
1245 pic_arg->linesize[0] != s->linesize ||
1246 pic_arg->linesize[1] != s->uvlinesize ||
1247 pic_arg->linesize[2] != s->uvlinesize)
1249 if ((s->width & 15) || (s->height & 15))
1251 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1253 if (s->linesize & (STRIDE_ALIGN-1))
1256 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1257 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1259 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1263 pic = &s->picture[i];
1267 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1270 ret = alloc_picture(s, pic, direct);
/* input already lives inside our padded buffer → nothing to copy */
1275 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1276 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1277 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1280 int h_chroma_shift, v_chroma_shift;
1281 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
/* plane-by-plane copy; row-wise when source and destination strides differ */
1285 for (i = 0; i < 3; i++) {
1286 int src_stride = pic_arg->linesize[i];
1287 int dst_stride = i ? s->uvlinesize : s->linesize;
1288 int h_shift = i ? h_chroma_shift : 0;
1289 int v_shift = i ? v_chroma_shift : 0;
1290 int w = s->width >> h_shift;
1291 int h = s->height >> v_shift;
1292 uint8_t *src = pic_arg->data[i];
1293 uint8_t *dst = pic->f->data[i];
/* interlaced MPEG-2 with large bottom padding is special-cased; the
 * adjustment body is elided here */
1296 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1297 && !s->progressive_sequence
1298 && FFALIGN(s->height, 32) - s->height > 16)
1301 if (!s->avctx->rc_buffer_size)
1302 dst += INPLACE_OFFSET;
1304 if (src_stride == dst_stride)
1305 memcpy(dst, src, src_stride * h);
1308 uint8_t *dst2 = dst;
1310 memcpy(dst2, src, w);
/* extend the right/bottom edges out to the macroblock grid */
1315 if ((s->width & 15) || (s->height & (vpad-1))) {
1316 s->mpvencdsp.draw_edges(dst, dst_stride,
1325 ret = av_frame_copy_props(pic->f, pic_arg);
1329 pic->f->display_picture_number = display_picture_number;
1330 pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1332 /* Flushing: When we have not received enough input frames,
1333 * ensure s->input_picture[0] contains the first picture */
1334 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1335 if (s->input_picture[flush_offset])
1338 if (flush_offset <= 1)
1341 encoding_delay = encoding_delay - flush_offset + 1;
1344 /* shift buffer entries */
1345 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1346 s->input_picture[i - flush_offset] = s->input_picture[i];
1348 s->input_picture[encoding_delay] = (Picture*) pic;
1353 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1357 int64_t score64 = 0;
1359 for (plane = 0; plane < 3; plane++) {
1360 const int stride = p->f->linesize[plane];
1361 const int bw = plane ? 1 : 2;
1362 for (y = 0; y < s->mb_height * bw; y++) {
1363 for (x = 0; x < s->mb_width * bw; x++) {
1364 int off = p->shared ? 0 : 16;
1365 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1366 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1367 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1369 switch (FFABS(s->frame_skip_exp)) {
1370 case 0: score = FFMAX(score, v); break;
1371 case 1: score += FFABS(v); break;
1372 case 2: score64 += v * (int64_t)v; break;
1373 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1374 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1383 if (s->frame_skip_exp < 0)
1384 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1385 -1.0/s->frame_skip_exp);
1387 if (score64 < s->frame_skip_threshold)
1389 if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
1394 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1396 AVPacket pkt = { 0 };
1397 int ret, got_output;
1399 av_init_packet(&pkt);
1400 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1405 av_packet_unref(&pkt);
/* b_frame_strategy == 2: brute-force the B-frame count by encoding the
 * buffered pictures, downscaled by 2^brd_scale, with a throw-away encoder for
 * every candidate count and keeping the one with the lowest rate-distortion
 * cost.
 * NOTE(review): elided listing — allocation checks, the shrink() source
 * arguments, the per-j loop framing and the best_rd bookkeeping are missing;
 * comments describe visible code only. */
1409 static int estimate_best_b_count(MpegEncContext *s)
1411 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1412 AVCodecContext *c = avcodec_alloc_context3(NULL);
1413 const int scale = s->brd_scale;
1414 int i, j, out_size, p_lambda, b_lambda, lambda2;
1415 int64_t best_rd = INT64_MAX;
1416 int best_b_count = -1;
1419 return AVERROR(ENOMEM);
1420 av_assert0(scale >= 0 && scale <= 3);
/* reuse the last lambdas as quality estimates; B falls back to P's */
1423 //s->next_picture_ptr->quality;
1424 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1425 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1426 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1427 if (!b_lambda) // FIXME we should do this somewhere else
1428 b_lambda = p_lambda;
1429 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* mirror the relevant settings onto the scratch encoder */
1432 c->width = s->width >> scale;
1433 c->height = s->height >> scale;
1434 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1435 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1436 c->mb_decision = s->avctx->mb_decision;
1437 c->me_cmp = s->avctx->me_cmp;
1438 c->mb_cmp = s->avctx->mb_cmp;
1439 c->me_sub_cmp = s->avctx->me_sub_cmp;
1440 c->pix_fmt = AV_PIX_FMT_YUV420P;
1441 c->time_base = s->avctx->time_base;
1442 c->max_b_frames = s->max_b_frames;
1444 if (avcodec_open2(c, codec, NULL) < 0)
/* downscale the last reference plus the queued input pictures into
 * s->tmp_frames[] */
1447 for (i = 0; i < s->max_b_frames + 2; i++) {
1448 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1449 s->next_picture_ptr;
1452 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1453 pre_input = *pre_input_ptr;
1454 memcpy(data, pre_input_ptr->f->data, sizeof(data));
/* non-shared pictures carry the in-place padding offset */
1456 if (!pre_input.shared && i) {
1457 data[0] += INPLACE_OFFSET;
1458 data[1] += INPLACE_OFFSET;
1459 data[2] += INPLACE_OFFSET;
1462 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1463 s->tmp_frames[i]->linesize[0],
1465 pre_input.f->linesize[0],
1466 c->width, c->height);
1467 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1468 s->tmp_frames[i]->linesize[1],
1470 pre_input.f->linesize[1],
1471 c->width >> 1, c->height >> 1);
1472 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1473 s->tmp_frames[i]->linesize[2],
1475 pre_input.f->linesize[2],
1476 c->width >> 1, c->height >> 1);
/* j = candidate B-frame count: I first, then P/B pattern, accumulate rd */
1480 for (j = 0; j < s->max_b_frames + 1; j++) {
1483 if (!s->input_picture[j])
1486 c->error[0] = c->error[1] = c->error[2] = 0;
1488 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1489 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1491 out_size = encode_frame(c, s->tmp_frames[0]);
1493 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1495 for (i = 0; i < s->max_b_frames + 1; i++) {
/* every (j+1)-th frame (and the last) is a P in this candidate pattern */
1496 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1498 s->tmp_frames[i + 1]->pict_type = is_p ?
1499 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1500 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1502 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1504 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1507 /* get the delayed frames */
1509 out_size = encode_frame(c, NULL);
1510 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* distortion term from the scratch encoder's PSNR accounting */
1513 rd += c->error[0] + c->error[1] + c->error[2];
1521 avcodec_free_context(&c);
1523 return best_b_count;
/* Pick the next picture to code: advance the reorder queue, optionally skip
 * the frame (frame-skip rate control), choose the B-frame run length per the
 * configured b_frame_strategy, then reference the chosen picture into
 * s->new_picture / s->current_picture_ptr.
 * NOTE(review): elided listing — loop bodies, error paths and several closing
 * braces are missing; comments describe visible code only. */
1526 static int select_input_picture(MpegEncContext *s)
1530 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1531 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1532 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1534 /* set next picture type & ordering */
1535 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
/* frame skipping: drop the input if it is close enough to the reference */
1536 if (s->frame_skip_threshold || s->frame_skip_factor) {
1537 if (s->picture_in_gop_number < s->gop_size &&
1538 s->next_picture_ptr &&
1539 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1540 // FIXME check that the gop check above is +-1 correct
1541 av_frame_unref(s->input_picture[0]->f);
1543 ff_vbv_update(s, 0);
/* no reference yet (or intra-only): force an I picture */
1549 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1550 !s->next_picture_ptr || s->intra_only) {
1551 s->reordered_input_picture[0] = s->input_picture[0];
1552 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1553 s->reordered_input_picture[0]->f->coded_picture_number =
1554 s->coded_picture_number++;
/* two-pass: take picture types from the pass-1 log */
1558 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1559 for (i = 0; i < s->max_b_frames + 1; i++) {
1560 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1562 if (pict_num >= s->rc_context.num_entries)
1564 if (!s->input_picture[i]) {
1565 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1569 s->input_picture[i]->f->pict_type =
1570 s->rc_context.entry[pict_num].new_pict_type;
/* strategy 0: always use the maximum available run of B-frames */
1574 if (s->b_frame_strategy == 0) {
1575 b_frames = s->max_b_frames;
1576 while (b_frames && !s->input_picture[b_frames])
/* strategy 1: score candidates with get_intra_count() and stop at the
 * first frame that looks too intra-heavy */
1578 } else if (s->b_frame_strategy == 1) {
1579 for (i = 1; i < s->max_b_frames + 1; i++) {
1580 if (s->input_picture[i] &&
1581 s->input_picture[i]->b_frame_score == 0) {
1582 s->input_picture[i]->b_frame_score =
1584 s->input_picture[i ]->f->data[0],
1585 s->input_picture[i - 1]->f->data[0],
1589 for (i = 0; i < s->max_b_frames + 1; i++) {
1590 if (!s->input_picture[i] ||
1591 s->input_picture[i]->b_frame_score - 1 >
1592 s->mb_num / s->b_sensitivity)
1596 b_frames = FFMAX(0, i - 1);
/* reset the cached scores for the consumed pictures */
1599 for (i = 0; i < b_frames + 1; i++) {
1600 s->input_picture[i]->b_frame_score = 0;
/* strategy 2: full rate-distortion search over downscaled encodes */
1602 } else if (s->b_frame_strategy == 2) {
1603 b_frames = estimate_best_b_count(s);
/* honor user-forced picture types inside the chosen run */
1608 for (i = b_frames - 1; i >= 0; i--) {
1609 int type = s->input_picture[i]->f->pict_type;
1610 if (type && type != AV_PICTURE_TYPE_B)
1613 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1614 b_frames == s->max_b_frames) {
1615 av_log(s->avctx, AV_LOG_ERROR,
1616 "warning, too many B-frames in a row\n");
/* GOP boundary handling: trim the run or force a keyframe */
1619 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1620 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1621 s->gop_size > s->picture_in_gop_number) {
1622 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1624 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1626 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1630 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1631 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
/* the anchor frame is coded first, the B-frames follow it */
1634 s->reordered_input_picture[0] = s->input_picture[b_frames];
1635 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1636 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1637 s->reordered_input_picture[0]->f->coded_picture_number =
1638 s->coded_picture_number++;
1639 for (i = 0; i < b_frames; i++) {
1640 s->reordered_input_picture[i + 1] = s->input_picture[i];
1641 s->reordered_input_picture[i + 1]->f->pict_type =
1643 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1644 s->coded_picture_number++;
1649 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1651 if (s->reordered_input_picture[0]) {
/* reference frames (non-B) are kept for both fields (3), B-frames not */
1652 s->reordered_input_picture[0]->reference =
1653 s->reordered_input_picture[0]->f->pict_type !=
1654 AV_PICTURE_TYPE_B ? 3 : 0;
1656 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1659 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1660 // input is a shared pix, so we can't modify it -> allocate a new
1661 // one & ensure that the shared one is reuseable
1664 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1667 pic = &s->picture[i];
1669 pic->reference = s->reordered_input_picture[0]->reference;
1670 if (alloc_picture(s, pic, 0) < 0) {
1674 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1678 /* mark us unused / free shared pic */
1679 av_frame_unref(s->reordered_input_picture[0]->f);
1680 s->reordered_input_picture[0]->shared = 0;
1682 s->current_picture_ptr = pic;
1684 // input is not a shared pix -> reuse buffer for current_pix
1685 s->current_picture_ptr = s->reordered_input_picture[0];
1686 for (i = 0; i < 4; i++) {
1687 s->new_picture.f->data[i] += INPLACE_OFFSET;
1690 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1691 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1692 s->current_picture_ptr)) < 0)
1695 s->picture_number = s->new_picture.f->display_picture_number;
/* Post-encode bookkeeping for the just-coded frame: pad the edges of
 * reference pictures for unrestricted motion vectors, remember lambdas and
 * picture types for the next frame, and mirror data into the deprecated
 * coded_frame / error fields.
 * NOTE(review): elided listing — the condition tail at the top and the #endif
 * lines are missing. */
1700 static void frame_end(MpegEncContext *s)
/* extend reference-frame borders so unrestricted MVs can read past the edge */
1702 if (s->unrestricted_mv &&
1703 s->current_picture.reference &&
1705 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1706 int hshift = desc->log2_chroma_w;
1707 int vshift = desc->log2_chroma_h;
1708 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1709 s->current_picture.f->linesize[0],
1710 s->h_edge_pos, s->v_edge_pos,
1711 EDGE_WIDTH, EDGE_WIDTH,
1712 EDGE_TOP | EDGE_BOTTOM);
/* chroma planes use the subsampled geometry */
1713 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1714 s->current_picture.f->linesize[1],
1715 s->h_edge_pos >> hshift,
1716 s->v_edge_pos >> vshift,
1717 EDGE_WIDTH >> hshift,
1718 EDGE_WIDTH >> vshift,
1719 EDGE_TOP | EDGE_BOTTOM);
1720 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1721 s->current_picture.f->linesize[2],
1722 s->h_edge_pos >> hshift,
1723 s->v_edge_pos >> vshift,
1724 EDGE_WIDTH >> hshift,
1725 EDGE_WIDTH >> vshift,
1726 EDGE_TOP | EDGE_BOTTOM);
/* remember per-type state used when choosing the next picture's parameters */
1731 s->last_pict_type = s->pict_type;
1732 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1733 if (s->pict_type!= AV_PICTURE_TYPE_B)
1734 s->last_non_b_pict_type = s->pict_type;
/* deprecated public API mirrors, kept behind FF_API_* guards */
1736 #if FF_API_CODED_FRAME
1737 FF_DISABLE_DEPRECATION_WARNINGS
1738 av_frame_unref(s->avctx->coded_frame);
1739 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1740 FF_ENABLE_DEPRECATION_WARNINGS
1742 #if FF_API_ERROR_FRAME
1743 FF_DISABLE_DEPRECATION_WARNINGS
1744 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1745 sizeof(s->current_picture.encoding_error));
1746 FF_ENABLE_DEPRECATION_WARNINGS
1750 static void update_noise_reduction(MpegEncContext *s)
1754 for (intra = 0; intra < 2; intra++) {
1755 if (s->dct_count[intra] > (1 << 16)) {
1756 for (i = 0; i < 64; i++) {
1757 s->dct_error_sum[intra][i] >>= 1;
1759 s->dct_count[intra] >>= 1;
1762 for (i = 0; i < 64; i++) {
1763 s->dct_offset[intra][i] = (s->noise_reduction *
1764 s->dct_count[intra] +
1765 s->dct_error_sum[intra][i] / 2) /
1766 (s->dct_error_sum[intra][i] + 1);
/* Per-frame setup before encoding: rotate the last/next/current reference
 * picture pointers (with refcounting), adjust data pointers and linesizes for
 * field pictures, select the dct_unquantize functions for the target codec,
 * and refresh noise-reduction tables.
 * NOTE(review): elided listing — `int ret`/`int i` declarations, several
 * error returns and closing braces are missing; the refcounting order here is
 * intricate, so the code is left untouched. */
1771 static int frame_start(MpegEncContext *s)
1775 /* mark & release old frames */
1776 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1777 s->last_picture_ptr != s->next_picture_ptr &&
1778 s->last_picture_ptr->f->buf[0]) {
1779 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1782 s->current_picture_ptr->f->pict_type = s->pict_type;
1783 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
/* re-reference the current picture into the working slot */
1785 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1786 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1787 s->current_picture_ptr)) < 0)
/* non-B frames advance the reference chain */
1790 if (s->pict_type != AV_PICTURE_TYPE_B) {
1791 s->last_picture_ptr = s->next_picture_ptr;
1793 s->next_picture_ptr = s->current_picture_ptr;
1796 if (s->last_picture_ptr) {
1797 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1798 if (s->last_picture_ptr->f->buf[0] &&
1799 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1800 s->last_picture_ptr)) < 0)
1803 if (s->next_picture_ptr) {
1804 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1805 if (s->next_picture_ptr->f->buf[0] &&
1806 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1807 s->next_picture_ptr)) < 0)
/* field pictures: double the strides, bottom field starts one line down */
1811 if (s->picture_structure!= PICT_FRAME) {
1813 for (i = 0; i < 4; i++) {
1814 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1815 s->current_picture.f->data[i] +=
1816 s->current_picture.f->linesize[i];
1818 s->current_picture.f->linesize[i] *= 2;
1819 s->last_picture.f->linesize[i] *= 2;
1820 s->next_picture.f->linesize[i] *= 2;
/* pick the unquantizers matching the output codec family */
1824 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1825 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1826 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1827 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1828 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1829 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1831 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1832 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1835 if (s->dct_error_sum) {
1836 av_assert2(s->noise_reduction && s->encoding);
1837 update_noise_reduction(s);
/* Top-level per-frame entry point: queue the input frame, select/reorder the
 * next picture, encode it (re-encoding at higher lambda if the VBV would
 * overflow), write stuffing bits, patch the MPEG-1/2 vbv_delay for CBR and
 * fill in the output packet's pts/dts/flags.
 * NOTE(review): elided listing — many error paths, closing braces and some
 * statements are missing; the code is left untouched and only annotated. */
1843 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1844 const AVFrame *pic_arg, int *got_packet)
1846 MpegEncContext *s = avctx->priv_data;
1847 int i, stuffing_count, ret;
1848 int context_count = s->slice_context_count;
1850 s->vbv_ignore_qmax = 0;
1852 s->picture_in_gop_number++;
1854 if (load_input_picture(s, pic_arg) < 0)
1857 if (select_input_picture(s) < 0) {
/* output? */
1862 if (s->new_picture.f->data[0]) {
/* single slice context without a caller buffer can grow the packet lazily */
1863 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1864 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1866 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1867 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
/* H.263 MB info side data, filled during encoding */
1870 s->mb_info_ptr = av_packet_new_side_data(pkt,
1871 AV_PKT_DATA_H263_MB_INFO,
1872 s->mb_width*s->mb_height*12);
1873 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* give each slice thread a proportional slice of the output buffer */
1876 for (i = 0; i < context_count; i++) {
1877 int start_y = s->thread_context[i]->start_mb_y;
1878 int end_y = s->thread_context[i]-> end_mb_y;
1879 int h = s->mb_height;
1880 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1881 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1883 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1886 s->pict_type = s->new_picture.f->pict_type;
1888 ret = frame_start(s);
1892 ret = encode_picture(s, s->picture_number);
1893 if (growing_buffer) {
1894 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1895 pkt->data = s->pb.buf;
1896 pkt->size = avctx->internal->byte_buffer_size;
/* deprecated per-frame statistics mirrors */
1901 #if FF_API_STAT_BITS
1902 FF_DISABLE_DEPRECATION_WARNINGS
1903 avctx->header_bits = s->header_bits;
1904 avctx->mv_bits = s->mv_bits;
1905 avctx->misc_bits = s->misc_bits;
1906 avctx->i_tex_bits = s->i_tex_bits;
1907 avctx->p_tex_bits = s->p_tex_bits;
1908 avctx->i_count = s->i_count;
1909 // FIXME f/b_count in avctx
1910 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1911 avctx->skip_count = s->skip_count;
1912 FF_ENABLE_DEPRECATION_WARNINGS
1917 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1918 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
/* VBV check: if the frame is too large, raise lambda and re-encode */
1920 if (avctx->rc_buffer_size) {
1921 RateControlContext *rcc = &s->rc_context;
1922 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1923 int hq = (s->avctx->mb_decision == FF_MB_DECISION_RD || s->avctx->trellis);
1924 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1926 if (put_bits_count(&s->pb) > max_size &&
1927 s->lambda < s->lmax) {
1928 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1929 (s->qscale + 1) / s->qscale);
1930 if (s->adaptive_quant) {
1932 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1933 s->lambda_table[i] =
1934 FFMAX(s->lambda_table[i] + min_step,
1935 s->lambda_table[i] * (s->qscale + 1) /
1938 s->mb_skipped = 0; // done in frame_start()
1939 // done in encode_picture() so we must undo it
1940 if (s->pict_type == AV_PICTURE_TYPE_P) {
1941 if (s->flipflop_rounding ||
1942 s->codec_id == AV_CODEC_ID_H263P ||
1943 s->codec_id == AV_CODEC_ID_MPEG4)
1944 s->no_rounding ^= 1;
1946 if (s->pict_type != AV_PICTURE_TYPE_B) {
1947 s->time_base = s->last_time_base;
1948 s->last_non_b_time = s->time - s->pp_time;
1950 for (i = 0; i < context_count; i++) {
1951 PutBitContext *pb = &s->thread_context[i]->pb;
1952 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1954 s->vbv_ignore_qmax = 1;
1955 av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1959 av_assert0(s->avctx->rc_max_rate);
1962 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1963 ff_write_pass1_stats(s);
1965 for (i = 0; i < 4; i++) {
1966 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1967 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1969 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1970 s->current_picture_ptr->encoding_error,
1971 (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1974 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1975 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1976 s->misc_bits + s->i_tex_bits +
1978 flush_put_bits(&s->pb);
1979 s->frame_bits = put_bits_count(&s->pb);
/* pad the frame with codec-specific stuffing to satisfy the VBV model */
1981 stuffing_count = ff_vbv_update(s, s->frame_bits);
1982 s->stuffing_bits = 8*stuffing_count;
1983 if (stuffing_count) {
1984 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1985 stuffing_count + 50) {
1986 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1990 switch (s->codec_id) {
1991 case AV_CODEC_ID_MPEG1VIDEO:
1992 case AV_CODEC_ID_MPEG2VIDEO:
1993 while (stuffing_count--) {
1994 put_bits(&s->pb, 8, 0);
/* MPEG-4 uses a stuffing start code followed by 0xFF bytes */
1997 case AV_CODEC_ID_MPEG4:
1998 put_bits(&s->pb, 16, 0);
1999 put_bits(&s->pb, 16, 0x1C3);
2000 stuffing_count -= 4;
2001 while (stuffing_count--) {
2002 put_bits(&s->pb, 8, 0xFF);
2006 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2008 flush_put_bits(&s->pb);
2009 s->frame_bits = put_bits_count(&s->pb);
2012 /* update MPEG-1/2 vbv_delay for CBR */
2013 if (s->avctx->rc_max_rate &&
2014 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
2015 s->out_format == FMT_MPEG1 &&
2016 90000LL * (avctx->rc_buffer_size - 1) <=
2017 s->avctx->rc_max_rate * 0xFFFFLL) {
2018 AVCPBProperties *props;
2021 int vbv_delay, min_delay;
2022 double inbits = s->avctx->rc_max_rate *
2023 av_q2d(s->avctx->time_base);
2024 int minbits = s->frame_bits - 8 *
2025 (s->vbv_delay_ptr - s->pb.buf - 1);
2026 double bits = s->rc_context.buffer_index + minbits - inbits;
2029 av_log(s->avctx, AV_LOG_ERROR,
2030 "Internal error, negative bits\n");
2032 assert(s->repeat_first_field == 0);
2034 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
2035 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
2036 s->avctx->rc_max_rate;
2038 vbv_delay = FFMAX(vbv_delay, min_delay);
2040 av_assert0(vbv_delay < 0xFFFF);
/* patch the 16-bit vbv_delay field straddling three header bytes */
2042 s->vbv_delay_ptr[0] &= 0xF8;
2043 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2044 s->vbv_delay_ptr[1] = vbv_delay >> 5;
2045 s->vbv_delay_ptr[2] &= 0x07;
2046 s->vbv_delay_ptr[2] |= vbv_delay << 3;
2048 props = av_cpb_properties_alloc(&props_size);
2050 return AVERROR(ENOMEM);
2051 props->vbv_delay = vbv_delay * 300;
2053 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
2054 (uint8_t*)props, props_size);
2060 #if FF_API_VBV_DELAY
2061 FF_DISABLE_DEPRECATION_WARNINGS
2062 avctx->vbv_delay = vbv_delay * 300;
2063 FF_ENABLE_DEPRECATION_WARNINGS
2066 s->total_bits += s->frame_bits;
2067 #if FF_API_STAT_BITS
2068 FF_DISABLE_DEPRECATION_WARNINGS
2069 avctx->frame_bits = s->frame_bits;
2070 FF_ENABLE_DEPRECATION_WARNINGS
/* set packet timestamps: dts lags pts by one coded frame when reordering */
2074 pkt->pts = s->current_picture.f->pts;
2075 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2076 if (!s->current_picture.f->coded_picture_number)
2077 pkt->dts = pkt->pts - s->dts_delta;
2079 pkt->dts = s->reordered_pts;
2080 s->reordered_pts = pkt->pts;
2082 pkt->dts = pkt->pts;
2083 if (s->current_picture.f->key_frame)
2084 pkt->flags |= AV_PKT_FLAG_KEY;
2086 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2091 /* release non-reference frames */
2092 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2093 if (!s->picture[i].reference)
2094 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2097 av_assert1((s->frame_bits & 7) == 0);
2099 pkt->size = s->frame_bits / 8;
2100 *got_packet = !!pkt->size;
2104 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2105 int n, int threshold)
2107 static const char tab[64] = {
2108 3, 2, 2, 1, 1, 1, 1, 1,
2109 1, 1, 1, 1, 1, 1, 1, 1,
2110 1, 1, 1, 1, 1, 1, 1, 1,
2111 0, 0, 0, 0, 0, 0, 0, 0,
2112 0, 0, 0, 0, 0, 0, 0, 0,
2113 0, 0, 0, 0, 0, 0, 0, 0,
2114 0, 0, 0, 0, 0, 0, 0, 0,
2115 0, 0, 0, 0, 0, 0, 0, 0
2120 int16_t *block = s->block[n];
2121 const int last_index = s->block_last_index[n];
2124 if (threshold < 0) {
2126 threshold = -threshold;
2130 /* Are all we could set to zero already zero? */
2131 if (last_index <= skip_dc - 1)
2134 for (i = 0; i <= last_index; i++) {
2135 const int j = s->intra_scantable.permutated[i];
2136 const int level = FFABS(block[j]);
2138 if (skip_dc && i == 0)
2142 } else if (level > 1) {
2148 if (score >= threshold)
2150 for (i = skip_dc; i <= last_index; i++) {
2151 const int j = s->intra_scantable.permutated[i];
2155 s->block_last_index[n] = 0;
2157 s->block_last_index[n] = -1;
2160 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2164 const int maxlevel = s->max_qcoeff;
2165 const int minlevel = s->min_qcoeff;
2169 i = 1; // skip clipping of intra dc
2173 for (; i <= last_index; i++) {
2174 const int j = s->intra_scantable.permutated[i];
2175 int level = block[j];
2177 if (level > maxlevel) {
2180 } else if (level < minlevel) {
2188 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2189 av_log(s->avctx, AV_LOG_INFO,
2190 "warning, clipping %d dct coefficients to %d..%d\n",
2191 overflow, minlevel, maxlevel);
/**
 * Compute a perceptual weight for each pixel of an 8x8 block.
 *
 * For every pixel, measures the local standard deviation over its 3x3
 * neighborhood (clipped to the block) and scales it into weight[]; flat areas
 * get low weights, textured areas high ones. Used by the RD refinement to
 * weight coefficient errors perceptually.
 *
 * NOTE(review): reconstructed from a truncated listing — the sum/sqr/count
 * accumulators and closing braces were missing.
 *
 * @param weight out: 64 weights, row-major
 * @param ptr    top-left pixel of the source 8x8 block
 * @param stride row stride of ptr
 */
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
{
    int x, y;
    // FIXME optimize
    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++) {
            int x2, y2;
            int sum = 0;
            int sqr = 0;
            int count = 0;

            for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
                for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
                    int v = ptr[x2 + y2 * stride];
                    sum += v;
                    sqr += v * v;
                    count++;
                }
            }
            /* count*variance = count*sqr - sum^2; 36/count normalizes range */
            weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
2218 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2219 int motion_x, int motion_y,
2220 int mb_block_height,
2224 int16_t weight[12][64];
2225 int16_t orig[12][64];
2226 const int mb_x = s->mb_x;
2227 const int mb_y = s->mb_y;
2230 int dct_offset = s->linesize * 8; // default for progressive frames
2231 int uv_dct_offset = s->uvlinesize * 8;
2232 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2233 ptrdiff_t wrap_y, wrap_c;
2235 for (i = 0; i < mb_block_count; i++)
2236 skip_dct[i] = s->skipdct;
2238 if (s->adaptive_quant) {
2239 const int last_qp = s->qscale;
2240 const int mb_xy = mb_x + mb_y * s->mb_stride;
2242 s->lambda = s->lambda_table[mb_xy];
2245 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2246 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2247 s->dquant = s->qscale - last_qp;
2249 if (s->out_format == FMT_H263) {
2250 s->dquant = av_clip(s->dquant, -2, 2);
2252 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2254 if (s->pict_type == AV_PICTURE_TYPE_B) {
2255 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2258 if (s->mv_type == MV_TYPE_8X8)
2264 ff_set_qscale(s, last_qp + s->dquant);
2265 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2266 ff_set_qscale(s, s->qscale + s->dquant);
2268 wrap_y = s->linesize;
2269 wrap_c = s->uvlinesize;
2270 ptr_y = s->new_picture.f->data[0] +
2271 (mb_y * 16 * wrap_y) + mb_x * 16;
2272 ptr_cb = s->new_picture.f->data[1] +
2273 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2274 ptr_cr = s->new_picture.f->data[2] +
2275 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2277 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2278 uint8_t *ebuf = s->sc.edge_emu_buffer + 36 * wrap_y;
2279 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2280 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2281 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2283 16, 16, mb_x * 16, mb_y * 16,
2284 s->width, s->height);
2286 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2288 mb_block_width, mb_block_height,
2289 mb_x * mb_block_width, mb_y * mb_block_height,
2291 ptr_cb = ebuf + 16 * wrap_y;
2292 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2294 mb_block_width, mb_block_height,
2295 mb_x * mb_block_width, mb_y * mb_block_height,
2297 ptr_cr = ebuf + 16 * wrap_y + 16;
2301 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2302 int progressive_score, interlaced_score;
2304 s->interlaced_dct = 0;
2305 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2306 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2307 NULL, wrap_y, 8) - 400;
2309 if (progressive_score > 0) {
2310 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2311 NULL, wrap_y * 2, 8) +
2312 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2313 NULL, wrap_y * 2, 8);
2314 if (progressive_score > interlaced_score) {
2315 s->interlaced_dct = 1;
2317 dct_offset = wrap_y;
2318 uv_dct_offset = wrap_c;
2320 if (s->chroma_format == CHROMA_422 ||
2321 s->chroma_format == CHROMA_444)
2327 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2328 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2329 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2330 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2332 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2336 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2337 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2338 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2339 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2340 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2341 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2342 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2343 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2344 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2345 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2346 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2347 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2351 op_pixels_func (*op_pix)[4];
2352 qpel_mc_func (*op_qpix)[16];
2353 uint8_t *dest_y, *dest_cb, *dest_cr;
2355 dest_y = s->dest[0];
2356 dest_cb = s->dest[1];
2357 dest_cr = s->dest[2];
2359 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2360 op_pix = s->hdsp.put_pixels_tab;
2361 op_qpix = s->qdsp.put_qpel_pixels_tab;
2363 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2364 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2367 if (s->mv_dir & MV_DIR_FORWARD) {
2368 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2369 s->last_picture.f->data,
2371 op_pix = s->hdsp.avg_pixels_tab;
2372 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2374 if (s->mv_dir & MV_DIR_BACKWARD) {
2375 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2376 s->next_picture.f->data,
2380 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2381 int progressive_score, interlaced_score;
2383 s->interlaced_dct = 0;
2384 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2385 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2389 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2390 progressive_score -= 400;
2392 if (progressive_score > 0) {
2393 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2395 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2399 if (progressive_score > interlaced_score) {
2400 s->interlaced_dct = 1;
2402 dct_offset = wrap_y;
2403 uv_dct_offset = wrap_c;
2405 if (s->chroma_format == CHROMA_422)
2411 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2412 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2413 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2414 dest_y + dct_offset, wrap_y);
2415 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2416 dest_y + dct_offset + 8, wrap_y);
2418 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2422 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2423 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2424 if (!s->chroma_y_shift) { /* 422 */
2425 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2426 dest_cb + uv_dct_offset, wrap_c);
2427 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2428 dest_cr + uv_dct_offset, wrap_c);
2431 /* pre quantization */
2432 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2433 2 * s->qscale * s->qscale) {
2435 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2437 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2439 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2440 wrap_y, 8) < 20 * s->qscale)
2442 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2443 wrap_y, 8) < 20 * s->qscale)
2445 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2447 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2449 if (!s->chroma_y_shift) { /* 422 */
2450 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2451 dest_cb + uv_dct_offset,
2452 wrap_c, 8) < 20 * s->qscale)
2454 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2455 dest_cr + uv_dct_offset,
2456 wrap_c, 8) < 20 * s->qscale)
2462 if (s->quantizer_noise_shaping) {
2464 get_visual_weight(weight[0], ptr_y , wrap_y);
2466 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2468 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2470 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2472 get_visual_weight(weight[4], ptr_cb , wrap_c);
2474 get_visual_weight(weight[5], ptr_cr , wrap_c);
2475 if (!s->chroma_y_shift) { /* 422 */
2477 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2480 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2483 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2486 /* DCT & quantize */
2487 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2489 for (i = 0; i < mb_block_count; i++) {
2492 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2493 // FIXME we could decide to change to quantizer instead of
2495 // JS: I don't think that would be a good idea it could lower
2496 // quality instead of improve it. Just INTRADC clipping
2497 // deserves changes in quantizer
2499 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2501 s->block_last_index[i] = -1;
2503 if (s->quantizer_noise_shaping) {
2504 for (i = 0; i < mb_block_count; i++) {
2506 s->block_last_index[i] =
2507 dct_quantize_refine(s, s->block[i], weight[i],
2508 orig[i], i, s->qscale);
2513 if (s->luma_elim_threshold && !s->mb_intra)
2514 for (i = 0; i < 4; i++)
2515 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2516 if (s->chroma_elim_threshold && !s->mb_intra)
2517 for (i = 4; i < mb_block_count; i++)
2518 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2520 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2521 for (i = 0; i < mb_block_count; i++) {
2522 if (s->block_last_index[i] == -1)
2523 s->coded_score[i] = INT_MAX / 256;
2528 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2529 s->block_last_index[4] =
2530 s->block_last_index[5] = 0;
2532 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2533 if (!s->chroma_y_shift) { /* 422 / 444 */
2534 for (i=6; i<12; i++) {
2535 s->block_last_index[i] = 0;
2536 s->block[i][0] = s->block[4][0];
2541 // non c quantize code returns incorrect block_last_index FIXME
2542 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2543 for (i = 0; i < mb_block_count; i++) {
2545 if (s->block_last_index[i] > 0) {
2546 for (j = 63; j > 0; j--) {
2547 if (s->block[i][s->intra_scantable.permutated[j]])
2550 s->block_last_index[i] = j;
2555 /* huffman encode */
2556 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2557 case AV_CODEC_ID_MPEG1VIDEO:
2558 case AV_CODEC_ID_MPEG2VIDEO:
2559 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2560 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2562 case AV_CODEC_ID_MPEG4:
2563 if (CONFIG_MPEG4_ENCODER)
2564 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2566 case AV_CODEC_ID_MSMPEG4V2:
2567 case AV_CODEC_ID_MSMPEG4V3:
2568 case AV_CODEC_ID_WMV1:
2569 if (CONFIG_MSMPEG4_ENCODER)
2570 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2572 case AV_CODEC_ID_WMV2:
2573 if (CONFIG_WMV2_ENCODER)
2574 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2576 case AV_CODEC_ID_H261:
2577 if (CONFIG_H261_ENCODER)
2578 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2580 case AV_CODEC_ID_H263:
2581 case AV_CODEC_ID_H263P:
2582 case AV_CODEC_ID_FLV1:
2583 case AV_CODEC_ID_RV10:
2584 case AV_CODEC_ID_RV20:
2585 if (CONFIG_H263_ENCODER)
2586 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2588 case AV_CODEC_ID_MJPEG:
2589 case AV_CODEC_ID_AMV:
2590 if (CONFIG_MJPEG_ENCODER)
2591 ff_mjpeg_encode_mb(s, s->block);
/**
 * Encode one macroblock, dispatching to encode_mb_internal() with the
 * chroma block geometry of the active chroma format:
 *   420 -> 8x8 chroma, 6 coded blocks per MB
 *   422 -> 16x8 chroma, 8 coded blocks per MB
 *   otherwise (444) -> 16x16 chroma, 12 coded blocks per MB
 */
2598 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2600     if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2601     else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2602     else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/**
 * Copy the encoder state that an encoding attempt can modify from *s into *d.
 * Used by the RD macroblock-mode search (see encode_mb_hq()) so that several
 * candidate MB types can be tried starting from an identical context.
 * NOTE(review): some lines are elided in this excerpt (loop headers/braces),
 * so the exact set of copied fields may be larger than what is visible here.
 */
2605 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
     /* motion-vector prediction state */
2608     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2611     d->mb_skip_run= s->mb_skip_run;
     /* DC predictors (presumably one per component — loop header elided) */
2613         d->last_dc[i] = s->last_dc[i];
     /* per-category bit/count statistics */
2616     d->mv_bits= s->mv_bits;
2617     d->i_tex_bits= s->i_tex_bits;
2618     d->p_tex_bits= s->p_tex_bits;
2619     d->i_count= s->i_count;
2620     d->f_count= s->f_count;
2621     d->b_count= s->b_count;
2622     d->skip_count= s->skip_count;
2623     d->misc_bits= s->misc_bits;
     /* quantizer state */
2627     d->qscale= s->qscale;
2628     d->dquant= s->dquant;
     /* MPEG-2 intra-VLC escape-length state */
2630     d->esc3_level_length= s->esc3_level_length;
/**
 * Copy the encoder state resulting from an encoding attempt from *s into *d.
 * Counterpart of copy_context_before_encode(): after a candidate MB type has
 * been trial-encoded, encode_mb_hq() uses this to record the winning state
 * (and later to restore it into the live context).
 * NOTE(review): some lines are elided in this excerpt (loop headers/braces).
 */
2633 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
     /* motion vectors actually chosen plus MV prediction state */
2636     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2637     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2640     d->mb_skip_run= s->mb_skip_run;
     /* DC predictors (loop header elided) */
2642         d->last_dc[i] = s->last_dc[i];
     /* per-category bit/count statistics */
2645     d->mv_bits= s->mv_bits;
2646     d->i_tex_bits= s->i_tex_bits;
2647     d->p_tex_bits= s->p_tex_bits;
2648     d->i_count= s->i_count;
2649     d->f_count= s->f_count;
2650     d->b_count= s->b_count;
2651     d->skip_count= s->skip_count;
2652     d->misc_bits= s->misc_bits;
     /* macroblock coding decisions */
2654     d->mb_intra= s->mb_intra;
2655     d->mb_skipped= s->mb_skipped;
2656     d->mv_type= s->mv_type;
2657     d->mv_dir= s->mv_dir;
     /* with data partitioning the secondary bitstream writers travel too */
2659     if(s->data_partitioning){
2661         d->tex_pb= s->tex_pb;
     /* per-block last-nonzero-coefficient indices (loop header elided) */
2665         d->block_last_index[i]= s->block_last_index[i];
2666     d->interlaced_dct= s->interlaced_dct;
2667     d->qscale= s->qscale;
     /* MPEG-2 intra-VLC escape-length state */
2669     d->esc3_level_length= s->esc3_level_length;
/**
 * Trial-encode the current macroblock with one candidate MB type and keep the
 * result if it beats the best score so far (high-quality / RD mode decision).
 *
 * Candidate output goes to the double-buffered PutBitContexts pb/pb2/tex_pb,
 * indexed by *next_block, so the previous best bitstream is preserved in the
 * other buffer.  Reconstruction is redirected to the rd_scratchpad so the real
 * destination frame is untouched during the trial.
 * NOTE(review): some lines are elided in this excerpt (e.g. the score
 * comparison and *dmin/*next_block update at the end).
 */
2672 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2673 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2674 int *dmin, int *next_block, int motion_x, int motion_y)
2677 uint8_t *dest_backup[3];
     /* start every trial from the identical saved state */
2679 copy_context_before_encode(s, backup, type);
     /* select the free half of the double buffers for this attempt */
2681 s->block= s->blocks[*next_block];
2682 s->pb= pb[*next_block];
2683 if(s->data_partitioning){
2684 s->pb2 = pb2 [*next_block];
2685 s->tex_pb= tex_pb[*next_block];
     /* reconstruct into the scratchpad, not the real frame */
2689 memcpy(dest_backup, s->dest, sizeof(s->dest));
2690 s->dest[0] = s->sc.rd_scratchpad;
2691 s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2692 s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2693 av_assert0(s->linesize >= 32); //FIXME
2696 encode_mb(s, motion_x, motion_y);
     /* base score: bits spent on this macroblock */
2698 score= put_bits_count(&s->pb);
2699 if(s->data_partitioning){
2700 score+= put_bits_count(&s->pb2);
2701 score+= put_bits_count(&s->tex_pb);
     /* full RD mode: score = lambda2*bits + SSE distortion */
2704 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2705 ff_mpv_decode_mb(s, s->block);
2707 score *= s->lambda2;
2708 score += sse_mb(s) << FF_LAMBDA_SHIFT;
     /* restore the real destination pointers */
2712 memcpy(s->dest, dest_backup, sizeof(s->dest));
     /* record the state of the (new) best attempt */
2719 copy_context_after_encode(best, s, type);
/**
 * Sum of squared differences between two w x h pixel regions.
 * Uses the optimized MECC routines for the common 16x16 and 8x8 sizes and
 * falls back to a table-driven scalar loop (ff_square_tab) otherwise, which
 * also handles the partial blocks at picture edges.
 * NOTE(review): the 16x16 branch condition and the fallback loop headers are
 * elided in this excerpt.
 */
2723 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
     /* offset by 256 so a possibly negative byte difference indexes correctly */
2724 uint32_t *sq = ff_square_tab + 256;
2729 return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2730 else if(w==8 && h==8)
2731 return s->mecc.sse[1](NULL, src1, src2, stride, 8);
     /* generic fallback: accumulate squared differences per pixel */
2735 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/**
 * Distortion of the current macroblock: compares the reconstructed MB in
 * s->dest[] against the source in s->new_picture, over luma plus both chroma
 * planes.  Uses NSSE when mb_cmp selects it, the fast mecc SSE routines for
 * full-size MBs, and the generic sse() helper for edge MBs clipped to the
 * picture dimensions.
 */
2744 static int sse_mb(MpegEncContext *s){
     /* clip the compared area at the right/bottom picture border */
2748 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2749 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
     /* full 16x16 MB: use the optimized comparators (branch condition elided) */
2752 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2753 return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2754 s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2755 s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2757 return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2758 s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2759 s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
     /* partial (edge) MB: generic SSE with clipped dimensions;
      * chroma compared at half resolution (presumably 4:2:0 here) */
2762 return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2763 +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2764 +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/**
 * Slice-thread entry point for the motion-estimation pre-pass.
 * Walks this slice's macroblocks bottom-up, right-to-left, running
 * ff_pre_estimate_p_frame_motion() with the pre-pass diamond size
 * (avctx->pre_dia_size).
 */
2767 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2768 MpegEncContext *s= *(void**)arg;
2772 s->me.dia_size= s->avctx->pre_dia_size;
2773 s->first_slice_line=1;
     /* reverse scan order: the pre-pass seeds predictors for the main pass */
2774 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2775 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2776 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
     /* only the first processed row of the slice counts as the boundary row */
2778 s->first_slice_line=0;
/**
 * Slice-thread entry point for the main motion-estimation pass.
 * For each macroblock of the slice, estimates motion vectors and the MB type
 * and stores them in the context (B-frames and P-frames use separate
 * estimators).
 */
2786 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2787 MpegEncContext *s= *(void**)arg;
2789 ff_check_alignment();
2791 s->me.dia_size= s->avctx->dia_size;
2792 s->first_slice_line=1;
2793 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2794 s->mb_x=0; //for block init below
2795 ff_init_block_index(s);
2796 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
     /* advance the 4 luma block indices by one MB (2 blocks horizontally) */
2797 s->block_index[0]+=2;
2798 s->block_index[1]+=2;
2799 s->block_index[2]+=2;
2800 s->block_index[3]+=2;
2802 /* compute motion vector & mb_type and store in context */
2803 if(s->pict_type==AV_PICTURE_TYPE_B)
2804 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2806 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2808 s->first_slice_line=0;
/**
 * Slice-thread entry point computing per-macroblock luma statistics used by
 * rate control / mode decision: variance (mb_var) and mean (mb_mean) of each
 * 16x16 luma block of the source picture, plus a running variance sum.
 */
2813 static int mb_var_thread(AVCodecContext *c, void *arg){
2814 MpegEncContext *s= *(void**)arg;
2817 ff_check_alignment();
2819 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2820 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
     /* xx/yy are presumably mb_x*16 / mb_y*16 — their definitions are elided */
2823 uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2825 int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
     /* variance = E[x^2] - E[x]^2 over 256 pixels, with rounding (+128) and
      * a small bias (+500); unsigned cast avoids signed overflow in sum*sum */
2827 varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2828 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2830 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2831 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2832 s->me.mb_var_sum_temp += varc;
/**
 * Finish the current slice in the bitstream: codec-specific stuffing
 * (MPEG-4 merges data partitions first; MJPEG has its own stuffing), then
 * byte-align and flush the main PutBitContext.  In two-pass mode the
 * alignment/stuffing bits are accounted as misc_bits.
 */
2838 static void write_slice_end(MpegEncContext *s){
2839 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2840 if(s->partitioned_frame){
2841 ff_mpeg4_merge_partitions(s);
2844 ff_mpeg4_stuffing(&s->pb);
2845 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2846 ff_mjpeg_encode_stuffing(s);
     /* pad to a byte boundary and flush pending bits */
2849 avpriv_align_put_bits(&s->pb);
2850 flush_put_bits(&s->pb);
2852 if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2853 s->misc_bits+= get_bits_diff(s);
/**
 * Append one 12-byte macroblock-info record to the side-data buffer
 * (s->mb_info_ptr): bit offset of the MB in the stream, qscale, GOB number,
 * MB address within the GOB, and the predicted motion vector.  The layout
 * (hmv1/vmv1/hmv2/vmv2) matches H.263 payload-header info — presumably for
 * RTP-style packetization; confirm against the consumer of AV_PKT_DATA_H263_MB_INFO.
 */
2856 static void write_mb_info(MpegEncContext *s)
     /* point at the last (just-reserved) 12-byte slot */
2858 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2859 int offset = put_bits_count(&s->pb);
2860 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2861 int gobn = s->mb_y / s->gob_index;
2863 if (CONFIG_H263_ENCODER)
2864 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2865 bytestream_put_le32(&ptr, offset);
2866 bytestream_put_byte(&ptr, s->qscale);
2867 bytestream_put_byte(&ptr, gobn);
2868 bytestream_put_le16(&ptr, mba);
2869 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2870 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2871 /* 4MV not implemented */
2872 bytestream_put_byte(&ptr, 0); /* hmv2 */
2873 bytestream_put_byte(&ptr, 0); /* vmv2 */
/**
 * Bookkeeping for macroblock-info side data: reserve a new 12-byte record
 * once at least s->mb_info bytes of bitstream have been written since the
 * last record, and remember bitstream positions around start codes so the
 * record is written for the MB that actually follows the start code.
 * @param startcode non-zero when called at a resync/start-code boundary
 */
2876 static void update_mb_info(MpegEncContext *s, int startcode)
     /* enough payload since the previous record? reserve the next slot */
2880 if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2881 s->mb_info_size += 12;
2882 s->prev_mb_info = s->last_mb_info;
2885 s->prev_mb_info = put_bits_count(&s->pb)/8;
2886 /* This might have incremented mb_info_size above, and we may return
2887 * without actually writing any info into that slot yet. But in that
2888 * case, this will be called again after the start code has been
2889 * written, and the mb info will be written then. */
     /* record the byte position just past the start code */
2893 s->last_mb_info = put_bits_count(&s->pb)/8;
2894 if (!s->mb_info_size)
2895 s->mb_info_size += 12;
/**
 * Grow the encoder's output bit buffer when fewer than @p threshold bytes
 * remain, enlarging avctx's internal byte_buffer by at least
 * @p size_increase.  Only done for the single-slice-context case when the
 * PutBitContext writes directly into the internal buffer.  On success the
 * PutBitContext is rebased onto the new buffer and the raw pointers into it
 * (ptr_lastgob, vbv_delay_ptr) are re-derived from their saved offsets.
 *
 * @return 0-equivalent on success (exact value elided in this excerpt),
 *         AVERROR(ENOMEM) if the buffer cannot be (re)allocated,
 *         AVERROR(EINVAL) if even after reallocation less than @p threshold
 *         bytes of space remain.
 */
2899 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2901 if ( s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2902 && s->slice_context_count == 1
2903 && s->pb.buf == s->avctx->internal->byte_buffer) {
     /* save pointers into the old buffer as offsets before it moves */
2904 int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2905 int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2907 uint8_t *new_buffer = NULL;
2908 int new_buffer_size = 0;
     /* guard against overflow of the bit-count arithmetic (bits = bytes*8) */
2910 if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2911 av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2912 return AVERROR(ENOMEM);
2917 av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2918 s->avctx->internal->byte_buffer_size + size_increase);
     /* allocation-failure check (condition line elided in this excerpt) */
2920 return AVERROR(ENOMEM);
2922 memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2923 av_free(s->avctx->internal->byte_buffer);
2924 s->avctx->internal->byte_buffer = new_buffer;
2925 s->avctx->internal->byte_buffer_size = new_buffer_size;
2926 rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
     /* re-derive the saved raw pointers relative to the new buffer */
2927 s->ptr_lastgob = s->pb.buf + lastgob_pos;
2928 s->vbv_delay_ptr = s->pb.buf + vbv_pos;
     /* still not enough room: caller cannot proceed */
2930 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2931 return AVERROR(EINVAL);
2935 static int encode_thread(AVCodecContext *c, void *arg){
2936 MpegEncContext *s= *(void**)arg;
2938 int chr_h= 16>>s->chroma_y_shift;
2940 MpegEncContext best_s = { 0 }, backup_s;
2941 uint8_t bit_buf[2][MAX_MB_BYTES];
2942 uint8_t bit_buf2[2][MAX_MB_BYTES];
2943 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2944 PutBitContext pb[2], pb2[2], tex_pb[2];
2946 ff_check_alignment();
2949 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2950 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2951 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2954 s->last_bits= put_bits_count(&s->pb);
2965 /* init last dc values */
2966 /* note: quant matrix value (8) is implied here */
2967 s->last_dc[i] = 128 << s->intra_dc_precision;
2969 s->current_picture.encoding_error[i] = 0;
2971 if(s->codec_id==AV_CODEC_ID_AMV){
2972 s->last_dc[0] = 128*8/13;
2973 s->last_dc[1] = 128*8/14;
2974 s->last_dc[2] = 128*8/14;
2977 memset(s->last_mv, 0, sizeof(s->last_mv));
2981 switch(s->codec_id){
2982 case AV_CODEC_ID_H263:
2983 case AV_CODEC_ID_H263P:
2984 case AV_CODEC_ID_FLV1:
2985 if (CONFIG_H263_ENCODER)
2986 s->gob_index = H263_GOB_HEIGHT(s->height);
2988 case AV_CODEC_ID_MPEG4:
2989 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2990 ff_mpeg4_init_partitions(s);
2996 s->first_slice_line = 1;
2997 s->ptr_lastgob = s->pb.buf;
2998 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
3002 ff_set_qscale(s, s->qscale);
3003 ff_init_block_index(s);
3005 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3006 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
3007 int mb_type= s->mb_type[xy];
3011 int size_increase = s->avctx->internal->byte_buffer_size/4
3012 + s->mb_width*MAX_MB_BYTES;
3014 ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
3015 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
3016 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
3019 if(s->data_partitioning){
3020 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
3021 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
3022 av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3028 s->mb_y = mb_y; // moved into loop, can get changed by H.261
3029 ff_update_block_index(s);
3031 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3032 ff_h261_reorder_mb_index(s);
3033 xy= s->mb_y*s->mb_stride + s->mb_x;
3034 mb_type= s->mb_type[xy];
3037 /* write gob / video packet header */
3039 int current_packet_size, is_gob_start;
3041 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
3043 is_gob_start = s->rtp_payload_size &&
3044 current_packet_size >= s->rtp_payload_size &&
3047 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3049 switch(s->codec_id){
3050 case AV_CODEC_ID_H263:
3051 case AV_CODEC_ID_H263P:
3052 if(!s->h263_slice_structured)
3053 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3055 case AV_CODEC_ID_MPEG2VIDEO:
3056 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3057 case AV_CODEC_ID_MPEG1VIDEO:
3058 if(s->mb_skip_run) is_gob_start=0;
3060 case AV_CODEC_ID_MJPEG:
3061 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3066 if(s->start_mb_y != mb_y || mb_x!=0){
3069 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3070 ff_mpeg4_init_partitions(s);
3074 av_assert2((put_bits_count(&s->pb)&7) == 0);
3075 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3077 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3078 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3079 int d = 100 / s->error_rate;
3081 current_packet_size=0;
3082 s->pb.buf_ptr= s->ptr_lastgob;
3083 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3087 #if FF_API_RTP_CALLBACK
3088 FF_DISABLE_DEPRECATION_WARNINGS
3089 if (s->avctx->rtp_callback){
3090 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3091 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3093 FF_ENABLE_DEPRECATION_WARNINGS
3095 update_mb_info(s, 1);
3097 switch(s->codec_id){
3098 case AV_CODEC_ID_MPEG4:
3099 if (CONFIG_MPEG4_ENCODER) {
3100 ff_mpeg4_encode_video_packet_header(s);
3101 ff_mpeg4_clean_buffers(s);
3104 case AV_CODEC_ID_MPEG1VIDEO:
3105 case AV_CODEC_ID_MPEG2VIDEO:
3106 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3107 ff_mpeg1_encode_slice_header(s);
3108 ff_mpeg1_clean_buffers(s);
3111 case AV_CODEC_ID_H263:
3112 case AV_CODEC_ID_H263P:
3113 if (CONFIG_H263_ENCODER)
3114 ff_h263_encode_gob_header(s, mb_y);
3118 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3119 int bits= put_bits_count(&s->pb);
3120 s->misc_bits+= bits - s->last_bits;
3124 s->ptr_lastgob += current_packet_size;
3125 s->first_slice_line=1;
3126 s->resync_mb_x=mb_x;
3127 s->resync_mb_y=mb_y;
3131 if( (s->resync_mb_x == s->mb_x)
3132 && s->resync_mb_y+1 == s->mb_y){
3133 s->first_slice_line=0;
3137 s->dquant=0; //only for QP_RD
3139 update_mb_info(s, 0);
3141 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3143 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3145 copy_context_before_encode(&backup_s, s, -1);
3147 best_s.data_partitioning= s->data_partitioning;
3148 best_s.partitioned_frame= s->partitioned_frame;
3149 if(s->data_partitioning){
3150 backup_s.pb2= s->pb2;
3151 backup_s.tex_pb= s->tex_pb;
3154 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3155 s->mv_dir = MV_DIR_FORWARD;
3156 s->mv_type = MV_TYPE_16X16;
3158 s->mv[0][0][0] = s->p_mv_table[xy][0];
3159 s->mv[0][0][1] = s->p_mv_table[xy][1];
3160 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3161 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3163 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3164 s->mv_dir = MV_DIR_FORWARD;
3165 s->mv_type = MV_TYPE_FIELD;
3168 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3169 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3170 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3172 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3173 &dmin, &next_block, 0, 0);
3175 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3176 s->mv_dir = MV_DIR_FORWARD;
3177 s->mv_type = MV_TYPE_16X16;
3181 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3182 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3184 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3185 s->mv_dir = MV_DIR_FORWARD;
3186 s->mv_type = MV_TYPE_8X8;
3189 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3190 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3192 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3193 &dmin, &next_block, 0, 0);
3195 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3196 s->mv_dir = MV_DIR_FORWARD;
3197 s->mv_type = MV_TYPE_16X16;
3199 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3200 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3201 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3202 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3204 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3205 s->mv_dir = MV_DIR_BACKWARD;
3206 s->mv_type = MV_TYPE_16X16;
3208 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3209 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3210 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3211 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3213 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3214 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3215 s->mv_type = MV_TYPE_16X16;
3217 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3218 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3219 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3220 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3221 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3222 &dmin, &next_block, 0, 0);
3224 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3225 s->mv_dir = MV_DIR_FORWARD;
3226 s->mv_type = MV_TYPE_FIELD;
3229 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3230 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3231 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3233 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3234 &dmin, &next_block, 0, 0);
3236 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3237 s->mv_dir = MV_DIR_BACKWARD;
3238 s->mv_type = MV_TYPE_FIELD;
3241 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3242 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3243 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3245 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3246 &dmin, &next_block, 0, 0);
3248 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3249 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3250 s->mv_type = MV_TYPE_FIELD;
3252 for(dir=0; dir<2; dir++){
3254 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3255 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3256 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3259 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3260 &dmin, &next_block, 0, 0);
3262 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3264 s->mv_type = MV_TYPE_16X16;
3268 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3269 &dmin, &next_block, 0, 0);
3270 if(s->h263_pred || s->h263_aic){
3272 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3274 ff_clean_intra_table_entries(s); //old mode?
3278 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3279 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3280 const int last_qp= backup_s.qscale;
3283 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3284 static const int dquant_tab[4]={-1,1,-2,2};
3285 int storecoefs = s->mb_intra && s->dc_val[0];
3287 av_assert2(backup_s.dquant == 0);
3290 s->mv_dir= best_s.mv_dir;
3291 s->mv_type = MV_TYPE_16X16;
3292 s->mb_intra= best_s.mb_intra;
3293 s->mv[0][0][0] = best_s.mv[0][0][0];
3294 s->mv[0][0][1] = best_s.mv[0][0][1];
3295 s->mv[1][0][0] = best_s.mv[1][0][0];
3296 s->mv[1][0][1] = best_s.mv[1][0][1];
3298 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3299 for(; qpi<4; qpi++){
3300 int dquant= dquant_tab[qpi];
3301 qp= last_qp + dquant;
3302 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3304 backup_s.dquant= dquant;
3307 dc[i]= s->dc_val[0][ s->block_index[i] ];
3308 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3312 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3313 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3314 if(best_s.qscale != qp){
3317 s->dc_val[0][ s->block_index[i] ]= dc[i];
3318 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3325 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3326 int mx= s->b_direct_mv_table[xy][0];
3327 int my= s->b_direct_mv_table[xy][1];
3329 backup_s.dquant = 0;
3330 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3332 ff_mpeg4_set_direct_mv(s, mx, my);
3333 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3334 &dmin, &next_block, mx, my);
3336 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3337 backup_s.dquant = 0;
3338 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3340 ff_mpeg4_set_direct_mv(s, 0, 0);
3341 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3342 &dmin, &next_block, 0, 0);
3344 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3347 coded |= s->block_last_index[i];
3350 memcpy(s->mv, best_s.mv, sizeof(s->mv));
3351 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3352 mx=my=0; //FIXME find the one we actually used
3353 ff_mpeg4_set_direct_mv(s, mx, my);
3354 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3362 s->mv_dir= best_s.mv_dir;
3363 s->mv_type = best_s.mv_type;
3365 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3366 s->mv[0][0][1] = best_s.mv[0][0][1];
3367 s->mv[1][0][0] = best_s.mv[1][0][0];
3368 s->mv[1][0][1] = best_s.mv[1][0][1];*/
3371 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3372 &dmin, &next_block, mx, my);
3377 s->current_picture.qscale_table[xy] = best_s.qscale;
3379 copy_context_after_encode(s, &best_s, -1);
3381 pb_bits_count= put_bits_count(&s->pb);
3382 flush_put_bits(&s->pb);
3383 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3386 if(s->data_partitioning){
3387 pb2_bits_count= put_bits_count(&s->pb2);
3388 flush_put_bits(&s->pb2);
3389 avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3390 s->pb2= backup_s.pb2;
3392 tex_pb_bits_count= put_bits_count(&s->tex_pb);
3393 flush_put_bits(&s->tex_pb);
3394 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3395 s->tex_pb= backup_s.tex_pb;
3397 s->last_bits= put_bits_count(&s->pb);
3399 if (CONFIG_H263_ENCODER &&
3400 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3401 ff_h263_update_motion_val(s);
3403 if(next_block==0){ //FIXME 16 vs linesize16
3404 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3405 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3406 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3409 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3410 ff_mpv_decode_mb(s, s->block);
3412 int motion_x = 0, motion_y = 0;
3413 s->mv_type=MV_TYPE_16X16;
3414 // only one MB-Type possible
3417 case CANDIDATE_MB_TYPE_INTRA:
3420 motion_x= s->mv[0][0][0] = 0;
3421 motion_y= s->mv[0][0][1] = 0;
3423 case CANDIDATE_MB_TYPE_INTER:
3424 s->mv_dir = MV_DIR_FORWARD;
3426 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3427 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3429 case CANDIDATE_MB_TYPE_INTER_I:
3430 s->mv_dir = MV_DIR_FORWARD;
3431 s->mv_type = MV_TYPE_FIELD;
3434 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3435 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3436 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3439 case CANDIDATE_MB_TYPE_INTER4V:
3440 s->mv_dir = MV_DIR_FORWARD;
3441 s->mv_type = MV_TYPE_8X8;
3444 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3445 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3448 case CANDIDATE_MB_TYPE_DIRECT:
3449 if (CONFIG_MPEG4_ENCODER) {
3450 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3452 motion_x=s->b_direct_mv_table[xy][0];
3453 motion_y=s->b_direct_mv_table[xy][1];
3454 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3457 case CANDIDATE_MB_TYPE_DIRECT0:
3458 if (CONFIG_MPEG4_ENCODER) {
3459 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3461 ff_mpeg4_set_direct_mv(s, 0, 0);
3464 case CANDIDATE_MB_TYPE_BIDIR:
3465 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3467 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3468 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3469 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3470 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3472 case CANDIDATE_MB_TYPE_BACKWARD:
3473 s->mv_dir = MV_DIR_BACKWARD;
3475 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3476 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3478 case CANDIDATE_MB_TYPE_FORWARD:
3479 s->mv_dir = MV_DIR_FORWARD;
3481 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3482 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3484 case CANDIDATE_MB_TYPE_FORWARD_I:
3485 s->mv_dir = MV_DIR_FORWARD;
3486 s->mv_type = MV_TYPE_FIELD;
3489 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3490 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3491 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3494 case CANDIDATE_MB_TYPE_BACKWARD_I:
3495 s->mv_dir = MV_DIR_BACKWARD;
3496 s->mv_type = MV_TYPE_FIELD;
3499 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3500 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3501 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3504 case CANDIDATE_MB_TYPE_BIDIR_I:
3505 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3506 s->mv_type = MV_TYPE_FIELD;
3508 for(dir=0; dir<2; dir++){
3510 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3511 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3512 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3517 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3520 encode_mb(s, motion_x, motion_y);
3522 // RAL: Update last macroblock type
3523 s->last_mv_dir = s->mv_dir;
3525 if (CONFIG_H263_ENCODER &&
3526 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3527 ff_h263_update_motion_val(s);
3529 ff_mpv_decode_mb(s, s->block);
3532 /* clean the MV table in IPS frames for direct mode in B-frames */
3533 if(s->mb_intra /* && I,P,S_TYPE */){
3534 s->p_mv_table[xy][0]=0;
3535 s->p_mv_table[xy][1]=0;
3538 if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3542 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3543 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3545 s->current_picture.encoding_error[0] += sse(
3546 s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3547 s->dest[0], w, h, s->linesize);
3548 s->current_picture.encoding_error[1] += sse(
3549 s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3550 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3551 s->current_picture.encoding_error[2] += sse(
3552 s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3553 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3556 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3557 ff_h263_loop_filter(s);
3559 ff_dlog(s->avctx, "MB %d %d bits\n",
3560 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3564 //not beautiful here but we must write it before flushing so it has to be here
3565 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3566 ff_msmpeg4_encode_ext_header(s);
3570 #if FF_API_RTP_CALLBACK
3571 FF_DISABLE_DEPRECATION_WARNINGS
3572 /* Send the last GOB if RTP */
3573 if (s->avctx->rtp_callback) {
3574 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3575 int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3576 /* Call the RTP callback to send the last GOB */
3578 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3580 FF_ENABLE_DEPRECATION_WARNINGS
/* Add src->field into dst->field and reset src->field to 0; used to fold
 * per-slice-thread statistics back into the main context. */
#define MERGE(field) dst->field += src->field; src->field=0
/* Merge the motion-estimation statistics gathered by a slice-thread
 * context into the main context after the ME pass. */
static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
    MERGE(me.scene_change_score);
    MERGE(me.mc_mb_var_sum_temp);
    MERGE(me.mb_var_sum_temp);
/* Merge per-slice encoding state (DCT statistics, error counters, PSNR
 * accumulators, noise-reduction sums) from a slice-thread context into the
 * main context, then append the worker's bitstream to the main bitstream. */
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
    MERGE(dct_count[0]); //note, the other dct vars are not part of the context
    MERGE(dct_count[1]);
    MERGE(er.error_count);
    MERGE(padding_bug_score);
    MERGE(current_picture.encoding_error[0]);
    MERGE(current_picture.encoding_error[1]);
    MERGE(current_picture.encoding_error[2]);

    if (dst->noise_reduction){
        for(i=0; i<64; i++){
            MERGE(dct_error_sum[0][i]);
            MERGE(dct_error_sum[1][i]);

    /* slice payloads are byte aligned, so appending with a bit copy is safe */
    assert(put_bits_count(&src->pb) % 8 ==0);
    assert(put_bits_count(&dst->pb) % 8 ==0);
    avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
    flush_put_bits(&dst->pb);
/* Choose the quantizer / lambda for the current picture.
 * A pending s->next_lambda takes priority; otherwise, for non-fixed-qscale
 * encodes, the rate controller is queried. With dry_run set, state is not
 * consumed (next_lambda is kept).
 * Returns: 0 on success; a rate-control failure (quality < 0) returns an
 * error on a line elided from this listing — TODO confirm against full file. */
static int estimate_qp(MpegEncContext *s, int dry_run){
    if (s->next_lambda){
        s->current_picture_ptr->f->quality =
        s->current_picture.f->quality = s->next_lambda;
        if(!dry_run) s->next_lambda= 0;
    } else if (!s->fixed_qscale) {
        s->current_picture_ptr->f->quality =
        s->current_picture.f->quality = ff_rate_estimate_qscale(s, dry_run);
        if (s->current_picture.f->quality < 0)

    if(s->adaptive_quant){
        /* smooth the per-MB qscale table with the codec-specific cleaner */
        switch(s->codec_id){
        case AV_CODEC_ID_MPEG4:
            if (CONFIG_MPEG4_ENCODER)
                ff_clean_mpeg4_qscales(s);
        case AV_CODEC_ID_H263:
        case AV_CODEC_ID_H263P:
        case AV_CODEC_ID_FLV1:
            if (CONFIG_H263_ENCODER)
                ff_clean_h263_qscales(s);

        ff_init_qscale_tab(s);

        s->lambda= s->lambda_table[0];
        /* non-adaptive path: lambda comes straight from the picture quality */
        s->lambda = s->current_picture.f->quality;
/* Update the temporal distance bookkeeping (pp_time / pb_time) used for
 * B-frame direct-mode scaling and DiVX-style time stamps.
 * Must be called before writing the picture header. */
static void set_frame_distances(MpegEncContext * s){
    av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
    /* current time in time_base.num units */
    s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;

    if(s->pict_type==AV_PICTURE_TYPE_B){
        /* distance from the previous reference to this B-frame */
        s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
        assert(s->pb_time > 0 && s->pb_time < s->pp_time);
        /* distance between the two most recent reference frames */
        s->pp_time= s->time - s->last_non_b_time;
        s->last_non_b_time= s->time;
        assert(s->picture_number==0 || s->pp_time > 0);
/* Encode one complete picture: set up timing and quantizer state, run motion
 * estimation across all slice threads, pick f_code/b_code, build the
 * quantization matrices where needed (MJPEG/AMV), write the codec-specific
 * picture header, then run and merge the per-slice encode threads.
 * Returns 0 on success or a negative error code (error paths are partly
 * elided from this listing). */
static int encode_picture(MpegEncContext *s, int picture_number)
    int context_count = s->slice_context_count;

    s->picture_number = picture_number;

    /* Reset the average MB variance */
    s->me.mb_var_sum_temp    =
    s->me.mc_mb_var_sum_temp = 0;

    /* we need to initialize some time vars before we can encode B-frames */
    // RAL: Condition added for MPEG1VIDEO
    if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
        set_frame_distances(s);
    if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
        ff_set_mpeg4_time(s);

    s->me.scene_change_score=0;

//    s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion

    /* rounding mode: I-frames reset it, P/S frames may toggle it per frame */
    if(s->pict_type==AV_PICTURE_TYPE_I){
        if(s->msmpeg4_version >= 3) s->no_rounding=1;
        else                        s->no_rounding=0;
    }else if(s->pict_type!=AV_PICTURE_TYPE_B){
        if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
            s->no_rounding ^= 1;

    if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
        if (estimate_qp(s,1) < 0)
        ff_get_2pass_fcode(s);
    } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
        /* reuse the last lambda of the same picture class for ME */
        if(s->pict_type==AV_PICTURE_TYPE_B)
            s->lambda= s->last_lambda_for[s->pict_type];
            s->lambda= s->last_lambda_for[s->last_non_b_pict_type];

    if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
        /* share the luma intra matrix for chroma; free any separate copies */
        if(s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
        if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
        s->q_chroma_intra_matrix   = s->q_intra_matrix;
        s->q_chroma_intra_matrix16 = s->q_intra_matrix16;

    s->mb_intra=0; //for the rate distortion & bit compare functions
    for(i=1; i<context_count; i++){
        ret = ff_update_duplicate_context(s->thread_context[i], s);

    /* Estimate motion for every MB */
    if(s->pict_type != AV_PICTURE_TYPE_I){
        s->lambda  = (s->lambda  * s->me_penalty_compensation + 128) >> 8;
        s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
        if (s->pict_type != AV_PICTURE_TYPE_B) {
            if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
                s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));

        s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
    }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
        /* I-frame: mark every MB intra */
        for(i=0; i<s->mb_stride*s->mb_height; i++)
            s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;

        if(!s->fixed_qscale){
            /* finding spatial complexity for I-frame rate control */
            s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));

    for(i=1; i<context_count; i++){
        merge_context_after_me(s, s->thread_context[i]);

    s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
    s->current_picture.   mb_var_sum= s->current_picture_ptr->   mb_var_sum= s->me.   mb_var_sum_temp;

    /* scene change: force the P-frame to be encoded as an I-frame instead */
    if (s->me.scene_change_score > s->scenechange_threshold &&
        s->pict_type == AV_PICTURE_TYPE_P) {
        s->pict_type= AV_PICTURE_TYPE_I;
        for(i=0; i<s->mb_stride*s->mb_height; i++)
            s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
        if(s->msmpeg4_version >= 3)
        ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
                s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);

    if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
        /* pick the forward f_code covering all P motion vectors, then clip
         * out-of-range vectors accordingly */
        s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);

        if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
            a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
            b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
            s->f_code= FFMAX3(s->f_code, a, b);

        ff_fix_long_p_mvs(s);
        ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
        if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
                    ff_fix_long_mvs(s, s->p_field_select_table[i], j,
                                    s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);

    if(s->pict_type==AV_PICTURE_TYPE_B){
        /* f_code from forward/bidir-forward vectors, b_code from the
         * backward ones; then clip all B-frame MV tables */
        a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
        b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
        s->f_code = FFMAX(a, b);

        a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
        b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
        s->b_code = FFMAX(a, b);

        ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
        ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
        ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
        ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
        if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
            for(dir=0; dir<2; dir++){
                    int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
                                  : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
                    ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
                                    s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);

    if (estimate_qp(s, 0) < 0)

    if (s->qscale < 3 && s->max_qcoeff <= 128 &&
        s->pict_type == AV_PICTURE_TYPE_I &&
        !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
        s->qscale= 3; //reduce clipping problems

    if (s->out_format == FMT_MJPEG) {
        const uint16_t *  luma_matrix = ff_mpeg1_default_intra_matrix;
        const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;

        if (s->avctx->intra_matrix) {
            luma_matrix = s->avctx->intra_matrix;
        if (s->avctx->chroma_intra_matrix)
            chroma_matrix = s->avctx->chroma_intra_matrix;

        /* for mjpeg, we do include qscale in the matrix */
            int j = s->idsp.idct_permutation[i];

            s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
            s->       intra_matrix[j] = av_clip_uint8((  luma_matrix[i] * s->qscale) >> 3);
        s->y_dc_scale_table=
        s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
        s->chroma_intra_matrix[0] =
        s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
        ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
                          s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
        ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
                          s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);

    if(s->codec_id == AV_CODEC_ID_AMV){
        /* AMV uses fixed sp5x quantization tables and constant DC scales */
        static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
        static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
            int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];

            s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
            s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
        s->y_dc_scale_table= y;
        s->c_dc_scale_table= c;
        s->intra_matrix[0] = 13;
        s->chroma_intra_matrix[0] = 14;
        ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
                          s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
        ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
                          s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);

    //FIXME var duplication
    s->current_picture_ptr->f->key_frame =
    s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
    s->current_picture_ptr->f->pict_type =
    s->current_picture.f->pict_type = s->pict_type;

    if (s->current_picture.f->key_frame)
        s->picture_in_gop_number=0;

    s->mb_x = s->mb_y = 0;
    s->last_bits= put_bits_count(&s->pb);
    /* write the codec-family-specific picture header */
    switch(s->out_format) {
        if (CONFIG_MJPEG_ENCODER)
            ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
                                           s->pred, s->intra_matrix, s->chroma_intra_matrix);
        if (CONFIG_H261_ENCODER)
            ff_h261_encode_picture_header(s, picture_number);
        if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
            ff_wmv2_encode_picture_header(s, picture_number);
        else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
            ff_msmpeg4_encode_picture_header(s, picture_number);
        else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
            ret = ff_mpeg4_encode_picture_header(s, picture_number);
        } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
            ret = ff_rv10_encode_picture_header(s, picture_number);
        else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
            ff_rv20_encode_picture_header(s, picture_number);
        else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
            ff_flv_encode_picture_header(s, picture_number);
        else if (CONFIG_H263_ENCODER)
            ff_h263_encode_picture_header(s, picture_number);
        if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
            ff_mpeg1_encode_picture_header(s, picture_number);
    bits= put_bits_count(&s->pb);
    s->header_bits= bits - s->last_bits;

    /* run the slice encoders and merge their state and bitstreams */
    for(i=1; i<context_count; i++){
        update_duplicate_context_after_me(s->thread_context[i], s);
    s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
    for(i=1; i<context_count; i++){
        if (s->pb.buf_end == s->thread_context[i]->pb.buf)
            set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
        merge_context_after_encode(s, s->thread_context[i]);
/* Noise-reduction pass on a DCT block: shrink each coefficient towards zero
 * by the running per-position offset, while accumulating the coefficient
 * magnitudes into dct_error_sum (separately for intra and inter blocks)
 * so the offsets adapt over time. */
static void denoise_dct_c(MpegEncContext *s, int16_t *block){
    const int intra= s->mb_intra;

    s->dct_count[intra]++;

    for(i=0; i<64; i++){
        int level= block[i];
            /* positive branch: subtract the offset, clamping at zero */
            s->dct_error_sum[intra][i] += level;
            level -= s->dct_offset[intra][i];
            if(level<0) level=0;
            /* negative branch: mirror of the above */
            s->dct_error_sum[intra][i] -= level;
            level += s->dct_offset[intra][i];
            if(level>0) level=0;
/* Trellis (rate-distortion optimal) quantization of one 8x8 DCT block.
 * For each scan position up to two candidate quantized levels are kept
 * (coeff[0..1][i]); a Viterbi-style search over "survivor" end positions
 * minimizes distortion + lambda * bits, using the codec's VLC length tables.
 * n selects the block/component (n < 4 = luma), qscale the quantizer.
 * Writes the chosen levels back into block[] in permuted order, sets
 * *overflow if a coefficient exceeded max_qcoeff, and returns the index of
 * the last nonzero coefficient (or -1 if the block is empty). */
static int dct_quantize_trellis_c(MpegEncContext *s,
                                  int16_t *block, int n,
                                  int qscale, int *overflow){
    const uint16_t *matrix;
    const uint8_t *scantable= s->intra_scantable.scantable;
    const uint8_t *perm_scantable= s->intra_scantable.permutated;
    unsigned int threshold1, threshold2;
    int coeff_count[64];
    int qmul, qadd, start_i, last_non_zero, i, dc;
    const int esc_length= s->ac_esc_length;
    uint8_t * last_length;
    const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);

    s->fdsp.fdct(block);

    if(s->dct_error_sum)
        s->denoise_dct(s, block);
    qadd= ((qscale-1)|1)*8;

    /* MPEG-2 may use the non-linear quantizer scale table */
    if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
    else                 mpeg2_qscale = qscale << 1;

        /* For AIC we skip quant/dequant of INTRADC */
            /* note: block[0] is assumed to be positive */
            block[0] = (block[0] + (q >> 1)) / q;

        qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
        matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
        if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
            bias= 1<<(QMAT_SHIFT-1);

        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        /* inter block: inter matrix and inter VLC tables */
        qmat = s->q_inter_matrix[qscale];
        matrix = s->inter_matrix;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    threshold1= (1<<QMAT_SHIFT) - bias - 1;
    threshold2= (threshold1<<1);

    /* find the last coefficient that survives quantization */
    for(i=63; i>=start_i; i--) {
        const int j = scantable[i];
        int level = block[j] * qmat[j];

        if(((unsigned)(level+threshold1))>threshold2){

    /* build the candidate level list for every position */
    for(i=start_i; i<=last_non_zero; i++) {
        const int j = scantable[i];
        int level = block[j] * qmat[j];

//        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
//           || bias-level >= (1<<(QMAT_SHIFT - 3))){
        if(((unsigned)(level+threshold1))>threshold2){
                level= (bias + level)>>QMAT_SHIFT;
                coeff[1][i]= level-1;
//                coeff[2][k]= level-2;
                level= (bias - level)>>QMAT_SHIFT;
                coeff[0][i]= -level;
                coeff[1][i]= -level+1;
//                coeff[2][k]= -level+2;
            coeff_count[i]= FFMIN(level, 2);
            av_assert2(coeff_count[i]);
            /* below threshold: only +-1 candidate, sign taken from level */
            coeff[0][i]= (level>>31)|1;

    *overflow= s->max_qcoeff < max; //overflow might have happened

    if(last_non_zero < start_i){
        memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
        return last_non_zero;

    score_tab[start_i]= 0;
    survivor[0]= start_i;

    /* dynamic-programming sweep over scan positions */
    for(i=start_i; i<=last_non_zero; i++){
        int level_index, j, zero_distortion;
        int dct_coeff= FFABS(block[ scantable[i] ]);
        int best_score=256*256*256*120;

        if (s->fdsp.fdct == ff_fdct_ifast)
            dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
        zero_distortion= dct_coeff*dct_coeff;

        for(level_index=0; level_index < coeff_count[i]; level_index++){
            int level= coeff[level_index][i];
            const int alevel= FFABS(level);

            /* reconstruct the coefficient the decoder would see */
            if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
                unquant_coeff= alevel*qmul + qadd;
            } else if(s->out_format == FMT_MJPEG) {
                j = s->idsp.idct_permutation[scantable[i]];
                unquant_coeff = alevel * matrix[j] * 8;
                j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
                    unquant_coeff = (int)(  alevel  * mpeg2_qscale * matrix[j]) >> 4;
                    unquant_coeff =   (unquant_coeff - 1) | 1;
                    unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
                    unquant_coeff =   (unquant_coeff - 1) | 1;

            distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;

            if((level&(~127)) == 0){
                /* level fits the VLC table: try each survivor as predecessor */
                for(j=survivor_count-1; j>=0; j--){
                    int run= i - survivor[j];
                    int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
                    score += score_tab[i-run];

                    if(score < best_score){
                        level_tab[i+1]= level-64;

                if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
                    /* H.263-style "last" VLC: also track best end-of-block */
                    for(j=survivor_count-1; j>=0; j--){
                        int run= i - survivor[j];
                        int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
                        score += score_tab[i-run];
                        if(score < last_score){
                            last_level= level-64;
                /* escape coding needed for this level */
                distortion += esc_length*lambda;
                for(j=survivor_count-1; j>=0; j--){
                    int run= i - survivor[j];
                    int score= distortion + score_tab[i-run];

                    if(score < best_score){
                        level_tab[i+1]= level-64;

                if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
                    for(j=survivor_count-1; j>=0; j--){
                        int run= i - survivor[j];
                        int score= distortion + score_tab[i-run];
                        if(score < last_score){
                            last_level= level-64;

        score_tab[i+1]= best_score;

        // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
        if(last_non_zero <= 27){
            for(; survivor_count; survivor_count--){
                if(score_tab[ survivor[survivor_count-1] ] <= best_score)
            for(; survivor_count; survivor_count--){
                if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)

        survivor[ survivor_count++ ]= i+1;

    if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
        /* non-H.263: pick the best end position from the score table */
        last_score= 256*256*256*120;
        for(i= survivor[0]; i<=last_non_zero + 1; i++){
            int score= score_tab[i];
                score += lambda * 2; // FIXME more exact?

            if(score < last_score){
                last_level= level_tab[i];
                last_run= run_tab[i];

    s->coded_score[n] = last_score;

    dc= FFABS(block[0]);
    last_non_zero= last_i - 1;
    memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));

    if(last_non_zero < start_i)
        return last_non_zero;

    if(last_non_zero == 0 && start_i == 0){
        /* only a DC-adjacent coefficient survives: decide whether keeping it
         * beats coding an empty block */
        int best_score= dc * dc;

        for(i=0; i<coeff_count[0]; i++){
            int level= coeff[i][0];
            int alevel= FFABS(level);
            int unquant_coeff, score, distortion;

            if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
                    unquant_coeff= (alevel*qmul + qadd)>>3;
                    unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
                    unquant_coeff =   (unquant_coeff - 1) | 1;
                unquant_coeff = (unquant_coeff + 4) >> 3;
                unquant_coeff<<= 3 + 3;

            distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
            if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
            else                    score= distortion + esc_length*lambda;

            if(score < best_score){
                best_level= level - 64;

        block[0]= best_level;
        s->coded_score[n] = best_score - dc*dc;
        if(best_level == 0) return -1;
        else                return last_non_zero;

    av_assert2(last_level);

    /* trace back the chosen path and write the levels into the block */
    block[ perm_scantable[last_non_zero] ]= last_level;

    for(; i>start_i; i -= run_tab[i] + 1){
        block[ perm_scantable[i-1] ]= level_tab[i];

    return last_non_zero;
//#define REFINE_STATS 1
/* 8x8 DCT basis functions, scaled by (1<<BASIS_SHIFT) and stored in
 * IDCT-permuted order; filled lazily by build_basis(). */
static int16_t basis[64][64];

/* Precompute the scaled cosine basis used by dct_quantize_refine();
 * perm is the IDCT coefficient permutation. */
static void build_basis(uint8_t *perm){
                double s= 0.25*(1<<BASIS_SHIFT);
                int perm_index= perm[index];
                /* DC rows/columns carry the 1/sqrt(2) normalization factor */
                if(i==0) s*= sqrt(0.5);
                if(j==0) s*= sqrt(0.5);
                basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Iteratively refine an already-quantized 8x8 block (quantizer noise
 * shaping): repeatedly try changing single coefficients by +-1 and keep a
 * change when it lowers distortion (measured against the residual rem[]
 * via the precomputed basis[] functions) plus lambda * bit-cost delta.
 * block is the quantized block, weight the per-coefficient weights and
 * orig the reference pixels. Returns the new last-nonzero index. */
static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
                        int16_t *block, int16_t *weight, int16_t *orig,
    LOCAL_ALIGNED_16(int16_t, d1, [64]);
    const uint8_t *scantable= s->intra_scantable.scantable;
    const uint8_t *perm_scantable= s->intra_scantable.permutated;
//    unsigned int threshold1, threshold2;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    uint8_t * last_length;
    int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true

    /* REFINE_STATS counters (debug only) */
    static int after_last=0;
    static int to_zero=0;
    static int from_zero=0;
    static int messed_sign=0;

    /* lazily build the DCT basis table on first use */
    if(basis[0][0] == 0)
        build_basis(s->idsp.idct_permutation);

        /* For AIC we skip quant/dequant of INTRADC */

        q <<= RECON_SHIFT-3;
        /* note: block[0] is assumed to be positive */
//        block[0] = (block[0] + (q >> 1)) / q;

//        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
//            bias= 1<<(QMAT_SHIFT-1);
        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    last_non_zero = s->block_last_index[n];

    /* initialize rem[] with the (negated, shifted) reference pixels */
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME  use orig directly instead of copying to rem[]
    STOP_TIMER("memset rem[]")}

    /* derive per-coefficient weights from qns and the weight[] input */
    for(i=0; i<64; i++){
        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
//        w=weight[i] = (63*qns + (w/2)) / w;
        av_assert2(w<(1<<6));

    lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);

    /* subtract the contribution of every existing coefficient from rem[]
     * and record the run-length structure of the block */
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
            if(level<0) coeff= qmul*level - qadd;
            else        coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
    if(last_non_zero>0){
    STOP_TIMER("init rem[]")

        /* main refinement loop: evaluate +-1 changes on each coefficient */
        int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
        int run2, best_unquant_change=0, analyze_gradient;
        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;

        if(analyze_gradient){
            /* weighted residual used as a gradient hint for sign decisions */
            for(i=0; i<64; i++){
                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
        STOP_TIMER("rem*w*w")}

            /* DC coefficient: intra only, reconstructed as q*level */
            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->mb_intra);

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                if(new_coeff >= 2048 || new_coeff < 0)

                score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
                                                  new_coeff - old_coeff);
                if(score<best_score){
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;

            run2= run_tab[rle_index++];

        /* AC coefficients */
        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)

                if(level<0) old_coeff= qmul*level - qadd;
                else        old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++]; //FIXME ! maybe after last
            av_assert2(run2>=0 || i >= last_non_zero );

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))

                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else            new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                    //FIXME check for overflow

                    if(level < 63 && level > -63){
                        /* bit-cost delta of replacing level with new_level */
                        if(i < last_non_zero)
                            score +=  length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                     - length[UNI_AC_ENC_INDEX(run, level+64)];
                            score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                     - last_length[UNI_AC_ENC_INDEX(run, level+64)];
                    av_assert2(FFABS(new_level)==1);

                    if(analyze_gradient){
                        /* skip sign flips that disagree with the gradient */
                        int g= d1[ scantable[i] ];
                        if(g && (g^new_level) >= 0)

                    if(i < last_non_zero){
                        /* inserting a coefficient splits an existing run */
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))

                        if(next_i < last_non_zero)
                            score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                     + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            score +=  length[UNI_AC_ENC_INDEX(run, 65)]
                                    + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                    - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                    - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];

                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        /* removing a coefficient merges two runs */
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))

                        if(next_i < last_non_zero)
                            score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                            score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                            score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                    - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
                if(score<best_score){
                    best_change= change;
                    best_unquant_change= unquant_change;

                prev_level= level + 64;
                if(prev_level&(~127))

        STOP_TIMER("iterative step")}

            /* apply the best change found in this iteration */
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
                if(block[j] - best_change){
                    if(FFABS(block[j]) > FFABS(block[j] - best_change)){
                /* change zeroed the last coefficient: shrink last_non_zero */
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])

            if(256*256*256*64 % count == 0){
                av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);

            /* rebuild the run table and update the residual */
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];
                    run_tab[rle_index++]=run;

            s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);

    if(last_non_zero>0){
    STOP_TIMER("iterative search")

    return last_non_zero;
 * Permute an 8x8 block according to permutation.
 * @param block the block which will be permuted according to
 *              the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to
 *             speed the permutation up
 * @param scantable the used scantable, this is only used to speed the
 *                  permutation up, the block is not (inverse) permuted
 *                  to scantable order!
void ff_block_permute(int16_t *block, uint8_t *permutation,
                      const uint8_t *scantable, int last)
    //FIXME  it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)

    /* copy the nonzero coefficients aside ... */
    for (i = 0; i <= last; i++) {
        const int j = scantable[i];
    /* ... then write them back through the permutation */
    for (i = 0; i <= last; i++) {
        const int j = scantable[i];
        const int perm_j = permutation[j];
        block[perm_j] = temp[j];
/* Plain (non-trellis) forward DCT + quantization of one 8x8 block.
 * n selects the block/component (n < 4 = luma), qscale the quantizer.
 * Sets *overflow when a level exceeds max_qcoeff and returns the index of
 * the last nonzero coefficient in scan order. */
int ff_dct_quantize_c(MpegEncContext *s,
                        int16_t *block, int n,
                        int qscale, int *overflow)
    int i, j, level, last_non_zero, q, start_i;
    const uint8_t *scantable= s->intra_scantable.scantable;
    unsigned int threshold1, threshold2;

    s->fdsp.fdct(block);

    if(s->dct_error_sum)
        s->denoise_dct(s, block);

        /* For AIC we skip quant/dequant of INTRADC */
            /* note: block[0] is assumed to be positive */
            block[0] = (block[0] + (q >> 1)) / q;
        qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
        bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
        qmat = s->q_inter_matrix[qscale];
        bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
    threshold1= (1<<QMAT_SHIFT) - bias - 1;
    threshold2= (threshold1<<1);
    /* scan backwards to find the last coefficient surviving quantization */
    for(i=63;i>=start_i;i--) {
        level = block[j] * qmat[j];

        if(((unsigned)(level+threshold1))>threshold2){
    /* quantize all coefficients up to that point */
    for(i=start_i; i<=last_non_zero; i++) {
        level = block[j] * qmat[j];

//        if(   bias+level >= (1<<QMAT_SHIFT)
//           || bias-level >= (1<<QMAT_SHIFT)){
        if(((unsigned)(level+threshold1))>threshold2){
                level= (bias + level)>>QMAT_SHIFT;
                level= (bias - level)>>QMAT_SHIFT;

    *overflow= s->max_qcoeff < max; //overflow might have happened

    /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
    if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
        ff_block_permute(block, s->idsp.idct_permutation,
                      scantable, last_non_zero);

    return last_non_zero;
/* Offset of an option's backing field inside MpegEncContext. */
#define OFFSET(x) offsetof(MpegEncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private AVOptions of the H.263 encoder. */
static const AVOption h263_options[] = {
    { "obmc",         "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "mb_info",      "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass exposing h263_options on the H.263 encoder's private context. */
4812 static const AVClass h263_class = {
4813 .class_name = "H.263 encoder",
4814 .item_name = av_default_item_name,
4815 .option = h263_options,
4816 .version = LIBAVUTIL_VERSION_INT,
/* H.263 encoder registration; uses the shared mpegvideo encode entry points
 * (init/encode2/close) and supports YUV420P input only. */
4819 AVCodec ff_h263_encoder = {
4821 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4822 .type = AVMEDIA_TYPE_VIDEO,
4823 .id = AV_CODEC_ID_H263,
4824 .priv_data_size = sizeof(MpegEncContext),
4825 .init = ff_mpv_encode_init,
4826 .encode2 = ff_mpv_encode_picture,
4827 .close = ff_mpv_encode_end,
4828 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4829 .priv_class = &h263_class,
/* Private options for the H.263+ encoder: unlimited MVs, alternative inter
 * VLC, OBMC, and structured slices (array terminator elided). */
4832 static const AVOption h263p_options[] = {
4833 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4834 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4835 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4836 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
/* AVClass exposing h263p_options on the H.263+ encoder's private context. */
4840 static const AVClass h263p_class = {
4841 .class_name = "H.263p encoder",
4842 .item_name = av_default_item_name,
4843 .option = h263p_options,
4844 .version = LIBAVUTIL_VERSION_INT,
/* H.263+ (H.263 version 2) encoder registration; same shared mpegvideo entry
 * points as ff_h263_encoder, additionally slice-thread capable. */
4847 AVCodec ff_h263p_encoder = {
4849 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4850 .type = AVMEDIA_TYPE_VIDEO,
4851 .id = AV_CODEC_ID_H263P,
4852 .priv_data_size = sizeof(MpegEncContext),
4853 .init = ff_mpv_encode_init,
4854 .encode2 = ff_mpv_encode_picture,
4855 .close = ff_mpv_encode_end,
4856 .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4857 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4858 .priv_class = &h263p_class,
/* AVClass for MSMPEG4v2; no codec-specific options, only the generic
 * mpegvideo option table. */
4861 static const AVClass msmpeg4v2_class = {
4862 .class_name = "msmpeg4v2 encoder",
4863 .item_name = av_default_item_name,
4864 .option = ff_mpv_generic_options,
4865 .version = LIBAVUTIL_VERSION_INT,
/* MSMPEG4v2 encoder registration; shared mpegvideo entry points, YUV420P. */
4868 AVCodec ff_msmpeg4v2_encoder = {
4869 .name = "msmpeg4v2",
4870 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4871 .type = AVMEDIA_TYPE_VIDEO,
4872 .id = AV_CODEC_ID_MSMPEG4V2,
4873 .priv_data_size = sizeof(MpegEncContext),
4874 .init = ff_mpv_encode_init,
4875 .encode2 = ff_mpv_encode_picture,
4876 .close = ff_mpv_encode_end,
4877 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4878 .priv_class = &msmpeg4v2_class,
/* AVClass for MSMPEG4v3; generic mpegvideo options only. */
4881 static const AVClass msmpeg4v3_class = {
4882 .class_name = "msmpeg4v3 encoder",
4883 .item_name = av_default_item_name,
4884 .option = ff_mpv_generic_options,
4885 .version = LIBAVUTIL_VERSION_INT,
/* MSMPEG4v3 encoder registration; shared mpegvideo entry points, YUV420P.
 * NOTE(review): the .name member is elided in this extract. */
4888 AVCodec ff_msmpeg4v3_encoder = {
4890 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4891 .type = AVMEDIA_TYPE_VIDEO,
4892 .id = AV_CODEC_ID_MSMPEG4V3,
4893 .priv_data_size = sizeof(MpegEncContext),
4894 .init = ff_mpv_encode_init,
4895 .encode2 = ff_mpv_encode_picture,
4896 .close = ff_mpv_encode_end,
4897 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4898 .priv_class = &msmpeg4v3_class,
/* AVClass for WMV1 (Windows Media Video 7); generic mpegvideo options only. */
4901 static const AVClass wmv1_class = {
4902 .class_name = "wmv1 encoder",
4903 .item_name = av_default_item_name,
4904 .option = ff_mpv_generic_options,
4905 .version = LIBAVUTIL_VERSION_INT,
4908 AVCodec ff_wmv1_encoder = {
4910 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4911 .type = AVMEDIA_TYPE_VIDEO,
4912 .id = AV_CODEC_ID_WMV1,
4913 .priv_data_size = sizeof(MpegEncContext),
4914 .init = ff_mpv_encode_init,
4915 .encode2 = ff_mpv_encode_picture,
4916 .close = ff_mpv_encode_end,
4917 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4918 .priv_class = &wmv1_class,