2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/pixdesc.h"
40 #include "libavutil/opt.h"
41 #include "libavutil/timer.h"
46 #include "mpegvideo.h"
47 #include "mpegvideodata.h"
51 #include "mjpegenc_common.h"
53 #include "mpegutils.h"
56 #include "pixblockdsp.h"
60 #include "aandcttab.h"
62 #include "mpeg4video.h"
64 #include "bytestream.h"
70 #define QUANT_BIAS_SHIFT 8
72 #define QMAT_SHIFT_MMX 16
/* Forward declarations for encoder internals defined later in this file. */
75 static int encode_picture(MpegEncContext *s, int picture_number);
76 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
77 static int sse_mb(MpegEncContext *s);
78 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
79 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
/* Shared default tables; filled once in mpv_encode_defaults() and then
 * referenced by every encoder instance (read-only afterwards). */
81 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
82 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
/* Generic AVOption table shared by the mpegvideo-based encoders.
 * NOTE(review): the initializer body is not visible in this chunk. */
84 const AVOption ff_mpv_generic_options[] = {
/**
 * Precompute reciprocal quantization multiplier tables for every qscale in
 * [qmin, qmax], so the quantizers can multiply instead of divide.
 * The table layout chosen depends on which forward-DCT implementation is in
 * use (islow/faan vs. ifast), since ifast bakes the AAN scale factors in.
 * qmat16 additionally stores a bias term for the 16-bit (SIMD) quantizer.
 */
89 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
90 uint16_t (*qmat16)[2][64],
91 const uint16_t *quant_matrix,
92 int bias, int qmin, int qmax, int intra)
94 FDCTDSPContext *fdsp = &s->fdsp;
98 for (qscale = qmin; qscale <= qmax; qscale++) {
/* MPEG-2 non-linear qscale maps the scale code through a table;
 * otherwise the effective quantizer step is simply 2*qscale. */
102 if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
103 else qscale2 = qscale << 1;
105 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
107 fdsp->fdct == ff_faandct ||
108 #endif /* CONFIG_FAANDCT */
109 fdsp->fdct == ff_jpeg_fdct_islow_10) {
110 for (i = 0; i < 64; i++) {
/* idct_permutation maps natural order to the IDCT's scan order. */
111 const int j = s->idsp.idct_permutation[i];
112 int64_t den = (int64_t) qscale2 * quant_matrix[j];
113 /* 16 <= qscale * quant_matrix[i] <= 7905
114 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
115 * 19952 <= x <= 249205026
116 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
117 * 3444240 >= (1 << 36) / (x) >= 275 */
119 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
121 } else if (fdsp->fdct == ff_fdct_ifast) {
122 for (i = 0; i < 64; i++) {
123 const int j = s->idsp.idct_permutation[i];
/* ifast output is pre-scaled by the AAN factors, so fold them
 * into the divisor here. */
124 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
125 /* 16 <= qscale * quant_matrix[i] <= 7905
126 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
127 * 19952 <= x <= 249205026
128 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
129 * 3444240 >= (1 << 36) / (x) >= 275 */
131 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
134 for (i = 0; i < 64; i++) {
135 const int j = s->idsp.idct_permutation[i];
136 int64_t den = (int64_t) qscale2 * quant_matrix[j];
137 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
138 * Assume x = qscale * quant_matrix[i]
140 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
141 * so 32768 >= (1 << 19) / (x) >= 67 */
142 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
143 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
144 // (qscale * quant_matrix[i]);
145 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
/* Clamp to avoid 0 and the 16-bit overflow value 32768. */
147 if (qmat16[qscale][0][i] == 0 ||
148 qmat16[qscale][0][i] == 128 * 256)
149 qmat16[qscale][0][i] = 128 * 256 - 1;
150 qmat16[qscale][1][i] =
151 ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
152 qmat16[qscale][0][i]);
/* Overflow check: reduce the shift while max*qmat would exceed INT_MAX
 * (skips coefficient 0 for intra, where DC is handled separately). */
156 for (i = intra; i < 64; i++) {
158 if (fdsp->fdct == ff_fdct_ifast) {
159 max = (8191LL * ff_aanscales[i]) >> 14;
161 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
167 av_log(NULL, AV_LOG_INFO,
168 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/* Derive s->qscale (and lambda2) from the current rate-control lambda.
 * qscale is clamped to [qmin, qmax]; when vbv_ignore_qmax is set the upper
 * clamp is relaxed to 31 so VBV emergencies can raise the quantizer. */
173 static inline void update_qscale(MpegEncContext *s)
/* NOTE(review): the "&& 0" makes this non-linear-qscale search branch
 * intentionally dead code — kept for reference, never executed. */
175 if (s->q_scale_type == 1 && 0) {
177 int bestdiff=INT_MAX;
180 for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
181 int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
182 if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
183 (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
185 if (diff < bestdiff) {
/* Linear case: qscale ~= lambda * 139 / 2^(FF_LAMBDA_SHIFT+7), rounded. */
192 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
193 (FF_LAMBDA_SHIFT + 7);
194 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
197 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/* Write a 64-entry quantization matrix to the bitstream in zigzag scan
 * order, 8 bits per coefficient. */
201 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
207 for (i = 0; i < 64; i++) {
208 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
215 * init s->current_picture.qscale_table from s->lambda_table
217 void ff_init_qscale_tab(MpegEncContext *s)
219 int8_t * const qscale_table = s->current_picture.qscale_table;
/* Convert each macroblock's lambda to a qscale (same lambda->qscale
 * mapping as update_qscale) and clamp to the user qmin. */
222 for (i = 0; i < s->mb_num; i++) {
223 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
224 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
225 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/* Copy the per-frame fields that slice-thread duplicate contexts need to
 * see after motion estimation has run on the main context. */
230 static void update_duplicate_context_after_me(MpegEncContext *dst,
233 #define COPY(a) dst->a= src->a
235 COPY(current_picture);
241 COPY(picture_in_gop_number);
242 COPY(gop_picture_number);
243 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
244 COPY(progressive_frame); // FIXME don't set in encode_header
245 COPY(partitioned_frame); // FIXME don't set in encode_header
250 * Set the given MpegEncContext to defaults for encoding.
251 * the changed fields will not depend upon the prior state of the MpegEncContext.
253 static void mpv_encode_defaults(MpegEncContext *s)
256 ff_mpv_common_defaults(s);
/* Populate the static shared f-code table (idempotent across instances). */
258 for (i = -16; i < 16; i++) {
259 default_fcode_tab[i + MAX_MV] = 1;
261 s->me.mv_penalty = default_mv_penalty;
262 s->fcode_tab = default_fcode_tab;
264 s->input_picture_number = 0;
265 s->picture_in_gop_number = 0;
/* Select the DCT quantizer implementations (C defaults, optional x86
 * overrides, trellis variant when requested via avctx->trellis). */
268 av_cold int ff_dct_encode_init(MpegEncContext *s) {
270 ff_dct_encode_init_x86(s);
272 if (CONFIG_H263_ENCODER)
273 ff_h263dsp_init(&s->h263dsp);
274 if (!s->dct_quantize)
275 s->dct_quantize = ff_dct_quantize_c;
277 s->denoise_dct = denoise_dct_c;
/* Keep the non-trellis quantizer reachable even when trellis is enabled. */
278 s->fast_dct_quantize = s->dct_quantize;
279 if (s->avctx->trellis)
280 s->dct_quantize = dct_quantize_trellis_c;
285 /* init video encoder */
/* Validates user parameters, applies deprecated-option compatibility
 * shims, configures per-codec output formats, allocates encoder state and
 * quantization tables, and initializes rate control. Returns 0 on success
 * or a negative AVERROR code. */
286 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
288 MpegEncContext *s = avctx->priv_data;
289 AVCPBProperties *cpb_props;
290 int i, ret, format_supported;
292 mpv_encode_defaults(s);
/* --- codec-specific pixel-format validation --- */
294 switch (avctx->codec_id) {
295 case AV_CODEC_ID_MPEG2VIDEO:
296 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
297 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
298 av_log(avctx, AV_LOG_ERROR,
299 "only YUV420 and YUV422 are supported\n");
303 case AV_CODEC_ID_MJPEG:
304 case AV_CODEC_ID_AMV:
305 format_supported = 0;
306 /* JPEG color space */
307 if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
308 avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
309 avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
310 (avctx->color_range == AVCOL_RANGE_JPEG &&
311 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
312 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
313 avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
314 format_supported = 1;
315 /* MPEG color space */
316 else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
317 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
318 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
319 avctx->pix_fmt == AV_PIX_FMT_YUV444P))
320 format_supported = 1;
322 if (!format_supported) {
323 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
328 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
329 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
/* Derive internal chroma subsampling mode from the pixel format. */
334 switch (avctx->pix_fmt) {
335 case AV_PIX_FMT_YUVJ444P:
336 case AV_PIX_FMT_YUV444P:
337 s->chroma_format = CHROMA_444;
339 case AV_PIX_FMT_YUVJ422P:
340 case AV_PIX_FMT_YUV422P:
341 s->chroma_format = CHROMA_422;
343 case AV_PIX_FMT_YUVJ420P:
344 case AV_PIX_FMT_YUV420P:
346 s->chroma_format = CHROMA_420;
350 avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
/* --- deprecated public-field -> private-option compatibility shims --- */
352 #if FF_API_PRIVATE_OPT
353 FF_DISABLE_DEPRECATION_WARNINGS
354 if (avctx->rtp_payload_size)
355 s->rtp_payload_size = avctx->rtp_payload_size;
356 if (avctx->me_penalty_compensation)
357 s->me_penalty_compensation = avctx->me_penalty_compensation;
359 s->me_pre = avctx->pre_me;
360 FF_ENABLE_DEPRECATION_WARNINGS
/* --- copy basic rate/size/GOP parameters from the user context --- */
363 s->bit_rate = avctx->bit_rate;
364 s->width = avctx->width;
365 s->height = avctx->height;
366 if (avctx->gop_size > 600 &&
367 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
368 av_log(avctx, AV_LOG_WARNING,
369 "keyframe interval too large!, reducing it from %d to %d\n",
370 avctx->gop_size, 600);
371 avctx->gop_size = 600;
373 s->gop_size = avctx->gop_size;
375 if (avctx->max_b_frames > MAX_B_FRAMES) {
376 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
377 "is %d.\n", MAX_B_FRAMES);
378 avctx->max_b_frames = MAX_B_FRAMES;
380 s->max_b_frames = avctx->max_b_frames;
381 s->codec_id = avctx->codec->id;
382 s->strict_std_compliance = avctx->strict_std_compliance;
383 s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
384 s->rtp_mode = !!s->rtp_payload_size;
385 s->intra_dc_precision = avctx->intra_dc_precision;
387 // workaround some differences between how applications specify dc precision
388 if (s->intra_dc_precision < 0) {
389 s->intra_dc_precision += 8;
390 } else if (s->intra_dc_precision >= 8)
391 s->intra_dc_precision -= 8;
393 if (s->intra_dc_precision < 0) {
394 av_log(avctx, AV_LOG_ERROR,
395 "intra dc precision must be positive, note some applications use"
396 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
397 return AVERROR(EINVAL);
/* Only MPEG-2 supports a nonzero DC precision (up to 3 -> 11 bit). */
400 if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
401 av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
402 return AVERROR(EINVAL);
404 s->user_specified_pts = AV_NOPTS_VALUE;
406 if (s->gop_size <= 1) {
413 #if FF_API_MOTION_EST
414 FF_DISABLE_DEPRECATION_WARNINGS
415 s->me_method = avctx->me_method;
416 FF_ENABLE_DEPRECATION_WARNINGS
420 s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
423 FF_DISABLE_DEPRECATION_WARNINGS
424 if (avctx->border_masking != 0.0)
425 s->border_masking = avctx->border_masking;
426 FF_ENABLE_DEPRECATION_WARNINGS
/* Adaptive quantization is enabled when any masking option or QP-RD
 * requests per-MB quantizer decisions. */
429 s->adaptive_quant = (s->avctx->lumi_masking ||
430 s->avctx->dark_masking ||
431 s->avctx->temporal_cplx_masking ||
432 s->avctx->spatial_cplx_masking ||
433 s->avctx->p_masking ||
435 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
438 s->loop_filter = !!(s->avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
/* --- derive a VBV buffer size when only a max rate was given --- */
440 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
441 switch(avctx->codec_id) {
442 case AV_CODEC_ID_MPEG1VIDEO:
443 case AV_CODEC_ID_MPEG2VIDEO:
444 avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
446 case AV_CODEC_ID_MPEG4:
447 case AV_CODEC_ID_MSMPEG4V1:
448 case AV_CODEC_ID_MSMPEG4V2:
449 case AV_CODEC_ID_MSMPEG4V3:
450 if (avctx->rc_max_rate >= 15000000) {
451 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
452 } else if(avctx->rc_max_rate >= 2000000) {
453 avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
454 } else if(avctx->rc_max_rate >= 384000) {
455 avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
457 avctx->rc_buffer_size = 40;
458 avctx->rc_buffer_size *= 16384;
461 if (avctx->rc_buffer_size) {
462 av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
/* --- rate-control parameter sanity checks --- */
466 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
467 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
471 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
472 av_log(avctx, AV_LOG_INFO,
473 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
476 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
477 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
481 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
482 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
486 if (avctx->rc_max_rate &&
487 avctx->rc_max_rate == avctx->bit_rate &&
488 avctx->rc_max_rate != avctx->rc_min_rate) {
489 av_log(avctx, AV_LOG_INFO,
490 "impossible bitrate constraints, this will fail\n");
493 if (avctx->rc_buffer_size &&
494 avctx->bit_rate * (int64_t)avctx->time_base.num >
495 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
496 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
500 if (!s->fixed_qscale &&
501 avctx->bit_rate * av_q2d(avctx->time_base) >
502 avctx->bit_rate_tolerance) {
503 av_log(avctx, AV_LOG_WARNING,
504 "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, (int64_t)avctx->bit_rate);
505 avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
508 if (s->avctx->rc_max_rate &&
509 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
510 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
511 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
512 90000LL * (avctx->rc_buffer_size - 1) >
513 s->avctx->rc_max_rate * 0xFFFFLL) {
514 av_log(avctx, AV_LOG_INFO,
515 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
516 "specified vbv buffer is too large for the given bitrate!\n");
/* --- reject feature/codec combinations that are not supported --- */
519 if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
520 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
521 s->codec_id != AV_CODEC_ID_FLV1) {
522 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
526 if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
527 av_log(avctx, AV_LOG_ERROR,
528 "OBMC is only supported with simple mb decision\n");
532 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
533 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
537 if (s->max_b_frames &&
538 s->codec_id != AV_CODEC_ID_MPEG4 &&
539 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
540 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
541 av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
544 if (s->max_b_frames < 0) {
545 av_log(avctx, AV_LOG_ERROR,
546 "max b frames must be 0 or positive for mpegvideo based encoders\n");
550 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
551 s->codec_id == AV_CODEC_ID_H263 ||
552 s->codec_id == AV_CODEC_ID_H263P) &&
553 (avctx->sample_aspect_ratio.num > 255 ||
554 avctx->sample_aspect_ratio.den > 255)) {
555 av_log(avctx, AV_LOG_WARNING,
556 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
557 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
558 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
559 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
/* Per-codec resolution / alignment limits. */
562 if ((s->codec_id == AV_CODEC_ID_H263 ||
563 s->codec_id == AV_CODEC_ID_H263P) &&
564 (avctx->width > 2048 ||
565 avctx->height > 1152 )) {
566 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
569 if ((s->codec_id == AV_CODEC_ID_H263 ||
570 s->codec_id == AV_CODEC_ID_H263P) &&
571 ((avctx->width &3) ||
572 (avctx->height&3) )) {
573 av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
577 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
578 (avctx->width > 4095 ||
579 avctx->height > 4095 )) {
580 av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
584 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
585 (avctx->width > 16383 ||
586 avctx->height > 16383 )) {
587 av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
591 if (s->codec_id == AV_CODEC_ID_RV10 &&
593 avctx->height&15 )) {
594 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
595 return AVERROR(EINVAL);
598 if (s->codec_id == AV_CODEC_ID_RV20 &&
601 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
602 return AVERROR(EINVAL);
605 if ((s->codec_id == AV_CODEC_ID_WMV1 ||
606 s->codec_id == AV_CODEC_ID_WMV2) &&
608 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
612 if ((s->avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
613 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
614 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
618 #if FF_API_PRIVATE_OPT
619 FF_DISABLE_DEPRECATION_WARNINGS
620 if (avctx->mpeg_quant)
621 s->mpeg_quant = avctx->mpeg_quant;
622 FF_ENABLE_DEPRECATION_WARNINGS
625 // FIXME mpeg2 uses that too
626 if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
627 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
628 av_log(avctx, AV_LOG_ERROR,
629 "mpeg2 style quantization not supported by codec\n");
633 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
634 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
638 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
639 s->avctx->mb_decision != FF_MB_DECISION_RD) {
640 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
644 #if FF_API_PRIVATE_OPT
645 FF_DISABLE_DEPRECATION_WARNINGS
646 if (avctx->scenechange_threshold)
647 s->scenechange_threshold = avctx->scenechange_threshold;
648 FF_ENABLE_DEPRECATION_WARNINGS
651 if (s->scenechange_threshold < 1000000000 &&
652 (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
653 av_log(avctx, AV_LOG_ERROR,
654 "closed gop with scene change detection are not supported yet, "
655 "set threshold to 1000000000\n");
659 if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
660 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
661 av_log(avctx, AV_LOG_ERROR,
662 "low delay forcing is only available for mpeg2\n");
665 if (s->max_b_frames != 0) {
666 av_log(avctx, AV_LOG_ERROR,
667 "B-frames cannot be used with low delay\n");
672 if (s->q_scale_type == 1) {
673 if (avctx->qmax > 28) {
674 av_log(avctx, AV_LOG_ERROR,
675 "non linear quant only supports qmax <= 28 currently\n");
680 if (avctx->slices > 1 &&
681 (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
682 av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
683 return AVERROR(EINVAL);
686 if (s->avctx->thread_count > 1 &&
687 s->codec_id != AV_CODEC_ID_MPEG4 &&
688 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
689 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
690 s->codec_id != AV_CODEC_ID_MJPEG &&
691 (s->codec_id != AV_CODEC_ID_H263P)) {
692 av_log(avctx, AV_LOG_ERROR,
693 "multi threaded encoding not supported by codec\n");
697 if (s->avctx->thread_count < 1) {
698 av_log(avctx, AV_LOG_ERROR,
699 "automatic thread number detection not supported by codec, "
704 if (!avctx->time_base.den || !avctx->time_base.num) {
705 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
709 #if FF_API_PRIVATE_OPT
710 FF_DISABLE_DEPRECATION_WARNINGS
711 if (avctx->b_frame_strategy)
712 s->b_frame_strategy = avctx->b_frame_strategy;
713 if (avctx->b_sensitivity != 40)
714 s->b_sensitivity = avctx->b_sensitivity;
715 FF_ENABLE_DEPRECATION_WARNINGS
718 if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
719 av_log(avctx, AV_LOG_INFO,
720 "notice: b_frame_strategy only affects the first pass\n");
721 s->b_frame_strategy = 0;
/* Normalize the time base by stripping common factors. */
724 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
726 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
727 avctx->time_base.den /= i;
728 avctx->time_base.num /= i;
/* Default quantizer rounding biases; codec family dependent. */
732 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) {
733 // (a + x * 3 / 8) / x
734 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
735 s->inter_quant_bias = 0;
737 s->intra_quant_bias = 0;
739 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
742 if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
743 av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
744 return AVERROR(EINVAL);
747 #if FF_API_QUANT_BIAS
748 FF_DISABLE_DEPRECATION_WARNINGS
749 if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
750 s->intra_quant_bias = avctx->intra_quant_bias;
751 if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
752 s->inter_quant_bias = avctx->inter_quant_bias;
753 FF_ENABLE_DEPRECATION_WARNINGS
756 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
758 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
759 s->avctx->time_base.den > (1 << 16) - 1) {
760 av_log(avctx, AV_LOG_ERROR,
761 "timebase %d/%d not supported by MPEG 4 standard, "
762 "the maximum admitted value for the timebase denominator "
763 "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
767 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
/* --- per-codec output format and capability setup --- */
769 switch (avctx->codec->id) {
770 case AV_CODEC_ID_MPEG1VIDEO:
771 s->out_format = FMT_MPEG1;
772 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
773 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
775 case AV_CODEC_ID_MPEG2VIDEO:
776 s->out_format = FMT_MPEG1;
777 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
778 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
781 case AV_CODEC_ID_MJPEG:
782 case AV_CODEC_ID_AMV:
783 s->out_format = FMT_MJPEG;
784 s->intra_only = 1; /* force intra only for jpeg */
785 if (!CONFIG_MJPEG_ENCODER ||
786 ff_mjpeg_encode_init(s) < 0)
791 case AV_CODEC_ID_H261:
792 if (!CONFIG_H261_ENCODER)
794 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
795 av_log(avctx, AV_LOG_ERROR,
796 "The specified picture size of %dx%d is not valid for the "
797 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
798 s->width, s->height);
801 s->out_format = FMT_H261;
804 s->rtp_mode = 0; /* Sliced encoding not supported */
806 case AV_CODEC_ID_H263:
807 if (!CONFIG_H263_ENCODER)
809 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
810 s->width, s->height) == 8) {
811 av_log(avctx, AV_LOG_ERROR,
812 "The specified picture size of %dx%d is not valid for "
813 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
814 "352x288, 704x576, and 1408x1152. "
815 "Try H.263+.\n", s->width, s->height);
818 s->out_format = FMT_H263;
822 case AV_CODEC_ID_H263P:
823 s->out_format = FMT_H263;
826 s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
827 s->modified_quant = s->h263_aic;
828 s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
829 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
832 /* These are just to be sure */
836 case AV_CODEC_ID_FLV1:
837 s->out_format = FMT_H263;
838 s->h263_flv = 2; /* format = 1; 11-bit codes */
839 s->unrestricted_mv = 1;
840 s->rtp_mode = 0; /* don't allow GOB */
844 case AV_CODEC_ID_RV10:
845 s->out_format = FMT_H263;
849 case AV_CODEC_ID_RV20:
850 s->out_format = FMT_H263;
853 s->modified_quant = 1;
857 s->unrestricted_mv = 0;
859 case AV_CODEC_ID_MPEG4:
860 s->out_format = FMT_H263;
862 s->unrestricted_mv = 1;
863 s->low_delay = s->max_b_frames ? 0 : 1;
864 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
866 case AV_CODEC_ID_MSMPEG4V2:
867 s->out_format = FMT_H263;
869 s->unrestricted_mv = 1;
870 s->msmpeg4_version = 2;
874 case AV_CODEC_ID_MSMPEG4V3:
875 s->out_format = FMT_H263;
877 s->unrestricted_mv = 1;
878 s->msmpeg4_version = 3;
879 s->flipflop_rounding = 1;
883 case AV_CODEC_ID_WMV1:
884 s->out_format = FMT_H263;
886 s->unrestricted_mv = 1;
887 s->msmpeg4_version = 4;
888 s->flipflop_rounding = 1;
892 case AV_CODEC_ID_WMV2:
893 s->out_format = FMT_H263;
895 s->unrestricted_mv = 1;
896 s->msmpeg4_version = 5;
897 s->flipflop_rounding = 1;
905 #if FF_API_PRIVATE_OPT
906 FF_DISABLE_DEPRECATION_WARNINGS
907 if (avctx->noise_reduction)
908 s->noise_reduction = avctx->noise_reduction;
909 FF_ENABLE_DEPRECATION_WARNINGS
912 avctx->has_b_frames = !s->low_delay;
916 s->progressive_frame =
917 s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
918 AV_CODEC_FLAG_INTERLACED_ME) ||
/* --- allocate common state, DSP contexts and encoder scratch tables --- */
923 if (ff_mpv_common_init(s) < 0)
926 ff_fdctdsp_init(&s->fdsp, avctx);
927 ff_me_cmp_init(&s->mecc, avctx);
928 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
929 ff_pixblockdsp_init(&s->pdsp, avctx);
930 ff_qpeldsp_init(&s->qdsp);
932 if (s->msmpeg4_version) {
933 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
934 2 * 2 * (MAX_LEVEL + 1) *
935 (MAX_RUN + 1) * 2 * sizeof(int), fail);
937 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
939 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
940 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail);
941 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
942 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
943 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
944 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
945 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
946 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
947 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
948 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
951 if (s->noise_reduction) {
952 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
953 2 * 64 * sizeof(uint16_t), fail);
956 ff_dct_encode_init(s);
958 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
959 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
961 if (s->slice_context_count > 1) {
964 if (avctx->codec_id == AV_CODEC_ID_H263P)
965 s->h263_slice_structured = 1;
968 s->quant_precision = 5;
970 #if FF_API_PRIVATE_OPT
971 FF_DISABLE_DEPRECATION_WARNINGS
972 if (avctx->frame_skip_threshold)
973 s->frame_skip_threshold = avctx->frame_skip_threshold;
974 if (avctx->frame_skip_factor)
975 s->frame_skip_factor = avctx->frame_skip_factor;
976 if (avctx->frame_skip_exp)
977 s->frame_skip_exp = avctx->frame_skip_exp;
978 if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
979 s->frame_skip_cmp = avctx->frame_skip_cmp;
980 FF_ENABLE_DEPRECATION_WARNINGS
983 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, s->avctx->ildct_cmp);
984 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
986 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
987 ff_h261_encode_init(s);
988 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
989 ff_h263_encode_init(s);
990 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
991 if ((ret = ff_msmpeg4_encode_init(s)) < 0)
993 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
994 && s->out_format == FMT_MPEG1)
995 ff_mpeg1_encode_init(s);
/* --- install default (or user supplied) quantization matrices --- */
998 for (i = 0; i < 64; i++) {
999 int j = s->idsp.idct_permutation[i];
1000 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
1002 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
1003 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
1004 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1005 s->intra_matrix[j] =
1006 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1009 s->chroma_intra_matrix[j] =
1010 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1011 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1013 if (s->avctx->intra_matrix)
1014 s->intra_matrix[j] = s->avctx->intra_matrix[i];
1015 if (s->avctx->inter_matrix)
1016 s->inter_matrix[j] = s->avctx->inter_matrix[i];
1019 /* precompute matrix */
1020 /* for mjpeg, we do include qscale in the matrix */
1021 if (s->out_format != FMT_MJPEG) {
1022 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
1023 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1025 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
1026 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1030 if (ff_rate_control_init(s) < 0)
/* --- remaining deprecated-option shims (rate control tuning etc.) --- */
1033 #if FF_API_ERROR_RATE
1034 FF_DISABLE_DEPRECATION_WARNINGS
1035 if (avctx->error_rate)
1036 s->error_rate = avctx->error_rate;
1037 FF_ENABLE_DEPRECATION_WARNINGS;
1040 #if FF_API_NORMALIZE_AQP
1041 FF_DISABLE_DEPRECATION_WARNINGS
1042 if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP)
1043 s->mpv_flags |= FF_MPV_FLAG_NAQ;
1044 FF_ENABLE_DEPRECATION_WARNINGS;
1048 FF_DISABLE_DEPRECATION_WARNINGS
1049 if (avctx->flags & CODEC_FLAG_MV0)
1050 s->mpv_flags |= FF_MPV_FLAG_MV0;
1051 FF_ENABLE_DEPRECATION_WARNINGS
1055 FF_DISABLE_DEPRECATION_WARNINGS
1056 if (avctx->rc_qsquish != 0.0)
1057 s->rc_qsquish = avctx->rc_qsquish;
1058 if (avctx->rc_qmod_amp != 0.0)
1059 s->rc_qmod_amp = avctx->rc_qmod_amp;
1060 if (avctx->rc_qmod_freq)
1061 s->rc_qmod_freq = avctx->rc_qmod_freq;
1062 if (avctx->rc_buffer_aggressivity != 1.0)
1063 s->rc_buffer_aggressivity = avctx->rc_buffer_aggressivity;
1064 if (avctx->rc_initial_cplx != 0.0)
1065 s->rc_initial_cplx = avctx->rc_initial_cplx;
1067 s->lmin = avctx->lmin;
1069 s->lmax = avctx->lmax;
1072 av_freep(&s->rc_eq);
1073 s->rc_eq = av_strdup(avctx->rc_eq);
1075 return AVERROR(ENOMEM);
1077 FF_ENABLE_DEPRECATION_WARNINGS
1080 #if FF_API_PRIVATE_OPT
1081 FF_DISABLE_DEPRECATION_WARNINGS
1082 if (avctx->brd_scale)
1083 s->brd_scale = avctx->brd_scale;
1085 if (avctx->prediction_method)
1086 s->pred = avctx->prediction_method + 1;
1087 FF_ENABLE_DEPRECATION_WARNINGS
/* b_frame_strategy == 2: allocate downscaled frames for B-frame decision. */
1090 if (s->b_frame_strategy == 2) {
1091 for (i = 0; i < s->max_b_frames + 2; i++) {
1092 s->tmp_frames[i] = av_frame_alloc();
1093 if (!s->tmp_frames[i])
1094 return AVERROR(ENOMEM);
1096 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1097 s->tmp_frames[i]->width = s->width >> s->brd_scale;
1098 s->tmp_frames[i]->height = s->height >> s->brd_scale;
1100 ret = av_frame_get_buffer(s->tmp_frames[i], 32);
/* Export CPB (coded picture buffer) properties as side data. */
1106 cpb_props = ff_add_cpb_side_data(avctx);
1108 return AVERROR(ENOMEM);
1109 cpb_props->max_bitrate = avctx->rc_max_rate;
1110 cpb_props->min_bitrate = avctx->rc_min_rate;
1111 cpb_props->avg_bitrate = avctx->bit_rate;
1112 cpb_props->buffer_size = avctx->rc_buffer_size;
/* fail: free everything allocated so far and report a generic error. */
1116 ff_mpv_encode_end(avctx);
1117 return AVERROR_UNKNOWN;
/* Free all encoder state allocated by ff_mpv_encode_init(); safe to call
 * from the init failure path as well (av_freep tolerates NULL). */
1120 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1122 MpegEncContext *s = avctx->priv_data;
1125 ff_rate_control_uninit(s);
1127 ff_mpv_common_end(s);
1128 if (CONFIG_MJPEG_ENCODER &&
1129 s->out_format == FMT_MJPEG)
1130 ff_mjpeg_encode_close(s);
1132 av_freep(&avctx->extradata);
1134 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1135 av_frame_free(&s->tmp_frames[i]);
1137 ff_free_picture_tables(&s->new_picture);
1138 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1140 av_freep(&s->avctx->stats_out);
1141 av_freep(&s->ac_stats);
/* The chroma matrices may alias the luma ones; only free when distinct,
 * then NULL the pointers to avoid a double free below. */
1143 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1144 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1145 s->q_chroma_intra_matrix= NULL;
1146 s->q_chroma_intra_matrix16= NULL;
1147 av_freep(&s->q_intra_matrix);
1148 av_freep(&s->q_inter_matrix);
1149 av_freep(&s->q_intra_matrix16);
1150 av_freep(&s->q_inter_matrix16);
1151 av_freep(&s->input_picture);
1152 av_freep(&s->reordered_input_picture);
1153 av_freep(&s->dct_offset);
/* Sum of absolute errors of a 16x16 block in src against the constant
 * value ref (typically the block mean) — a cheap activity measure.
 * NOTE(review): local declarations and the return are elided from this view;
 * presumably acc starts at 0 and is returned. */
1158 static int get_sae(uint8_t *src, int ref, int stride)
1163 for (y = 0; y < 16; y++) {
1164 for (x = 0; x < 16; x++) {
1165 acc += FFABS(src[x + y * stride] - ref);
/* Count 16x16 macroblocks for which intra coding looks cheaper than inter:
 * a block is counted when its flatness measure (SAE around its own mean,
 * plus a 500 bias) is below the SAD against the reference frame.
 * NOTE(review): width handling and the return are elided from this view. */
1172 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1173 uint8_t *ref, int stride)
/* Round height down to a whole number of macroblock rows. */
1179 h = s->height & ~15;
1181 for (y = 0; y < h; y += 16) {
1182 for (x = 0; x < w; x += 16) {
1183 int offset = x + y * stride;
1184 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
/* pix_sum over 256 pixels; +128 for rounding before >>8 gives the mean. */
1186 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1187 int sae = get_sae(src + offset, mean, stride);
/* Counts 1 when intra (sae + bias) beats inter (sad). */
1189 acc += sae + 500 < sad;
/* Thin wrapper: allocate (or wrap, when shared) the buffers of a Picture
 * with the encoder's geometry/stride parameters. Returns ff_alloc_picture's
 * error code. */
1195 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1197 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1198 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1199 s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1200 &s->linesize, &s->uvlinesize);
/* Take one user frame (or NULL to flush) into the encoder's input queue:
 * validate/derive its pts, either reference the user buffer directly or
 * copy it into an internal Picture (with edge padding headroom), and place
 * it at the correct delay slot in s->input_picture[].
 * NOTE(review): this view is elided — several validation branches, the
 * "direct" decision and some error paths are not visible here. */
1203 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1205 Picture *pic = NULL;
1207 int i, display_picture_number = 0, ret;
/* With B-frames the encoder must buffer max_b_frames inputs; without them
 * the delay is 1 frame unless low_delay is set. */
1208 int encoding_delay = s->max_b_frames ? s->max_b_frames
1209 : (s->low_delay ? 0 : 1);
1210 int flush_offset = 1;
1215 display_picture_number = s->input_picture_number++;
1217 if (pts != AV_NOPTS_VALUE) {
1218 if (s->user_specified_pts != AV_NOPTS_VALUE) {
/* pts must be strictly increasing relative to the previous user pts. */
1219 int64_t last = s->user_specified_pts;
1222 av_log(s->avctx, AV_LOG_ERROR,
1223 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1225 return AVERROR(EINVAL);
/* Remember the pts step so dts of the first frame can be back-dated. */
1228 if (!s->low_delay && display_picture_number == 1)
1229 s->dts_delta = pts - last;
1231 s->user_specified_pts = pts;
/* No pts supplied: extrapolate from the last one, or fall back to the
 * display picture number. */
1233 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1234 s->user_specified_pts =
1235 pts = s->user_specified_pts + 1;
1236 av_log(s->avctx, AV_LOG_INFO,
1237 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1240 pts = display_picture_number;
/* Conditions under which the user buffer cannot be used in place
 * (strides/alignment/dimensions incompatible with the encoder). */
1244 if (!pic_arg->buf[0] ||
1245 pic_arg->linesize[0] != s->linesize ||
1246 pic_arg->linesize[1] != s->uvlinesize ||
1247 pic_arg->linesize[2] != s->uvlinesize)
1249 if ((s->width & 15) || (s->height & 15))
1251 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1253 if (s->linesize & (STRIDE_ALIGN-1))
1256 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1257 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1259 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1263 pic = &s->picture[i];
1267 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1270 ret = alloc_picture(s, pic, direct);
/* If the user data already sits exactly INPLACE_OFFSET into our buffer,
 * no copy is needed; otherwise copy plane by plane below. */
1275 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1276 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1277 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1280 int h_chroma_shift, v_chroma_shift;
1281 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1285 for (i = 0; i < 3; i++) {
1286 int src_stride = pic_arg->linesize[i];
1287 int dst_stride = i ? s->uvlinesize : s->linesize;
1288 int h_shift = i ? h_chroma_shift : 0;
1289 int v_shift = i ? v_chroma_shift : 0;
1290 int w = s->width >> h_shift;
1291 int h = s->height >> v_shift;
1292 uint8_t *src = pic_arg->data[i];
1293 uint8_t *dst = pic->f->data[i];
/* Interlaced MPEG-2 with large 32-alignment padding needs extra vertical
 * padding handling (vpad, set in the elided lines). */
1296 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1297 && !s->progressive_sequence
1298 && FFALIGN(s->height, 32) - s->height > 16)
1301 if (!s->avctx->rc_buffer_size)
1302 dst += INPLACE_OFFSET;
/* Fast path: identical strides let the whole plane be copied at once. */
1304 if (src_stride == dst_stride)
1305 memcpy(dst, src, src_stride * h);
1308 uint8_t *dst2 = dst;
1310 memcpy(dst2, src, w);
/* Pad non-macroblock-aligned dimensions by replicating edges. */
1315 if ((s->width & 15) || (s->height & (vpad-1))) {
1316 s->mpvencdsp.draw_edges(dst, dst_stride,
1326 ret = av_frame_copy_props(pic->f, pic_arg);
1330 pic->f->display_picture_number = display_picture_number;
1331 pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1333 /* Flushing: When we have not received enough input frames,
1334 * ensure s->input_picture[0] contains the first picture */
1335 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1336 if (s->input_picture[flush_offset])
1339 if (flush_offset <= 1)
1342 encoding_delay = encoding_delay - flush_offset + 1;
1345 /* shift buffer entries */
1346 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1347 s->input_picture[i - flush_offset] = s->input_picture[i];
1349 s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether picture p is similar enough to the reference to be skipped
 * entirely. Accumulates a per-8x8-block comparison score over all three
 * planes using the metric selected by frame_skip_exp, then tests it against
 * frame_skip_threshold and a lambda-scaled frame_skip_factor.
 * NOTE(review): the return statements are elided from this view; presumably
 * nonzero means "skip this frame". */
1354 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1358 int64_t score64 = 0;
1360 for (plane = 0; plane < 3; plane++) {
1361 const int stride = p->f->linesize[plane];
/* Luma plane covers 2x2 8-pixel blocks per MB, chroma 1x1 (4:2:0). */
1362 const int bw = plane ? 1 : 2;
1363 for (y = 0; y < s->mb_height * bw; y++) {
1364 for (x = 0; x < s->mb_width * bw; x++) {
/* Non-shared pictures carry an INPLACE/edge offset of 16 bytes. */
1365 int off = p->shared ? 0 : 16;
1366 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1367 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1368 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
/* |exp| selects the accumulation: max, L1, L2, L3, L4. */
1370 switch (FFABS(s->frame_skip_exp)) {
1371 case 0: score = FFMAX(score, v); break;
1372 case 1: score += FFABS(v); break;
1373 case 2: score64 += v * (int64_t)v; break;
1374 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1375 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
/* Negative exponent: normalize per-MB and take the |exp|-th root. */
1384 if (s->frame_skip_exp < 0)
1385 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1386 -1.0/s->frame_skip_exp);
1388 if (score64 < s->frame_skip_threshold)
1390 if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
/* Encode one frame with a secondary AVCodecContext (used by the B-frame
 * strategy estimator) and report the produced size.
 * NOTE(review): the lines returning the size / propagating errors are
 * elided from this view. */
1395 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1397 AVPacket pkt = { 0 };
1398 int ret, got_output;
1400 av_init_packet(&pkt);
1401 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
/* Packet data itself is not needed, only its size; free it right away. */
1406 av_packet_unref(&pkt);
/* B-frame strategy 2: brute-force search for the best number of B-frames.
 * Encodes the queued input pictures at reduced resolution (brd_scale) with
 * a throwaway encoder for every candidate B-run length j, computes a
 * rate-distortion cost, and returns the j with the lowest cost.
 * NOTE(review): this view is elided — the rd bookkeeping between the loops
 * and the best_rd/best_b_count update are not visible here. */
1410 static int estimate_best_b_count(MpegEncContext *s)
1412 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1413 AVCodecContext *c = avcodec_alloc_context3(NULL);
1414 const int scale = s->brd_scale;
1415 int i, j, out_size, p_lambda, b_lambda, lambda2;
1416 int64_t best_rd = INT64_MAX;
1417 int best_b_count = -1;
1420 return AVERROR(ENOMEM);
1421 av_assert0(scale >= 0 && scale <= 3);
1424 //s->next_picture_ptr->quality;
1425 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1426 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1427 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1428 if (!b_lambda) // FIXME we should do this somewhere else
1429 b_lambda = p_lambda;
1430 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* Configure the scratch encoder to mirror the real one at scaled size. */
1433 c->width = s->width >> scale;
1434 c->height = s->height >> scale;
1435 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1436 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1437 c->mb_decision = s->avctx->mb_decision;
1438 c->me_cmp = s->avctx->me_cmp;
1439 c->mb_cmp = s->avctx->mb_cmp;
1440 c->me_sub_cmp = s->avctx->me_sub_cmp;
1441 c->pix_fmt = AV_PIX_FMT_YUV420P;
1442 c->time_base = s->avctx->time_base;
1443 c->max_b_frames = s->max_b_frames;
1445 if (avcodec_open2(c, codec, NULL) < 0)
/* Downscale the last reference (i == 0) and the queued inputs into
 * tmp_frames[] for the trial encodes. */
1448 for (i = 0; i < s->max_b_frames + 2; i++) {
1449 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1450 s->next_picture_ptr;
1453 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1454 pre_input = *pre_input_ptr;
1455 memcpy(data, pre_input_ptr->f->data, sizeof(data));
/* Non-shared inputs are stored INPLACE_OFFSET into their buffers. */
1457 if (!pre_input.shared && i) {
1458 data[0] += INPLACE_OFFSET;
1459 data[1] += INPLACE_OFFSET;
1460 data[2] += INPLACE_OFFSET;
1463 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1464 s->tmp_frames[i]->linesize[0],
1466 pre_input.f->linesize[0],
1467 c->width, c->height);
1468 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1469 s->tmp_frames[i]->linesize[1],
1471 pre_input.f->linesize[1],
1472 c->width >> 1, c->height >> 1);
1473 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1474 s->tmp_frames[i]->linesize[2],
1476 pre_input.f->linesize[2],
1477 c->width >> 1, c->height >> 1);
/* Try each candidate B-run length j. */
1481 for (j = 0; j < s->max_b_frames + 1; j++) {
1484 if (!s->input_picture[j])
1487 c->error[0] = c->error[1] = c->error[2] = 0;
/* Frame 0 acts as the I-frame anchor for the trial GOP. */
1489 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1490 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1492 out_size = encode_frame(c, s->tmp_frames[0]);
1494 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1496 for (i = 0; i < s->max_b_frames + 1; i++) {
/* Every (j+1)-th frame (and the last) is a P-frame; the rest are B. */
1497 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1499 s->tmp_frames[i + 1]->pict_type = is_p ?
1500 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1501 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1503 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1505 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1508 /* get the delayed frames */
1510 out_size = encode_frame(c, NULL);
1511 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* Distortion term: accumulated SSE reported by the trial encoder. */
1514 rd += c->error[0] + c->error[1] + c->error[2];
1522 avcodec_free_context(&c);
1524 return best_b_count;
/* Choose the next picture to encode: decide its type (I/P/B), how many
 * B-frames precede the next reference, reorder the input queue into coded
 * order, and set up s->new_picture / s->current_picture_ptr accordingly.
 * NOTE(review): this view is elided — some branches, error paths and the
 * final return are not visible here. */
1527 static int select_input_picture(MpegEncContext *s)
/* Advance the reordered queue by one slot. */
1531 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1532 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1533 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1535 /* set next picture type & ordering */
1536 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
/* Frame-skip: drop the input entirely if it is close enough to the
 * previous reference (still counts toward VBV with 0 bits). */
1537 if (s->frame_skip_threshold || s->frame_skip_factor) {
1538 if (s->picture_in_gop_number < s->gop_size &&
1539 s->next_picture_ptr &&
1540 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1541 // FIXME check that the gop check above is +-1 correct
1542 av_frame_unref(s->input_picture[0]->f);
1544 ff_vbv_update(s, 0);
/* No reference yet, or intra-only codec: force an I-frame. */
1550 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1551 !s->next_picture_ptr || s->intra_only) {
1552 s->reordered_input_picture[0] = s->input_picture[0];
1553 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1554 s->reordered_input_picture[0]->f->coded_picture_number =
1555 s->coded_picture_number++;
/* Two-pass: picture types come from the first-pass stats file. */
1559 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1560 for (i = 0; i < s->max_b_frames + 1; i++) {
1561 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1563 if (pict_num >= s->rc_context.num_entries)
1565 if (!s->input_picture[i]) {
1566 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1570 s->input_picture[i]->f->pict_type =
1571 s->rc_context.entry[pict_num].new_pict_type;
/* Strategy 0: always use the maximum available B-run. */
1575 if (s->b_frame_strategy == 0) {
1576 b_frames = s->max_b_frames;
1577 while (b_frames && !s->input_picture[b_frames])
/* Strategy 1: score candidates by intra-block count vs the previous frame,
 * then stop the B-run at the first "too intra" frame. */
1579 } else if (s->b_frame_strategy == 1) {
1580 for (i = 1; i < s->max_b_frames + 1; i++) {
1581 if (s->input_picture[i] &&
1582 s->input_picture[i]->b_frame_score == 0) {
1583 s->input_picture[i]->b_frame_score =
1585 s->input_picture[i ]->f->data[0],
1586 s->input_picture[i - 1]->f->data[0],
1590 for (i = 0; i < s->max_b_frames + 1; i++) {
1591 if (!s->input_picture[i] ||
1592 s->input_picture[i]->b_frame_score - 1 >
1593 s->mb_num / s->b_sensitivity)
1597 b_frames = FFMAX(0, i - 1);
/* Reset scores so they are recomputed for the next run. */
1600 for (i = 0; i < b_frames + 1; i++) {
1601 s->input_picture[i]->b_frame_score = 0;
/* Strategy 2: full trial-encode search (see estimate_best_b_count). */
1603 } else if (s->b_frame_strategy == 2) {
1604 b_frames = estimate_best_b_count(s);
/* Respect explicit user-forced picture types inside the run. */
1609 for (i = b_frames - 1; i >= 0; i--) {
1610 int type = s->input_picture[i]->f->pict_type;
1611 if (type && type != AV_PICTURE_TYPE_B)
1614 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1615 b_frames == s->max_b_frames) {
1616 av_log(s->avctx, AV_LOG_ERROR,
1617 "warning, too many B-frames in a row\n");
/* GOP boundary handling: shorten/terminate the run at gop_size. */
1620 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1621 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1622 s->gop_size > s->picture_in_gop_number) {
1623 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1625 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1627 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1631 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1632 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
/* Emit the reference frame first (coded order), then the B-run. */
1635 s->reordered_input_picture[0] = s->input_picture[b_frames];
1636 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1637 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1638 s->reordered_input_picture[0]->f->coded_picture_number =
1639 s->coded_picture_number++;
1640 for (i = 0; i < b_frames; i++) {
1641 s->reordered_input_picture[i + 1] = s->input_picture[i];
1642 s->reordered_input_picture[i + 1]->f->pict_type =
1644 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1645 s->coded_picture_number++;
1650 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1652 if (s->reordered_input_picture[0]) {
/* B-frames are never used as references (reference == 0). */
1653 s->reordered_input_picture[0]->reference =
1654 s->reordered_input_picture[0]->f->pict_type !=
1655 AV_PICTURE_TYPE_B ? 3 : 0;
1657 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1660 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1661 // input is a shared pix, so we can't modify it -> allocate a new
1662 // one & ensure that the shared one is reuseable
1665 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1668 pic = &s->picture[i];
1670 pic->reference = s->reordered_input_picture[0]->reference;
1671 if (alloc_picture(s, pic, 0) < 0) {
1675 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1679 /* mark us unused / free shared pic */
1680 av_frame_unref(s->reordered_input_picture[0]->f);
1681 s->reordered_input_picture[0]->shared = 0;
1683 s->current_picture_ptr = pic;
1685 // input is not a shared pix -> reuse buffer for current_pix
1686 s->current_picture_ptr = s->reordered_input_picture[0];
1687 for (i = 0; i < 4; i++) {
1688 s->new_picture.f->data[i] += INPLACE_OFFSET;
1691 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1692 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1693 s->current_picture_ptr)) < 0)
1696 s->picture_number = s->new_picture.f->display_picture_number;
/* Post-encode bookkeeping for the just-coded frame: pad reference-frame
 * edges for unrestricted MV search, remember per-type lambdas, and mirror
 * data into the deprecated coded_frame/error fields.
 * NOTE(review): a few lines (e.g. the emms/cleanup between sections) are
 * elided from this view. */
1701 static void frame_end(MpegEncContext *s)
/* Only reference pictures need padded edges (used by motion estimation
 * of subsequent frames). */
1703 if (s->unrestricted_mv &&
1704 s->current_picture.reference &&
1706 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1707 int hshift = desc->log2_chroma_w;
1708 int vshift = desc->log2_chroma_h;
1709 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1710 s->current_picture.f->linesize[0],
1711 s->h_edge_pos, s->v_edge_pos,
1712 EDGE_WIDTH, EDGE_WIDTH,
1713 EDGE_TOP | EDGE_BOTTOM);
1714 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1715 s->current_picture.f->linesize[1],
1716 s->h_edge_pos >> hshift,
1717 s->v_edge_pos >> vshift,
1718 EDGE_WIDTH >> hshift,
1719 EDGE_WIDTH >> vshift,
1720 EDGE_TOP | EDGE_BOTTOM);
1721 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1722 s->current_picture.f->linesize[2],
1723 s->h_edge_pos >> hshift,
1724 s->v_edge_pos >> vshift,
1725 EDGE_WIDTH >> hshift,
1726 EDGE_WIDTH >> vshift,
1727 EDGE_TOP | EDGE_BOTTOM);
/* Remember lambda per picture type for future rate-control decisions. */
1732 s->last_pict_type = s->pict_type;
1733 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1734 if (s->pict_type!= AV_PICTURE_TYPE_B)
1735 s->last_non_b_pict_type = s->pict_type;
/* Legacy API mirrors, compiled only while the deprecated fields exist. */
1737 #if FF_API_CODED_FRAME
1738 FF_DISABLE_DEPRECATION_WARNINGS
1739 av_frame_unref(s->avctx->coded_frame);
1740 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1741 FF_ENABLE_DEPRECATION_WARNINGS
1743 #if FF_API_ERROR_FRAME
1744 FF_DISABLE_DEPRECATION_WARNINGS
1745 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1746 sizeof(s->current_picture.encoding_error));
1747 FF_ENABLE_DEPRECATION_WARNINGS
/* Refresh the per-coefficient DCT noise-reduction offsets from the running
 * error statistics, separately for intra and inter blocks. Counters are
 * halved once they exceed 2^16 so the statistics track recent content. */
1751 static void update_noise_reduction(MpegEncContext *s)
1755 for (intra = 0; intra < 2; intra++) {
1756 if (s->dct_count[intra] > (1 << 16)) {
1757 for (i = 0; i < 64; i++) {
1758 s->dct_error_sum[intra][i] >>= 1;
1760 s->dct_count[intra] >>= 1;
/* offset[i] ~= noise_reduction * count / error_sum[i]; the +1 and /2
 * terms provide rounding and avoid division by zero. */
1763 for (i = 0; i < 64; i++) {
1764 s->dct_offset[intra][i] = (s->noise_reduction *
1765 s->dct_count[intra] +
1766 s->dct_error_sum[intra][i] / 2) /
1767 (s->dct_error_sum[intra][i] + 1);
/* Per-frame setup before encoding: rotate last/next reference pointers,
 * re-reference the current/last/next Pictures, adjust data pointers and
 * strides for field pictures, pick the matching dequantizers, and update
 * noise-reduction tables.
 * NOTE(review): error returns and the final return are elided from view. */
1772 static int frame_start(MpegEncContext *s)
1776 /* mark & release old frames */
1777 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1778 s->last_picture_ptr != s->next_picture_ptr &&
1779 s->last_picture_ptr->f->buf[0]) {
1780 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1783 s->current_picture_ptr->f->pict_type = s->pict_type;
1784 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1786 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1787 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1788 s->current_picture_ptr)) < 0)
/* Non-B frames become the new forward reference chain. */
1791 if (s->pict_type != AV_PICTURE_TYPE_B) {
1792 s->last_picture_ptr = s->next_picture_ptr;
1794 s->next_picture_ptr = s->current_picture_ptr;
1797 if (s->last_picture_ptr) {
1798 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1799 if (s->last_picture_ptr->f->buf[0] &&
1800 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1801 s->last_picture_ptr)) < 0)
1804 if (s->next_picture_ptr) {
1805 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1806 if (s->next_picture_ptr->f->buf[0] &&
1807 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1808 s->next_picture_ptr)) < 0)
/* Field coding: double line strides and, for the bottom field, start one
 * line into the frame. */
1812 if (s->picture_structure!= PICT_FRAME) {
1814 for (i = 0; i < 4; i++) {
1815 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1816 s->current_picture.f->data[i] +=
1817 s->current_picture.f->linesize[i];
1819 s->current_picture.f->linesize[i] *= 2;
1820 s->last_picture.f->linesize[i] *= 2;
1821 s->next_picture.f->linesize[i] *= 2;
/* Select dequantizer family matching the output format. */
1825 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1826 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1827 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1828 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1829 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1830 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1832 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1833 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1836 if (s->dct_error_sum) {
1837 av_assert2(s->noise_reduction && s->encoding);
1838 update_noise_reduction(s);
/* Top-level encode entry point: queue the input picture, select/reorder the
 * next one to code, allocate the output packet, run the (possibly threaded)
 * encode, enforce VBV (re-encoding at higher lambda or stuffing as needed),
 * patch MPEG-1/2 vbv_delay for CBR, and fill packet pts/dts/flags.
 * NOTE(review): this view is elided — several error paths, the flush path
 * and the final return are not visible here. */
1844 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1845 const AVFrame *pic_arg, int *got_packet)
1847 MpegEncContext *s = avctx->priv_data;
1848 int i, stuffing_count, ret;
1849 int context_count = s->slice_context_count;
1851 s->vbv_ignore_qmax = 0;
1853 s->picture_in_gop_number++;
1855 if (load_input_picture(s, pic_arg) < 0)
1858 if (select_input_picture(s) < 0) {
/* output? */
1863 if (s->new_picture.f->data[0]) {
/* Single-slice encodes can grow the internal byte buffer on demand;
 * otherwise size the packet for the worst case. */
1864 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1865 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1867 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1868 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1871 s->mb_info_ptr = av_packet_new_side_data(pkt,
1872 AV_PKT_DATA_H263_MB_INFO,
1873 s->mb_width*s->mb_height*12);
1874 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* Give each slice thread a proportional share of the output buffer. */
1877 for (i = 0; i < context_count; i++) {
1878 int start_y = s->thread_context[i]->start_mb_y;
1879 int end_y = s->thread_context[i]-> end_mb_y;
1880 int h = s->mb_height;
1881 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1882 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1884 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1887 s->pict_type = s->new_picture.f->pict_type;
1889 ret = frame_start(s);
1893 ret = encode_picture(s, s->picture_number);
1894 if (growing_buffer) {
1895 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1896 pkt->data = s->pb.buf;
1897 pkt->size = avctx->internal->byte_buffer_size;
/* Legacy per-frame bit statistics, kept while the API fields exist. */
1902 #if FF_API_STAT_BITS
1903 FF_DISABLE_DEPRECATION_WARNINGS
1904 avctx->header_bits = s->header_bits;
1905 avctx->mv_bits = s->mv_bits;
1906 avctx->misc_bits = s->misc_bits;
1907 avctx->i_tex_bits = s->i_tex_bits;
1908 avctx->p_tex_bits = s->p_tex_bits;
1909 avctx->i_count = s->i_count;
1910 // FIXME f/b_count in avctx
1911 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1912 avctx->skip_count = s->skip_count;
1913 FF_ENABLE_DEPRECATION_WARNINGS
1918 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1919 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
/* VBV enforcement: if the frame overflowed the buffer model and lambda
 * can still rise, bump lambda (and the per-MB table) and re-encode. */
1921 if (avctx->rc_buffer_size) {
1922 RateControlContext *rcc = &s->rc_context;
1923 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1924 int hq = (s->avctx->mb_decision == FF_MB_DECISION_RD || s->avctx->trellis);
1925 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1927 if (put_bits_count(&s->pb) > max_size &&
1928 s->lambda < s->lmax) {
1929 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1930 (s->qscale + 1) / s->qscale);
1931 if (s->adaptive_quant) {
1933 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1934 s->lambda_table[i] =
1935 FFMAX(s->lambda_table[i] + min_step,
1936 s->lambda_table[i] * (s->qscale + 1) /
1939 s->mb_skipped = 0; // done in frame_start()
1940 // done in encode_picture() so we must undo it
1941 if (s->pict_type == AV_PICTURE_TYPE_P) {
1942 if (s->flipflop_rounding ||
1943 s->codec_id == AV_CODEC_ID_H263P ||
1944 s->codec_id == AV_CODEC_ID_MPEG4)
1945 s->no_rounding ^= 1;
1947 if (s->pict_type != AV_PICTURE_TYPE_B) {
1948 s->time_base = s->last_time_base;
1949 s->last_non_b_time = s->time - s->pp_time;
/* Rewind each slice's bit writer before the retry. */
1951 for (i = 0; i < context_count; i++) {
1952 PutBitContext *pb = &s->thread_context[i]->pb;
1953 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1955 s->vbv_ignore_qmax = 1;
1956 av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1960 av_assert0(s->avctx->rc_max_rate);
1963 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1964 ff_write_pass1_stats(s);
1966 for (i = 0; i < 4; i++) {
1967 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1968 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1970 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1971 s->current_picture_ptr->encoding_error,
1972 (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1975 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1976 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1977 s->misc_bits + s->i_tex_bits +
1979 flush_put_bits(&s->pb);
1980 s->frame_bits = put_bits_count(&s->pb);
/* VBV stuffing: pad the frame with codec-specific filler bits. */
1982 stuffing_count = ff_vbv_update(s, s->frame_bits);
1983 s->stuffing_bits = 8*stuffing_count;
1984 if (stuffing_count) {
1985 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1986 stuffing_count + 50) {
1987 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1991 switch (s->codec_id) {
1992 case AV_CODEC_ID_MPEG1VIDEO:
1993 case AV_CODEC_ID_MPEG2VIDEO:
1994 while (stuffing_count--) {
1995 put_bits(&s->pb, 8, 0);
/* MPEG-4 uses a dedicated stuffing start code (0x1C3) then 0xFF bytes. */
1998 case AV_CODEC_ID_MPEG4:
1999 put_bits(&s->pb, 16, 0);
2000 put_bits(&s->pb, 16, 0x1C3);
2001 stuffing_count -= 4;
2002 while (stuffing_count--) {
2003 put_bits(&s->pb, 8, 0xFF);
2007 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2009 flush_put_bits(&s->pb);
2010 s->frame_bits = put_bits_count(&s->pb);
2013 /* update MPEG-1/2 vbv_delay for CBR */
2014 if (s->avctx->rc_max_rate &&
2015 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
2016 s->out_format == FMT_MPEG1 &&
2017 90000LL * (avctx->rc_buffer_size - 1) <=
2018 s->avctx->rc_max_rate * 0xFFFFLL) {
2019 AVCPBProperties *props;
2022 int vbv_delay, min_delay;
2023 double inbits = s->avctx->rc_max_rate *
2024 av_q2d(s->avctx->time_base);
2025 int minbits = s->frame_bits - 8 *
2026 (s->vbv_delay_ptr - s->pb.buf - 1);
2027 double bits = s->rc_context.buffer_index + minbits - inbits;
2030 av_log(s->avctx, AV_LOG_ERROR,
2031 "Internal error, negative bits\n");
2033 assert(s->repeat_first_field == 0);
/* vbv_delay is expressed in 90 kHz clock ticks. */
2035 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
2036 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
2037 s->avctx->rc_max_rate;
2039 vbv_delay = FFMAX(vbv_delay, min_delay);
2041 av_assert0(vbv_delay < 0xFFFF);
/* Patch the 16-bit vbv_delay field in-place in the already-written
 * picture header (it straddles three bytes). */
2043 s->vbv_delay_ptr[0] &= 0xF8;
2044 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2045 s->vbv_delay_ptr[1] = vbv_delay >> 5;
2046 s->vbv_delay_ptr[2] &= 0x07;
2047 s->vbv_delay_ptr[2] |= vbv_delay << 3;
2049 props = av_cpb_properties_alloc(&props_size);
2051 return AVERROR(ENOMEM);
2052 props->vbv_delay = vbv_delay * 300;
2054 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
2055 (uint8_t*)props, props_size);
2061 #if FF_API_VBV_DELAY
2062 FF_DISABLE_DEPRECATION_WARNINGS
2063 avctx->vbv_delay = vbv_delay * 300;
2064 FF_ENABLE_DEPRECATION_WARNINGS
2067 s->total_bits += s->frame_bits;
2068 #if FF_API_STAT_BITS
2069 FF_DISABLE_DEPRECATION_WARNINGS
2070 avctx->frame_bits = s->frame_bits;
2071 FF_ENABLE_DEPRECATION_WARNINGS
/* Set packet timestamps: dts lags pts by the reorder delay for B-frames. */
2075 pkt->pts = s->current_picture.f->pts;
2076 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2077 if (!s->current_picture.f->coded_picture_number)
2078 pkt->dts = pkt->pts - s->dts_delta;
2080 pkt->dts = s->reordered_pts;
2081 s->reordered_pts = pkt->pts;
2083 pkt->dts = pkt->pts;
2084 if (s->current_picture.f->key_frame)
2085 pkt->flags |= AV_PKT_FLAG_KEY;
2087 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2092 /* release non-reference frames */
2093 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2094 if (!s->picture[i].reference)
2095 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2098 av_assert1((s->frame_bits & 7) == 0);
2100 pkt->size = s->frame_bits / 8;
2101 *got_packet = !!pkt->size;
/* Zero out a block whose quantized coefficients are all small and confined
 * to low frequencies: each |coeff|==1 contributes a position-dependent
 * weight from tab[] (higher for low frequencies); any |coeff|>1 or any
 * nonzero outside the first 24 scan positions keeps the block. A negative
 * threshold additionally preserves the DC coefficient (skip_dc).
 * NOTE(review): score accumulation and the early-exit branches are partly
 * elided from this view. */
2105 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2106 int n, int threshold)
/* Weight per scan position; zero beyond position 23 — a nonzero level
 * there is treated via the "level > 1 or out of range" path. */
2108 static const char tab[64] = {
2109 3, 2, 2, 1, 1, 1, 1, 1,
2110 1, 1, 1, 1, 1, 1, 1, 1,
2111 1, 1, 1, 1, 1, 1, 1, 1,
2112 0, 0, 0, 0, 0, 0, 0, 0,
2113 0, 0, 0, 0, 0, 0, 0, 0,
2114 0, 0, 0, 0, 0, 0, 0, 0,
2115 0, 0, 0, 0, 0, 0, 0, 0,
2116 0, 0, 0, 0, 0, 0, 0, 0
2121 int16_t *block = s->block[n];
2122 const int last_index = s->block_last_index[n];
2125 if (threshold < 0) {
2127 threshold = -threshold;
2131 /* Are all we could set to zero already zero? */
2132 if (last_index <= skip_dc - 1)
2135 for (i = 0; i <= last_index; i++) {
2136 const int j = s->intra_scantable.permutated[i];
2137 const int level = FFABS(block[j]);
2139 if (skip_dc && i == 0)
2143 } else if (level > 1) {
2149 if (score >= threshold)
/* Clear everything (except possibly DC) and fix up last_index. */
2151 for (i = skip_dc; i <= last_index; i++) {
2152 const int j = s->intra_scantable.permutated[i];
2156 s->block_last_index[n] = 0;
2158 s->block_last_index[n] = -1;
/* Clamp quantized coefficients into [min_qcoeff, max_qcoeff] (the range
 * representable by the codec's VLC tables), counting how many overflowed.
 * Intra DC is never clipped. Logs a warning in simple MB-decision mode
 * where the overflow cannot be avoided by RD decisions. */
2161 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2165 const int maxlevel = s->max_qcoeff;
2166 const int minlevel = s->min_qcoeff;
2170 i = 1; // skip clipping of intra dc
2174 for (; i <= last_index; i++) {
2175 const int j = s->intra_scantable.permutated[i];
2176 int level = block[j];
2178 if (level > maxlevel) {
2181 } else if (level < minlevel) {
2189 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2190 av_log(s->avctx, AV_LOG_INFO,
2191 "warning, clipping %d dct coefficients to %d..%d\n",
2192 overflow, minlevel, maxlevel);
/* Compute a perceptual weight for each pixel of an 8x8 block from the
 * local variance of its 3x3 neighbourhood (clamped at the block border):
 * flat areas get higher weight (errors are more visible there).
 * NOTE(review): the count/sum/sqr accumulation lines are elided here. */
2195 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2199 for (y = 0; y < 8; y++) {
2200 for (x = 0; x < 8; x++) {
2206 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2207 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2208 int v = ptr[x2 + y2 * stride];
/* 36*sqrt(variance*count^2)/count — integer approximation of std dev. */
2214 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2219 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2220 int motion_x, int motion_y,
2221 int mb_block_height,
2225 int16_t weight[12][64];
2226 int16_t orig[12][64];
2227 const int mb_x = s->mb_x;
2228 const int mb_y = s->mb_y;
2231 int dct_offset = s->linesize * 8; // default for progressive frames
2232 int uv_dct_offset = s->uvlinesize * 8;
2233 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2234 ptrdiff_t wrap_y, wrap_c;
2236 for (i = 0; i < mb_block_count; i++)
2237 skip_dct[i] = s->skipdct;
2239 if (s->adaptive_quant) {
2240 const int last_qp = s->qscale;
2241 const int mb_xy = mb_x + mb_y * s->mb_stride;
2243 s->lambda = s->lambda_table[mb_xy];
2246 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2247 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2248 s->dquant = s->qscale - last_qp;
2250 if (s->out_format == FMT_H263) {
2251 s->dquant = av_clip(s->dquant, -2, 2);
2253 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2255 if (s->pict_type == AV_PICTURE_TYPE_B) {
2256 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2259 if (s->mv_type == MV_TYPE_8X8)
2265 ff_set_qscale(s, last_qp + s->dquant);
2266 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2267 ff_set_qscale(s, s->qscale + s->dquant);
2269 wrap_y = s->linesize;
2270 wrap_c = s->uvlinesize;
2271 ptr_y = s->new_picture.f->data[0] +
2272 (mb_y * 16 * wrap_y) + mb_x * 16;
2273 ptr_cb = s->new_picture.f->data[1] +
2274 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2275 ptr_cr = s->new_picture.f->data[2] +
2276 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2278 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2279 uint8_t *ebuf = s->sc.edge_emu_buffer + 36 * wrap_y;
2280 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2281 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2282 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2284 16, 16, mb_x * 16, mb_y * 16,
2285 s->width, s->height);
2287 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2289 mb_block_width, mb_block_height,
2290 mb_x * mb_block_width, mb_y * mb_block_height,
2292 ptr_cb = ebuf + 16 * wrap_y;
2293 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2295 mb_block_width, mb_block_height,
2296 mb_x * mb_block_width, mb_y * mb_block_height,
2298 ptr_cr = ebuf + 16 * wrap_y + 16;
2302 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2303 int progressive_score, interlaced_score;
2305 s->interlaced_dct = 0;
2306 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2307 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2308 NULL, wrap_y, 8) - 400;
2310 if (progressive_score > 0) {
2311 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2312 NULL, wrap_y * 2, 8) +
2313 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2314 NULL, wrap_y * 2, 8);
2315 if (progressive_score > interlaced_score) {
2316 s->interlaced_dct = 1;
2318 dct_offset = wrap_y;
2319 uv_dct_offset = wrap_c;
2321 if (s->chroma_format == CHROMA_422 ||
2322 s->chroma_format == CHROMA_444)
2328 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2329 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2330 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2331 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2333 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2337 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2338 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2339 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2340 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2341 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2342 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2343 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2344 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2345 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2346 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2347 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2348 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2352 op_pixels_func (*op_pix)[4];
2353 qpel_mc_func (*op_qpix)[16];
2354 uint8_t *dest_y, *dest_cb, *dest_cr;
2356 dest_y = s->dest[0];
2357 dest_cb = s->dest[1];
2358 dest_cr = s->dest[2];
2360 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2361 op_pix = s->hdsp.put_pixels_tab;
2362 op_qpix = s->qdsp.put_qpel_pixels_tab;
2364 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2365 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2368 if (s->mv_dir & MV_DIR_FORWARD) {
2369 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2370 s->last_picture.f->data,
2372 op_pix = s->hdsp.avg_pixels_tab;
2373 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2375 if (s->mv_dir & MV_DIR_BACKWARD) {
2376 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2377 s->next_picture.f->data,
2381 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2382 int progressive_score, interlaced_score;
2384 s->interlaced_dct = 0;
2385 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2386 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2390 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2391 progressive_score -= 400;
2393 if (progressive_score > 0) {
2394 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2396 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2400 if (progressive_score > interlaced_score) {
2401 s->interlaced_dct = 1;
2403 dct_offset = wrap_y;
2404 uv_dct_offset = wrap_c;
2406 if (s->chroma_format == CHROMA_422)
2412 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2413 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2414 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2415 dest_y + dct_offset, wrap_y);
2416 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2417 dest_y + dct_offset + 8, wrap_y);
2419 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2423 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2424 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2425 if (!s->chroma_y_shift) { /* 422 */
2426 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2427 dest_cb + uv_dct_offset, wrap_c);
2428 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2429 dest_cr + uv_dct_offset, wrap_c);
2432 /* pre quantization */
2433 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2434 2 * s->qscale * s->qscale) {
2436 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2438 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2440 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2441 wrap_y, 8) < 20 * s->qscale)
2443 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2444 wrap_y, 8) < 20 * s->qscale)
2446 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2448 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2450 if (!s->chroma_y_shift) { /* 422 */
2451 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2452 dest_cb + uv_dct_offset,
2453 wrap_c, 8) < 20 * s->qscale)
2455 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2456 dest_cr + uv_dct_offset,
2457 wrap_c, 8) < 20 * s->qscale)
2463 if (s->quantizer_noise_shaping) {
2465 get_visual_weight(weight[0], ptr_y , wrap_y);
2467 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2469 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2471 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2473 get_visual_weight(weight[4], ptr_cb , wrap_c);
2475 get_visual_weight(weight[5], ptr_cr , wrap_c);
2476 if (!s->chroma_y_shift) { /* 422 */
2478 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2481 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2484 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2487 /* DCT & quantize */
2488 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2490 for (i = 0; i < mb_block_count; i++) {
2493 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2494 // FIXME we could decide to change to quantizer instead of
2496 // JS: I don't think that would be a good idea it could lower
2497 // quality instead of improve it. Just INTRADC clipping
2498 // deserves changes in quantizer
2500 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2502 s->block_last_index[i] = -1;
2504 if (s->quantizer_noise_shaping) {
2505 for (i = 0; i < mb_block_count; i++) {
2507 s->block_last_index[i] =
2508 dct_quantize_refine(s, s->block[i], weight[i],
2509 orig[i], i, s->qscale);
2514 if (s->luma_elim_threshold && !s->mb_intra)
2515 for (i = 0; i < 4; i++)
2516 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2517 if (s->chroma_elim_threshold && !s->mb_intra)
2518 for (i = 4; i < mb_block_count; i++)
2519 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2521 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2522 for (i = 0; i < mb_block_count; i++) {
2523 if (s->block_last_index[i] == -1)
2524 s->coded_score[i] = INT_MAX / 256;
2529 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2530 s->block_last_index[4] =
2531 s->block_last_index[5] = 0;
2533 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2534 if (!s->chroma_y_shift) { /* 422 / 444 */
2535 for (i=6; i<12; i++) {
2536 s->block_last_index[i] = 0;
2537 s->block[i][0] = s->block[4][0];
2542 // non c quantize code returns incorrect block_last_index FIXME
2543 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2544 for (i = 0; i < mb_block_count; i++) {
2546 if (s->block_last_index[i] > 0) {
2547 for (j = 63; j > 0; j--) {
2548 if (s->block[i][s->intra_scantable.permutated[j]])
2551 s->block_last_index[i] = j;
2556 /* huffman encode */
2557 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2558 case AV_CODEC_ID_MPEG1VIDEO:
2559 case AV_CODEC_ID_MPEG2VIDEO:
2560 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2561 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2563 case AV_CODEC_ID_MPEG4:
2564 if (CONFIG_MPEG4_ENCODER)
2565 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2567 case AV_CODEC_ID_MSMPEG4V2:
2568 case AV_CODEC_ID_MSMPEG4V3:
2569 case AV_CODEC_ID_WMV1:
2570 if (CONFIG_MSMPEG4_ENCODER)
2571 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2573 case AV_CODEC_ID_WMV2:
2574 if (CONFIG_WMV2_ENCODER)
2575 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2577 case AV_CODEC_ID_H261:
2578 if (CONFIG_H261_ENCODER)
2579 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2581 case AV_CODEC_ID_H263:
2582 case AV_CODEC_ID_H263P:
2583 case AV_CODEC_ID_FLV1:
2584 case AV_CODEC_ID_RV10:
2585 case AV_CODEC_ID_RV20:
2586 if (CONFIG_H263_ENCODER)
2587 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2589 case AV_CODEC_ID_MJPEG:
2590 case AV_CODEC_ID_AMV:
2591 if (CONFIG_MJPEG_ENCODER)
2592 ff_mjpeg_encode_mb(s, s->block);
2599 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2601 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2602 else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2603 else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
2606 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2609 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2612 d->mb_skip_run= s->mb_skip_run;
2614 d->last_dc[i] = s->last_dc[i];
2617 d->mv_bits= s->mv_bits;
2618 d->i_tex_bits= s->i_tex_bits;
2619 d->p_tex_bits= s->p_tex_bits;
2620 d->i_count= s->i_count;
2621 d->f_count= s->f_count;
2622 d->b_count= s->b_count;
2623 d->skip_count= s->skip_count;
2624 d->misc_bits= s->misc_bits;
2628 d->qscale= s->qscale;
2629 d->dquant= s->dquant;
2631 d->esc3_level_length= s->esc3_level_length;
2634 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2637 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2638 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2641 d->mb_skip_run= s->mb_skip_run;
2643 d->last_dc[i] = s->last_dc[i];
2646 d->mv_bits= s->mv_bits;
2647 d->i_tex_bits= s->i_tex_bits;
2648 d->p_tex_bits= s->p_tex_bits;
2649 d->i_count= s->i_count;
2650 d->f_count= s->f_count;
2651 d->b_count= s->b_count;
2652 d->skip_count= s->skip_count;
2653 d->misc_bits= s->misc_bits;
2655 d->mb_intra= s->mb_intra;
2656 d->mb_skipped= s->mb_skipped;
2657 d->mv_type= s->mv_type;
2658 d->mv_dir= s->mv_dir;
2660 if(s->data_partitioning){
2662 d->tex_pb= s->tex_pb;
2666 d->block_last_index[i]= s->block_last_index[i];
2667 d->interlaced_dct= s->interlaced_dct;
2668 d->qscale= s->qscale;
2670 d->esc3_level_length= s->esc3_level_length;
2673 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2674 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2675 int *dmin, int *next_block, int motion_x, int motion_y)
2678 uint8_t *dest_backup[3];
2680 copy_context_before_encode(s, backup, type);
2682 s->block= s->blocks[*next_block];
2683 s->pb= pb[*next_block];
2684 if(s->data_partitioning){
2685 s->pb2 = pb2 [*next_block];
2686 s->tex_pb= tex_pb[*next_block];
2690 memcpy(dest_backup, s->dest, sizeof(s->dest));
2691 s->dest[0] = s->sc.rd_scratchpad;
2692 s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2693 s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2694 av_assert0(s->linesize >= 32); //FIXME
2697 encode_mb(s, motion_x, motion_y);
2699 score= put_bits_count(&s->pb);
2700 if(s->data_partitioning){
2701 score+= put_bits_count(&s->pb2);
2702 score+= put_bits_count(&s->tex_pb);
2705 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2706 ff_mpv_decode_mb(s, s->block);
2708 score *= s->lambda2;
2709 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2713 memcpy(s->dest, dest_backup, sizeof(s->dest));
2720 copy_context_after_encode(best, s, type);
2724 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2725 uint32_t *sq = ff_square_tab + 256;
2730 return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2731 else if(w==8 && h==8)
2732 return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2736 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2745 static int sse_mb(MpegEncContext *s){
2749 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2750 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2753 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2754 return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2755 s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2756 s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2758 return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2759 s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2760 s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2763 return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2764 +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2765 +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2768 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2769 MpegEncContext *s= *(void**)arg;
2773 s->me.dia_size= s->avctx->pre_dia_size;
2774 s->first_slice_line=1;
2775 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2776 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2777 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2779 s->first_slice_line=0;
2787 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2788 MpegEncContext *s= *(void**)arg;
2790 ff_check_alignment();
2792 s->me.dia_size= s->avctx->dia_size;
2793 s->first_slice_line=1;
2794 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2795 s->mb_x=0; //for block init below
2796 ff_init_block_index(s);
2797 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2798 s->block_index[0]+=2;
2799 s->block_index[1]+=2;
2800 s->block_index[2]+=2;
2801 s->block_index[3]+=2;
2803 /* compute motion vector & mb_type and store in context */
2804 if(s->pict_type==AV_PICTURE_TYPE_B)
2805 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2807 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2809 s->first_slice_line=0;
2814 static int mb_var_thread(AVCodecContext *c, void *arg){
2815 MpegEncContext *s= *(void**)arg;
2818 ff_check_alignment();
2820 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2821 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2824 uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2826 int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2828 varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2829 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2831 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2832 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2833 s->me.mb_var_sum_temp += varc;
2839 static void write_slice_end(MpegEncContext *s){
2840 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2841 if(s->partitioned_frame){
2842 ff_mpeg4_merge_partitions(s);
2845 ff_mpeg4_stuffing(&s->pb);
2846 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2847 ff_mjpeg_encode_stuffing(s);
2850 avpriv_align_put_bits(&s->pb);
2851 flush_put_bits(&s->pb);
2853 if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2854 s->misc_bits+= get_bits_diff(s);
2857 static void write_mb_info(MpegEncContext *s)
2859 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2860 int offset = put_bits_count(&s->pb);
2861 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2862 int gobn = s->mb_y / s->gob_index;
2864 if (CONFIG_H263_ENCODER)
2865 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2866 bytestream_put_le32(&ptr, offset);
2867 bytestream_put_byte(&ptr, s->qscale);
2868 bytestream_put_byte(&ptr, gobn);
2869 bytestream_put_le16(&ptr, mba);
2870 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2871 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2872 /* 4MV not implemented */
2873 bytestream_put_byte(&ptr, 0); /* hmv2 */
2874 bytestream_put_byte(&ptr, 0); /* vmv2 */
2877 static void update_mb_info(MpegEncContext *s, int startcode)
2881 if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2882 s->mb_info_size += 12;
2883 s->prev_mb_info = s->last_mb_info;
2886 s->prev_mb_info = put_bits_count(&s->pb)/8;
2887 /* This might have incremented mb_info_size above, and we return without
2888 * actually writing any info into that slot yet. But in that case,
2889 * this will be called again at the start of the after writing the
2890 * start code, actually writing the mb info. */
2894 s->last_mb_info = put_bits_count(&s->pb)/8;
2895 if (!s->mb_info_size)
2896 s->mb_info_size += 12;
2900 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2902 if ( s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2903 && s->slice_context_count == 1
2904 && s->pb.buf == s->avctx->internal->byte_buffer) {
2905 int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2906 int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2908 uint8_t *new_buffer = NULL;
2909 int new_buffer_size = 0;
2911 if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2912 av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2913 return AVERROR(ENOMEM);
2918 av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2919 s->avctx->internal->byte_buffer_size + size_increase);
2921 return AVERROR(ENOMEM);
2923 memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2924 av_free(s->avctx->internal->byte_buffer);
2925 s->avctx->internal->byte_buffer = new_buffer;
2926 s->avctx->internal->byte_buffer_size = new_buffer_size;
2927 rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2928 s->ptr_lastgob = s->pb.buf + lastgob_pos;
2929 s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2931 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2932 return AVERROR(EINVAL);
2936 static int encode_thread(AVCodecContext *c, void *arg){
2937 MpegEncContext *s= *(void**)arg;
2939 int chr_h= 16>>s->chroma_y_shift;
2941 MpegEncContext best_s = { 0 }, backup_s;
2942 uint8_t bit_buf[2][MAX_MB_BYTES];
2943 uint8_t bit_buf2[2][MAX_MB_BYTES];
2944 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2945 PutBitContext pb[2], pb2[2], tex_pb[2];
2947 ff_check_alignment();
2950 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2951 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2952 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2955 s->last_bits= put_bits_count(&s->pb);
2966 /* init last dc values */
2967 /* note: quant matrix value (8) is implied here */
2968 s->last_dc[i] = 128 << s->intra_dc_precision;
2970 s->current_picture.encoding_error[i] = 0;
2972 if(s->codec_id==AV_CODEC_ID_AMV){
2973 s->last_dc[0] = 128*8/13;
2974 s->last_dc[1] = 128*8/14;
2975 s->last_dc[2] = 128*8/14;
2978 memset(s->last_mv, 0, sizeof(s->last_mv));
2982 switch(s->codec_id){
2983 case AV_CODEC_ID_H263:
2984 case AV_CODEC_ID_H263P:
2985 case AV_CODEC_ID_FLV1:
2986 if (CONFIG_H263_ENCODER)
2987 s->gob_index = H263_GOB_HEIGHT(s->height);
2989 case AV_CODEC_ID_MPEG4:
2990 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2991 ff_mpeg4_init_partitions(s);
2997 s->first_slice_line = 1;
2998 s->ptr_lastgob = s->pb.buf;
2999 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
3003 ff_set_qscale(s, s->qscale);
3004 ff_init_block_index(s);
3006 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3007 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
3008 int mb_type= s->mb_type[xy];
3012 int size_increase = s->avctx->internal->byte_buffer_size/4
3013 + s->mb_width*MAX_MB_BYTES;
3015 ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
3016 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
3017 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
3020 if(s->data_partitioning){
3021 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
3022 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
3023 av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3029 s->mb_y = mb_y; // moved into loop, can get changed by H.261
3030 ff_update_block_index(s);
3032 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3033 ff_h261_reorder_mb_index(s);
3034 xy= s->mb_y*s->mb_stride + s->mb_x;
3035 mb_type= s->mb_type[xy];
3038 /* write gob / video packet header */
3040 int current_packet_size, is_gob_start;
3042 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
3044 is_gob_start = s->rtp_payload_size &&
3045 current_packet_size >= s->rtp_payload_size &&
3048 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3050 switch(s->codec_id){
3051 case AV_CODEC_ID_H263:
3052 case AV_CODEC_ID_H263P:
3053 if(!s->h263_slice_structured)
3054 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3056 case AV_CODEC_ID_MPEG2VIDEO:
3057 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3058 case AV_CODEC_ID_MPEG1VIDEO:
3059 if(s->mb_skip_run) is_gob_start=0;
3061 case AV_CODEC_ID_MJPEG:
3062 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3067 if(s->start_mb_y != mb_y || mb_x!=0){
3070 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3071 ff_mpeg4_init_partitions(s);
3075 av_assert2((put_bits_count(&s->pb)&7) == 0);
3076 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3078 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3079 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3080 int d = 100 / s->error_rate;
3082 current_packet_size=0;
3083 s->pb.buf_ptr= s->ptr_lastgob;
3084 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3088 #if FF_API_RTP_CALLBACK
3089 FF_DISABLE_DEPRECATION_WARNINGS
3090 if (s->avctx->rtp_callback){
3091 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3092 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3094 FF_ENABLE_DEPRECATION_WARNINGS
3096 update_mb_info(s, 1);
3098 switch(s->codec_id){
3099 case AV_CODEC_ID_MPEG4:
3100 if (CONFIG_MPEG4_ENCODER) {
3101 ff_mpeg4_encode_video_packet_header(s);
3102 ff_mpeg4_clean_buffers(s);
3105 case AV_CODEC_ID_MPEG1VIDEO:
3106 case AV_CODEC_ID_MPEG2VIDEO:
3107 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3108 ff_mpeg1_encode_slice_header(s);
3109 ff_mpeg1_clean_buffers(s);
3112 case AV_CODEC_ID_H263:
3113 case AV_CODEC_ID_H263P:
3114 if (CONFIG_H263_ENCODER)
3115 ff_h263_encode_gob_header(s, mb_y);
3119 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3120 int bits= put_bits_count(&s->pb);
3121 s->misc_bits+= bits - s->last_bits;
3125 s->ptr_lastgob += current_packet_size;
3126 s->first_slice_line=1;
3127 s->resync_mb_x=mb_x;
3128 s->resync_mb_y=mb_y;
3132 if( (s->resync_mb_x == s->mb_x)
3133 && s->resync_mb_y+1 == s->mb_y){
3134 s->first_slice_line=0;
3138 s->dquant=0; //only for QP_RD
3140 update_mb_info(s, 0);
3142 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3144 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3146 copy_context_before_encode(&backup_s, s, -1);
3148 best_s.data_partitioning= s->data_partitioning;
3149 best_s.partitioned_frame= s->partitioned_frame;
3150 if(s->data_partitioning){
3151 backup_s.pb2= s->pb2;
3152 backup_s.tex_pb= s->tex_pb;
3155 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3156 s->mv_dir = MV_DIR_FORWARD;
3157 s->mv_type = MV_TYPE_16X16;
3159 s->mv[0][0][0] = s->p_mv_table[xy][0];
3160 s->mv[0][0][1] = s->p_mv_table[xy][1];
3161 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3162 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3164 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3165 s->mv_dir = MV_DIR_FORWARD;
3166 s->mv_type = MV_TYPE_FIELD;
3169 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3170 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3171 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3173 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3174 &dmin, &next_block, 0, 0);
3176 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3177 s->mv_dir = MV_DIR_FORWARD;
3178 s->mv_type = MV_TYPE_16X16;
3182 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3183 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3185 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3186 s->mv_dir = MV_DIR_FORWARD;
3187 s->mv_type = MV_TYPE_8X8;
3190 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3191 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3193 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3194 &dmin, &next_block, 0, 0);
3196 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3197 s->mv_dir = MV_DIR_FORWARD;
3198 s->mv_type = MV_TYPE_16X16;
3200 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3201 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3202 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3203 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3205 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3206 s->mv_dir = MV_DIR_BACKWARD;
3207 s->mv_type = MV_TYPE_16X16;
3209 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3210 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3211 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3212 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3214 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3215 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3216 s->mv_type = MV_TYPE_16X16;
3218 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3219 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3220 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3221 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3222 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3223 &dmin, &next_block, 0, 0);
3225 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3226 s->mv_dir = MV_DIR_FORWARD;
3227 s->mv_type = MV_TYPE_FIELD;
3230 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3231 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3232 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3234 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3235 &dmin, &next_block, 0, 0);
3237 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3238 s->mv_dir = MV_DIR_BACKWARD;
3239 s->mv_type = MV_TYPE_FIELD;
3242 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3243 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3244 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3246 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3247 &dmin, &next_block, 0, 0);
3249 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3250 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3251 s->mv_type = MV_TYPE_FIELD;
3253 for(dir=0; dir<2; dir++){
3255 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3256 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3257 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3260 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3261 &dmin, &next_block, 0, 0);
3263 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3265 s->mv_type = MV_TYPE_16X16;
3269 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3270 &dmin, &next_block, 0, 0);
3271 if(s->h263_pred || s->h263_aic){
3273 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3275 ff_clean_intra_table_entries(s); //old mode?
3279 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3280 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3281 const int last_qp= backup_s.qscale;
3284 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3285 static const int dquant_tab[4]={-1,1,-2,2};
3286 int storecoefs = s->mb_intra && s->dc_val[0];
3288 av_assert2(backup_s.dquant == 0);
3291 s->mv_dir= best_s.mv_dir;
3292 s->mv_type = MV_TYPE_16X16;
3293 s->mb_intra= best_s.mb_intra;
3294 s->mv[0][0][0] = best_s.mv[0][0][0];
3295 s->mv[0][0][1] = best_s.mv[0][0][1];
3296 s->mv[1][0][0] = best_s.mv[1][0][0];
3297 s->mv[1][0][1] = best_s.mv[1][0][1];
3299 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3300 for(; qpi<4; qpi++){
3301 int dquant= dquant_tab[qpi];
3302 qp= last_qp + dquant;
3303 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3305 backup_s.dquant= dquant;
3308 dc[i]= s->dc_val[0][ s->block_index[i] ];
3309 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3313 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3314 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3315 if(best_s.qscale != qp){
3318 s->dc_val[0][ s->block_index[i] ]= dc[i];
3319 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3326 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3327 int mx= s->b_direct_mv_table[xy][0];
3328 int my= s->b_direct_mv_table[xy][1];
3330 backup_s.dquant = 0;
3331 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3333 ff_mpeg4_set_direct_mv(s, mx, my);
3334 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3335 &dmin, &next_block, mx, my);
3337 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3338 backup_s.dquant = 0;
3339 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3341 ff_mpeg4_set_direct_mv(s, 0, 0);
3342 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3343 &dmin, &next_block, 0, 0);
3345 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3348 coded |= s->block_last_index[i];
3351 memcpy(s->mv, best_s.mv, sizeof(s->mv));
3352 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3353 mx=my=0; //FIXME find the one we actually used
3354 ff_mpeg4_set_direct_mv(s, mx, my);
3355 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3363 s->mv_dir= best_s.mv_dir;
3364 s->mv_type = best_s.mv_type;
3366 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3367 s->mv[0][0][1] = best_s.mv[0][0][1];
3368 s->mv[1][0][0] = best_s.mv[1][0][0];
3369 s->mv[1][0][1] = best_s.mv[1][0][1];*/
3372 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3373 &dmin, &next_block, mx, my);
3378 s->current_picture.qscale_table[xy] = best_s.qscale;
3380 copy_context_after_encode(s, &best_s, -1);
3382 pb_bits_count= put_bits_count(&s->pb);
3383 flush_put_bits(&s->pb);
3384 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3387 if(s->data_partitioning){
3388 pb2_bits_count= put_bits_count(&s->pb2);
3389 flush_put_bits(&s->pb2);
3390 avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3391 s->pb2= backup_s.pb2;
3393 tex_pb_bits_count= put_bits_count(&s->tex_pb);
3394 flush_put_bits(&s->tex_pb);
3395 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3396 s->tex_pb= backup_s.tex_pb;
3398 s->last_bits= put_bits_count(&s->pb);
3400 if (CONFIG_H263_ENCODER &&
3401 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3402 ff_h263_update_motion_val(s);
3404 if(next_block==0){ //FIXME 16 vs linesize16
3405 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3406 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3407 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3410 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3411 ff_mpv_decode_mb(s, s->block);
3413 int motion_x = 0, motion_y = 0;
3414 s->mv_type=MV_TYPE_16X16;
3415 // only one MB-Type possible
3418 case CANDIDATE_MB_TYPE_INTRA:
3421 motion_x= s->mv[0][0][0] = 0;
3422 motion_y= s->mv[0][0][1] = 0;
3424 case CANDIDATE_MB_TYPE_INTER:
3425 s->mv_dir = MV_DIR_FORWARD;
3427 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3428 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3430 case CANDIDATE_MB_TYPE_INTER_I:
3431 s->mv_dir = MV_DIR_FORWARD;
3432 s->mv_type = MV_TYPE_FIELD;
3435 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3436 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3437 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3440 case CANDIDATE_MB_TYPE_INTER4V:
3441 s->mv_dir = MV_DIR_FORWARD;
3442 s->mv_type = MV_TYPE_8X8;
3445 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3446 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3449 case CANDIDATE_MB_TYPE_DIRECT:
3450 if (CONFIG_MPEG4_ENCODER) {
3451 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3453 motion_x=s->b_direct_mv_table[xy][0];
3454 motion_y=s->b_direct_mv_table[xy][1];
3455 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3458 case CANDIDATE_MB_TYPE_DIRECT0:
3459 if (CONFIG_MPEG4_ENCODER) {
3460 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3462 ff_mpeg4_set_direct_mv(s, 0, 0);
3465 case CANDIDATE_MB_TYPE_BIDIR:
3466 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3468 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3469 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3470 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3471 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3473 case CANDIDATE_MB_TYPE_BACKWARD:
3474 s->mv_dir = MV_DIR_BACKWARD;
3476 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3477 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3479 case CANDIDATE_MB_TYPE_FORWARD:
3480 s->mv_dir = MV_DIR_FORWARD;
3482 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3483 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3485 case CANDIDATE_MB_TYPE_FORWARD_I:
3486 s->mv_dir = MV_DIR_FORWARD;
3487 s->mv_type = MV_TYPE_FIELD;
3490 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3491 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3492 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3495 case CANDIDATE_MB_TYPE_BACKWARD_I:
3496 s->mv_dir = MV_DIR_BACKWARD;
3497 s->mv_type = MV_TYPE_FIELD;
3500 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3501 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3502 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3505 case CANDIDATE_MB_TYPE_BIDIR_I:
3506 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3507 s->mv_type = MV_TYPE_FIELD;
3509 for(dir=0; dir<2; dir++){
3511 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3512 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3513 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3518 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3521 encode_mb(s, motion_x, motion_y);
3523 // RAL: Update last macroblock type
3524 s->last_mv_dir = s->mv_dir;
3526 if (CONFIG_H263_ENCODER &&
3527 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3528 ff_h263_update_motion_val(s);
3530 ff_mpv_decode_mb(s, s->block);
3533 /* clean the MV table in IPS frames for direct mode in B-frames */
3534 if(s->mb_intra /* && I,P,S_TYPE */){
3535 s->p_mv_table[xy][0]=0;
3536 s->p_mv_table[xy][1]=0;
3539 if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3543 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3544 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3546 s->current_picture.encoding_error[0] += sse(
3547 s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3548 s->dest[0], w, h, s->linesize);
3549 s->current_picture.encoding_error[1] += sse(
3550 s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3551 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3552 s->current_picture.encoding_error[2] += sse(
3553 s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3554 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3557 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3558 ff_h263_loop_filter(s);
3560 ff_dlog(s->avctx, "MB %d %d bits\n",
3561 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3565 //not beautiful here but we must write it before flushing so it has to be here
3566 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3567 ff_msmpeg4_encode_ext_header(s);
3571 #if FF_API_RTP_CALLBACK
3572 FF_DISABLE_DEPRECATION_WARNINGS
3573 /* Send the last GOB if RTP */
3574 if (s->avctx->rtp_callback) {
3575 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3576 int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3577 /* Call the RTP callback to send the last GOB */
3579 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3581 FF_ENABLE_DEPRECATION_WARNINGS
/* MERGE(field): add a field from the slice-thread context `src` into the
 * main context `dst`, then zero it in `src` so a later merge cannot
 * double-count. (Listing is gapped; leading numbers are upstream line numbers.) */
3587 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold per-slice motion-estimation statistics (scene-change score and
 * MB variance accumulators) back into the main encoder context. */
3588 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3589 MERGE(me.scene_change_score);
3590 MERGE(me.mc_mb_var_sum_temp);
3591 MERGE(me.mb_var_sum_temp);
/* Merge the per-slice encoding results of `src` into the main context `dst`:
 * statistics counters plus the slice's bitstream, which is appended to
 * dst->pb. NOTE(review): this listing elides lines between the numbered
 * statements (other MERGEd counters, braces). */
3594 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3597 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3598 MERGE(dct_count[1]);
3607 MERGE(er.error_count);
3608 MERGE(padding_bug_score);
/* per-plane SSE accumulated for PSNR reporting */
3609 MERGE(current_picture.encoding_error[0]);
3610 MERGE(current_picture.encoding_error[1]);
3611 MERGE(current_picture.encoding_error[2]);
3613 if (dst->noise_reduction){
3614 for(i=0; i<64; i++){
3615 MERGE(dct_error_sum[0][i]);
3616 MERGE(dct_error_sum[1][i]);
/* Slices must end byte-aligned before their bits can be concatenated. */
3620 assert(put_bits_count(&src->pb) % 8 ==0);
3621 assert(put_bits_count(&dst->pb) % 8 ==0);
3622 avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3623 flush_put_bits(&dst->pb);
/* Choose the quantizer/lambda for the current picture.
 * Priority: an explicitly queued next_lambda, else the rate controller
 * (unless qscale is fixed). With adaptive quant, codec-specific cleanup of
 * the per-MB qscale table runs before it is committed.
 * dry_run != 0 estimates without consuming state (next_lambda is kept).
 * Returns a negative value on rate-control failure (return paths partly
 * elided in this gapped listing). */
3626 static int estimate_qp(MpegEncContext *s, int dry_run){
3627 if (s->next_lambda){
3628 s->current_picture_ptr->f->quality =
3629 s->current_picture.f->quality = s->next_lambda;
3630 if(!dry_run) s->next_lambda= 0;
3631 } else if (!s->fixed_qscale) {
3632 s->current_picture_ptr->f->quality =
3633 s->current_picture.f->quality = ff_rate_estimate_qscale(s, dry_run);
3634 if (s->current_picture.f->quality < 0)
3638 if(s->adaptive_quant){
3639 switch(s->codec_id){
3640 case AV_CODEC_ID_MPEG4:
3641 if (CONFIG_MPEG4_ENCODER)
3642 ff_clean_mpeg4_qscales(s);
3644 case AV_CODEC_ID_H263:
3645 case AV_CODEC_ID_H263P:
3646 case AV_CODEC_ID_FLV1:
3647 if (CONFIG_H263_ENCODER)
3648 ff_clean_h263_qscales(s);
3651 ff_init_qscale_tab(s);
3654 s->lambda= s->lambda_table[0];
3657 s->lambda = s->current_picture.f->quality;
3662 /* must be called before writing the header */
/* Update temporal distances from the frame PTS: for B-frames derive
 * pb_time (distance to previous reference) from pp_time; for reference
 * frames update pp_time and remember this frame's time. */
3663 static void set_frame_distances(MpegEncContext * s){
3664 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3665 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3667 if(s->pict_type==AV_PICTURE_TYPE_B){
3668 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3669 assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3671 s->pp_time= s->time - s->last_non_b_time;
3672 s->last_non_b_time= s->time;
3673 assert(s->picture_number==0 || s->pp_time > 0);
/* Encode one picture: run motion estimation across slice threads, pick
 * f_code/b_code, finalize the picture type, set up quant matrices for
 * MJPEG/AMV, write the per-format picture header and finally launch
 * encode_thread() per slice context, merging the results.
 * NOTE(review): this listing is gapped — many lines (braces, error
 * returns, loop headers) are elided between the numbered statements. */
3677 static int encode_picture(MpegEncContext *s, int picture_number)
3681 int context_count = s->slice_context_count;
3683 s->picture_number = picture_number;
3685 /* Reset the average MB variance */
3686 s->me.mb_var_sum_temp =
3687 s->me.mc_mb_var_sum_temp = 0;
3689 /* we need to initialize some time vars before we can encode B-frames */
3690 // RAL: Condition added for MPEG1VIDEO
3691 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3692 set_frame_distances(s);
3693 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3694 ff_set_mpeg4_time(s);
3696 s->me.scene_change_score=0;
3698 //    s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* Rounding mode: MSMPEG4v3+ I-frames force no_rounding; codecs with
 * flip-flop rounding toggle it on each non-B reference frame. */
3700 if(s->pict_type==AV_PICTURE_TYPE_I){
3701 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3702 else s->no_rounding=0;
3703 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3704 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3705 s->no_rounding ^= 1;
/* Lambda source: 2nd pass reads it from the stats file; otherwise reuse
 * the last lambda for this picture type. */
3708 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3709 if (estimate_qp(s,1) < 0)
3711 ff_get_2pass_fcode(s);
3712 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3713 if(s->pict_type==AV_PICTURE_TYPE_B)
3714 s->lambda= s->last_lambda_for[s->pict_type];
3716 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* For non-(A)MJPEG codecs chroma shares the luma intra matrices; free any
 * separately allocated chroma copies first to avoid a leak. */
3720 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3721 if(s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
3722 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3723 s->q_chroma_intra_matrix   = s->q_intra_matrix;
3724 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3727 s->mb_intra=0; //for the rate distortion & bit compare functions
3728 for(i=1; i<context_count; i++){
3729 ret = ff_update_duplicate_context(s->thread_context[i], s);
3737 /* Estimate motion for every MB */
3738 if(s->pict_type != AV_PICTURE_TYPE_I){
3739 s->lambda  = (s->lambda  * s->me_penalty_compensation + 128) >> 8;
3740 s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3741 if (s->pict_type != AV_PICTURE_TYPE_B) {
3742 if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3744 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3748 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3749 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
/* I-frame: mark every MB intra, no ME needed. */
3751 for(i=0; i<s->mb_stride*s->mb_height; i++)
3752 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3754 if(!s->fixed_qscale){
3755 /* finding spatial complexity for I-frame rate control */
3756 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3759 for(i=1; i<context_count; i++){
3760 merge_context_after_me(s, s->thread_context[i]);
3762 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3763 s->current_picture.   mb_var_sum= s->current_picture_ptr->   mb_var_sum= s->me.   mb_var_sum_temp;
/* Scene-cut: promote a P-frame to I when ME reports a scene change. */
3766 if (s->me.scene_change_score > s->scenechange_threshold &&
3767 s->pict_type == AV_PICTURE_TYPE_P) {
3768 s->pict_type= AV_PICTURE_TYPE_I;
3769 for(i=0; i<s->mb_stride*s->mb_height; i++)
3770 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3771 if(s->msmpeg4_version >= 3)
3773 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3774 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* P/S frames: pick f_code from the MV range, then clip over-long MVs. */
3778 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3779 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3781 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3783 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3784 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3785 s->f_code= FFMAX3(s->f_code, a, b);
3788 ff_fix_long_p_mvs(s);
3789 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3790 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3794 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3795 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
/* B-frames: f_code (forward) and b_code (backward) chosen independently. */
3800 if(s->pict_type==AV_PICTURE_TYPE_B){
3803 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3804 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3805 s->f_code = FFMAX(a, b);
3807 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3808 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3809 s->b_code = FFMAX(a, b);
3811 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3812 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3813 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3814 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3815 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3817 for(dir=0; dir<2; dir++){
3820 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3821 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3822 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3823 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3831 if (estimate_qp(s, 0) < 0)
3834 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3835 s->pict_type == AV_PICTURE_TYPE_I &&
3836 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3837 s->qscale= 3; //reduce clipping problems
/* MJPEG: qscale is folded into the quant matrices themselves. */
3839 if (s->out_format == FMT_MJPEG) {
3840 const uint16_t *  luma_matrix = ff_mpeg1_default_intra_matrix;
3841 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3843 if (s->avctx->intra_matrix) {
3845 luma_matrix = s->avctx->intra_matrix;
3847 if (s->avctx->chroma_intra_matrix)
3848 chroma_matrix = s->avctx->chroma_intra_matrix;
3850 /* for mjpeg, we do include qscale in the matrix */
3852 int j = s->idsp.idct_permutation[i];
3854 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3855 s->       intra_matrix[j] = av_clip_uint8((  luma_matrix[i] * s->qscale) >> 3);
3857 s->y_dc_scale_table=
3858 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3859 s->chroma_intra_matrix[0] =
3860 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3861 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3862 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3863 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3864 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV: fixed quant matrices and DC scale tables (13 luma / 14 chroma). */
3867 if(s->codec_id == AV_CODEC_ID_AMV){
3868 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3869 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3871 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3873 s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3874 s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3876 s->y_dc_scale_table= y;
3877 s->c_dc_scale_table= c;
3878 s->intra_matrix[0] = 13;
3879 s->chroma_intra_matrix[0] = 14;
3880 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3881 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3882 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3883 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3887 //FIXME var duplication
3888 s->current_picture_ptr->f->key_frame =
3889 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3890 s->current_picture_ptr->f->pict_type =
3891 s->current_picture.f->pict_type = s->pict_type;
3893 if (s->current_picture.f->key_frame)
3894 s->picture_in_gop_number=0;
/* Write the picture header for the output format (switch cases elided). */
3896 s->mb_x = s->mb_y = 0;
3897 s->last_bits= put_bits_count(&s->pb);
3898 switch(s->out_format) {
3900 if (CONFIG_MJPEG_ENCODER)
3901 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3902 s->pred, s->intra_matrix, s->chroma_intra_matrix);
3905 if (CONFIG_H261_ENCODER)
3906 ff_h261_encode_picture_header(s, picture_number);
3909 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3910 ff_wmv2_encode_picture_header(s, picture_number);
3911 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3912 ff_msmpeg4_encode_picture_header(s, picture_number);
3913 else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3914 ret = ff_mpeg4_encode_picture_header(s, picture_number);
3917 } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3918 ret = ff_rv10_encode_picture_header(s, picture_number);
3922 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3923 ff_rv20_encode_picture_header(s, picture_number);
3924 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3925 ff_flv_encode_picture_header(s, picture_number);
3926 else if (CONFIG_H263_ENCODER)
3927 ff_h263_encode_picture_header(s, picture_number);
3930 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3931 ff_mpeg1_encode_picture_header(s, picture_number);
3936 bits= put_bits_count(&s->pb);
3937 s->header_bits= bits - s->last_bits;
/* Run the actual per-slice encoding and merge bitstreams/statistics. */
3939 for(i=1; i<context_count; i++){
3940 update_duplicate_context_after_me(s->thread_context[i], s);
3942 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3943 for(i=1; i<context_count; i++){
3944 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3945 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
3946 merge_context_after_encode(s, s->thread_context[i]);
/* DCT-domain noise reduction: shrink each coefficient toward zero by the
 * learned per-position offset while accumulating the error statistics
 * (dct_error_sum) used to update those offsets. Separate statistics are
 * kept for intra and inter blocks. Branch bodies for positive/negative
 * coefficients are partially elided in this gapped listing. */
3952 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3953 const int intra= s->mb_intra;
3956 s->dct_count[intra]++;
3958 for(i=0; i<64; i++){
3959 int level= block[i];
/* positive coefficient: subtract offset, clamp at zero */
3963 s->dct_error_sum[intra][i] += level;
3964 level -= s->dct_offset[intra][i];
3965 if(level<0) level=0;
/* negative coefficient: add offset, clamp at zero */
3967 s->dct_error_sum[intra][i] -= level;
3968 level += s->dct_offset[intra][i];
3969 if(level>0) level=0;
/* Rate-distortion optimal quantization of one 8x8 block via trellis search.
 * Forward-DCTs `block`, quantizes with the appropriate matrix, then runs a
 * dynamic program over scan positions where each state keeps the best
 * (distortion + lambda*bits) path; survivors prune the search. Writes the
 * chosen levels back into `block` (in IDCT permutation order), sets
 * *overflow, and returns the last nonzero index (or -1 for an empty block).
 * NOTE(review): gapped listing — declarations, braces and several
 * statements between numbered lines are elided. */
3976 static int dct_quantize_trellis_c(MpegEncContext *s,
3977 int16_t *block, int n,
3978 int qscale, int *overflow){
3980 const uint16_t *matrix;
3981 const uint8_t *scantable= s->intra_scantable.scantable;
3982 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3984 unsigned int threshold1, threshold2;
3996 int coeff_count[64];
3997 int qmul, qadd, start_i, last_non_zero, i, dc;
3998 const int esc_length= s->ac_esc_length;
4000 uint8_t * last_length;
4001 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
4004 s->fdsp.fdct(block);
4006 if(s->dct_error_sum)
4007 s->denoise_dct(s, block);
4009 qadd= ((qscale-1)|1)*8;
4011 if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
4012 else                 mpeg2_qscale = qscale << 1;
/* Intra path: quantize the DC separately, select intra matrices/VLC tables. */
4023 /* For AIC we skip quant/dequant of INTRADC */
4028 /* note: block[0] is assumed to be positive */
4029 block[0] = (block[0] + (q >> 1)) / q;
4032 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4033 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4034 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4035 bias= 1<<(QMAT_SHIFT-1);
4037 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4038 length     = s->intra_chroma_ac_vlc_length;
4039 last_length= s->intra_chroma_ac_vlc_last_length;
4041 length     = s->intra_ac_vlc_length;
4042 last_length= s->intra_ac_vlc_last_length;
/* Inter path (else branch, opening elided). */
4047 qmat = s->q_inter_matrix[qscale];
4048 matrix = s->inter_matrix;
4049 length     = s->inter_ac_vlc_length;
4050 last_length= s->inter_ac_vlc_last_length;
4054 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4055 threshold2= (threshold1<<1);
/* Scan backwards to find the last coefficient that quantizes nonzero. */
4057 for(i=63; i>=start_i; i--) {
4058 const int j = scantable[i];
4059 int level = block[j] * qmat[j];
4061 if(((unsigned)(level+threshold1))>threshold2){
/* Build up to 2 candidate levels per position (level and level-1). */
4067 for(i=start_i; i<=last_non_zero; i++) {
4068 const int j = scantable[i];
4069 int level = block[j] * qmat[j];
4071 //        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
4072 //           || bias-level >= (1<<(QMAT_SHIFT - 3))){
4073 if(((unsigned)(level+threshold1))>threshold2){
4075 level= (bias + level)>>QMAT_SHIFT;
4077 coeff[1][i]= level-1;
4078 //                coeff[2][k]= level-2;
4080 level= (bias - level)>>QMAT_SHIFT;
4081 coeff[0][i]= -level;
4082 coeff[1][i]= -level+1;
4083 //                coeff[2][k]= -level+2;
4085 coeff_count[i]= FFMIN(level, 2);
4086 av_assert2(coeff_count[i]);
4089 coeff[0][i]= (level>>31)|1;
4094 *overflow= s->max_qcoeff < max; //overflow might have happened
4096 if(last_non_zero < start_i){
4097 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4098 return last_non_zero;
/* Trellis DP: score_tab[i] = best cost ending before position i;
 * survivor[] holds states worth extending. */
4101 score_tab[start_i]= 0;
4102 survivor[0]= start_i;
4105 for(i=start_i; i<=last_non_zero; i++){
4106 int level_index, j, zero_distortion;
4107 int dct_coeff= FFABS(block[ scantable[i] ]);
4108 int best_score=256*256*256*120;
4110 if (s->fdsp.fdct == ff_fdct_ifast)
4111 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4112 zero_distortion= dct_coeff*dct_coeff;
4114 for(level_index=0; level_index < coeff_count[i]; level_index++){
4116 int level= coeff[level_index][i];
4117 const int alevel= FFABS(level);
/* Per-format dequantization for distortion measurement. */
4122 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4123 unquant_coeff= alevel*qmul + qadd;
4124 } else if(s->out_format == FMT_MJPEG) {
4125 j = s->idsp.idct_permutation[scantable[i]];
4126 unquant_coeff = alevel * matrix[j] * 8;
4128 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4130 unquant_coeff = (int)(  alevel  * mpeg2_qscale * matrix[j]) >> 4;
4131 unquant_coeff =   (unquant_coeff - 1) | 1;
4133 unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4134 unquant_coeff =   (unquant_coeff - 1) | 1;
4139 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* Non-escape codable level: try extending every survivor. */
4141 if((level&(~127)) == 0){
4142 for(j=survivor_count-1; j>=0; j--){
4143 int run= i - survivor[j];
4144 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4145 score += score_tab[i-run];
4147 if(score < best_score){
4150 level_tab[i+1]= level-64;
4154 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4155 for(j=survivor_count-1; j>=0; j--){
4156 int run= i - survivor[j];
4157 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4158 score += score_tab[i-run];
4159 if(score < last_score){
4162 last_level= level-64;
/* Level needs the escape code: fixed esc_length bits. */
4168 distortion += esc_length*lambda;
4169 for(j=survivor_count-1; j>=0; j--){
4170 int run= i - survivor[j];
4171 int score= distortion + score_tab[i-run];
4173 if(score < best_score){
4176 level_tab[i+1]= level-64;
4180 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4181 for(j=survivor_count-1; j>=0; j--){
4182 int run= i - survivor[j];
4183 int score= distortion + score_tab[i-run];
4184 if(score < last_score){
4187 last_level= level-64;
4195 score_tab[i+1]= best_score;
/* Prune dominated survivors; looser bound for short blocks (see note). */
4197 // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4198 if(last_non_zero <= 27){
4199 for(; survivor_count; survivor_count--){
4200 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4204 for(; survivor_count; survivor_count--){
4205 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4210 survivor[ survivor_count++ ]= i+1;
/* Pick the best termination point (non-H.263/H.261 formats). */
4213 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4214 last_score= 256*256*256*120;
4215 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4216 int score= score_tab[i];
4218 score += lambda * 2; // FIXME more exact?
4220 if(score < last_score){
4223 last_level= level_tab[i];
4224 last_run= run_tab[i];
4229 s->coded_score[n] = last_score;
4231 dc= FFABS(block[0]);
4232 last_non_zero= last_i - 1;
4233 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4235 if(last_non_zero < start_i)
4236 return last_non_zero;
/* Special case: only DC survives — decide whether keeping it pays off. */
4238 if(last_non_zero == 0 && start_i == 0){
4240 int best_score= dc * dc;
4242 for(i=0; i<coeff_count[0]; i++){
4243 int level= coeff[i][0];
4244 int alevel= FFABS(level);
4245 int unquant_coeff, score, distortion;
4247 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4248 unquant_coeff= (alevel*qmul + qadd)>>3;
4250 unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4251 unquant_coeff =   (unquant_coeff - 1) | 1;
4253 unquant_coeff = (unquant_coeff + 4) >> 3;
4254 unquant_coeff<<= 3 + 3;
4256 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4258 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4259 else                    score= distortion + esc_length*lambda;
4261 if(score < best_score){
4263 best_level= level - 64;
4266 block[0]= best_level;
4267 s->coded_score[n] = best_score - dc*dc;
4268 if(best_level == 0) return -1;
4269 else                return last_non_zero;
/* Back-track the winning path and write levels into the permuted block. */
4273 av_assert2(last_level);
4275 block[ perm_scantable[last_non_zero] ]= last_level;
4278 for(; i>start_i; i -= run_tab[i] + 1){
4279 block[ perm_scantable[i-1] ]= level_tab[i];
4282 return last_non_zero;
4285 //#define REFINE_STATS 1
/* Per-coefficient IDCT basis images, scaled by BASIS_SHIFT; filled lazily
 * by build_basis() on first use in dct_quantize_refine(). */
4286 static int16_t basis[64][64];
/* Build the 2D DCT basis functions, stored under the IDCT permutation
 * `perm` so they can be indexed by permuted coefficient position.
 * (Loop headers over i, j, x, y are elided in this gapped listing.) */
4288 static void build_basis(uint8_t *perm){
4295 double s= 0.25*(1<<BASIS_SHIFT);
4297 int perm_index= perm[index];
/* DC rows/columns get the orthonormalization factor 1/sqrt(2). */
4298 if(i==0) s*= sqrt(0.5);
4299 if(j==0) s*= sqrt(0.5);
4300 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Iteratively refine an already-quantized block (quantizer noise shaping):
 * reconstruct the spatial-domain residual `rem` against `orig`, then
 * repeatedly try +/-1 changes to each coefficient, scoring spatial
 * distortion (weighted by `weight`) plus the VLC bit-cost delta, applying
 * the best change until no improvement remains. Returns the new last
 * nonzero index. NOTE(review): gapped listing — loop/branch structure is
 * partially elided between the numbered lines. */
4307 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4308 int16_t *block, int16_t *weight, int16_t *orig,
4311 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4312 const uint8_t *scantable= s->intra_scantable.scantable;
4313 const uint8_t *perm_scantable= s->intra_scantable.permutated;
4314 //    unsigned int threshold1, threshold2;
4319 int qmul, qadd, start_i, last_non_zero, i, dc;
4321 uint8_t * last_length;
4323 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* REFINE_STATS counters (only used when the stats build option is on). */
4326 static int after_last=0;
4327 static int to_zero=0;
4328 static int from_zero=0;
4331 static int messed_sign=0;
4334 if(basis[0][0] == 0)
4335 build_basis(s->idsp.idct_permutation);
4346 /* For AIC we skip quant/dequant of INTRADC */
4350 q <<= RECON_SHIFT-3;
4351 /* note: block[0] is assumed to be positive */
4353 //        block[0] = (block[0] + (q >> 1)) / q;
4355 //        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4356 //            bias= 1<<(QMAT_SHIFT-1);
4357 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4358 length     = s->intra_chroma_ac_vlc_length;
4359 last_length= s->intra_chroma_ac_vlc_last_length;
4361 length     = s->intra_ac_vlc_length;
4362 last_length= s->intra_ac_vlc_last_length;
/* inter path (else branch, opening elided) */
4367 length     = s->inter_ac_vlc_length;
4368 last_length= s->inter_ac_vlc_last_length;
4370 last_non_zero = s->block_last_index[n];
/* rem[] = reconstructed-minus-original residual in RECON_SHIFT fixed point */
4375 dc += (1<<(RECON_SHIFT-1));
4376 for(i=0; i<64; i++){
4377 rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
4380 STOP_TIMER("memset rem[]")}
/* Derive 6-bit perceptual weights (16..63) from `weight` and the qns level. */
4383 for(i=0; i<64; i++){
4388 w= FFABS(weight[i]) + qns*one;
4389 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4392 //            w=weight[i] = (63*qns + (w/2)) / w;
4395 av_assert2(w<(1<<6));
4398 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* Add the currently-coded coefficients' basis images into rem[] and build
 * the run-length table for the existing coefficients. */
4404 for(i=start_i; i<=last_non_zero; i++){
4405 int j= perm_scantable[i];
4406 const int level= block[j];
4410 if(level<0) coeff= qmul*level - qadd;
4411 else        coeff= qmul*level + qadd;
4412 run_tab[rle_index++]=run;
4415 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
4421 if(last_non_zero>0){
4422 STOP_TIMER("init rem[]")
/* Main refinement loop: best_score starts as the cost of changing nothing. */
4429 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4432 int run2, best_unquant_change=0, analyze_gradient;
4436 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
4438 if(analyze_gradient){
/* d1[] approximates the weighted residual gradient per coefficient. */
4442 for(i=0; i<64; i++){
4445 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
4448 STOP_TIMER("rem*w*w")}
/* Intra DC: try raising/lowering it by one quant step. */
4458 const int level= block[0];
4459 int change, old_coeff;
4461 av_assert2(s->mb_intra);
4465 for(change=-1; change<=1; change+=2){
4466 int new_level= level + change;
4467 int score, new_coeff;
4469 new_coeff= q*new_level;
4470 if(new_coeff >= 2048 || new_coeff < 0)
4473 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4474 new_coeff - old_coeff);
4475 if(score<best_score){
4478 best_change= change;
4479 best_unquant_change= new_coeff - old_coeff;
4486 run2= run_tab[rle_index++];
/* AC coefficients: try +/-1 on each position, accounting for the VLC bit
 * cost delta including changed run lengths of neighbours. */
4490 for(i=start_i; i<64; i++){
4491 int j= perm_scantable[i];
4492 const int level= block[j];
4493 int change, old_coeff;
4495 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4499 if(level<0) old_coeff= qmul*level - qadd;
4500 else        old_coeff= qmul*level + qadd;
4501 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4505 av_assert2(run2>=0 || i >= last_non_zero );
4508 for(change=-1; change<=1; change+=2){
4509 int new_level= level + change;
4510 int score, new_coeff, unquant_change;
4513 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4517 if(new_level<0) new_coeff= qmul*new_level - qadd;
4518 else            new_coeff= qmul*new_level + qadd;
4519 if(new_coeff >= 2048 || new_coeff <= -2048)
4521 //FIXME check for overflow
/* nonzero -> nonzero: simple per-level bit-cost difference */
4524 if(level < 63 && level > -63){
4525 if(i < last_non_zero)
4526 score +=  length[UNI_AC_ENC_INDEX(run, new_level+64)]
4527 - length[UNI_AC_ENC_INDEX(run, level+64)];
4529 score +=  last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4530 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* zero -> +/-1: coefficient appears, splitting a run */
4533 av_assert2(FFABS(new_level)==1);
4535 if(analyze_gradient){
4536 int g= d1[ scantable[i] ];
4537 if(g && (g^new_level) >= 0)
4541 if(i < last_non_zero){
4542 int next_i= i + run2 + 1;
4543 int next_level= block[ perm_scantable[next_i] ] + 64;
4545 if(next_level&(~127))
4548 if(next_i < last_non_zero)
4549 score +=   length[UNI_AC_ENC_INDEX(run, 65)]
4550 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4551 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4553 score +=  length[UNI_AC_ENC_INDEX(run, 65)]
4554 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4555 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4557 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4559 score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4560 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* +/-1 -> zero: coefficient disappears, runs merge */
4566 av_assert2(FFABS(level)==1);
4568 if(i < last_non_zero){
4569 int next_i= i + run2 + 1;
4570 int next_level= block[ perm_scantable[next_i] ] + 64;
4572 if(next_level&(~127))
4575 if(next_i < last_non_zero)
4576 score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4577 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4578 - length[UNI_AC_ENC_INDEX(run, 65)];
4580 score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4581 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4582 - length[UNI_AC_ENC_INDEX(run, 65)];
4584 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4586 score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4587 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4594 unquant_change= new_coeff - old_coeff;
4595 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4597 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4599 if(score<best_score){
4602 best_change= change;
4603 best_unquant_change= unquant_change;
4607 prev_level= level + 64;
4608 if(prev_level&(~127))
4617 STOP_TIMER("iterative step")}
/* Apply the best change found in this iteration and fix last_non_zero. */
4621 int j= perm_scantable[ best_coeff ];
4623 block[j] += best_change;
4625 if(best_coeff > last_non_zero){
4626 last_non_zero= best_coeff;
4627 av_assert2(block[j]);
4634 if(block[j] - best_change){
4635 if(FFABS(block[j]) > FFABS(block[j] - best_change)){
4647 for(; last_non_zero>=start_i; last_non_zero--){
4648 if(block[perm_scantable[last_non_zero]])
4654 if(256*256*256*64 % count == 0){
4655 av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
/* Rebuild the run table and update rem[] with the applied change, then
 * loop again (outer iteration structure elided). */
4660 for(i=start_i; i<=last_non_zero; i++){
4661 int j= perm_scantable[i];
4662 const int level= block[j];
4665 run_tab[rle_index++]=run;
4672 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4678 if(last_non_zero>0){
4679 STOP_TIMER("iterative search")
4684 return last_non_zero;
4688  * Permute an 8x8 block according to permutation.
4689  * @param block the block which will be permuted according to
4690  *              the given permutation vector
4691  * @param permutation the permutation vector
4692  * @param last the last non zero coefficient in scantable order, used to
4693  *             speed the permutation up
4694  * @param scantable the used scantable, this is only used to speed the
4695  *                  permutation up, the block is not (inverse) permutated
4696  *                  to scantable order!
4698 void ff_block_permute(int16_t *block, uint8_t *permutation,
4699 const uint8_t *scantable, int last)
4706 //FIXME it is ok but not clean and might fail for some permutations
4707 // if (permutation[1] == 1)
/* Copy the nonzero coefficients to a temp buffer, then write each back at
 * its permuted position. Only positions up to `last` in scan order are
 * touched (temp-copy loop body partially elided in this gapped listing). */
4710 for (i = 0; i <= last; i++) {
4711 const int j = scantable[i];
4716 for (i = 0; i <= last; i++) {
4717 const int j = scantable[i];
4718 const int perm_j = permutation[j];
4719 block[perm_j] = temp[j];
/* Plain (non-trellis) quantization of one 8x8 block: forward DCT, optional
 * denoise, separate DC handling for intra, then matrix quantization with a
 * rounding bias. Sets *overflow when a level exceeds max_qcoeff and
 * finally permutes the nonzero coefficients into IDCT order.
 * Returns the index of the last nonzero coefficient in scan order.
 * NOTE(review): gapped listing — some declarations/branches are elided. */
4723 int ff_dct_quantize_c(MpegEncContext *s,
4724 int16_t *block, int n,
4725 int qscale, int *overflow)
4727 int i, j, level, last_non_zero, q, start_i;
4729 const uint8_t *scantable= s->intra_scantable.scantable;
4732 unsigned int threshold1, threshold2;
4734 s->fdsp.fdct(block);
4736 if(s->dct_error_sum)
4737 s->denoise_dct(s, block);
/* Intra: DC is quantized with its own divisor q. */
4747 /* For AIC we skip quant/dequant of INTRADC */
4750 /* note: block[0] is assumed to be positive */
4751 block[0] = (block[0] + (q >> 1)) / q;
4754 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4755 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
/* Inter path (else branch, opening elided). */
4759 qmat = s->q_inter_matrix[qscale];
4760 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4762 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4763 threshold2= (threshold1<<1);
/* Backward scan for the last coefficient that survives quantization. */
4764 for(i=63;i>=start_i;i--) {
4766 level = block[j] * qmat[j];
4768 if(((unsigned)(level+threshold1))>threshold2){
/* Forward pass: quantize each surviving coefficient with rounding bias. */
4775 for(i=start_i; i<=last_non_zero; i++) {
4777 level = block[j] * qmat[j];
4779 //        if(   bias+level >= (1<<QMAT_SHIFT)
4780 //           || bias-level >= (1<<QMAT_SHIFT)){
4781 if(((unsigned)(level+threshold1))>threshold2){
4783 level= (bias + level)>>QMAT_SHIFT;
4786 level= (bias - level)>>QMAT_SHIFT;
4794 *overflow= s->max_qcoeff < max; //overflow might have happened
4796 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4797 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4798 ff_block_permute(block, s->idsp.idct_permutation,
4799 scantable, last_non_zero);
4801 return last_non_zero;
/* AVOption plumbing: OFFSET locates a field inside MpegEncContext, VE marks
 * the options as video encoding parameters. */
4804 #define OFFSET(x) offsetof(MpegEncContext, x)
4805 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options for the plain H.263 encoder (terminator elided). */
4806 static const AVOption h263_options[] = {
4807 { "obmc",         "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4808 { "mb_info",      "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass tying the h263_options table to the H.263 encoder's private
 * context, so the generic AVOption machinery can find and log them. */
4813 static const AVClass h263_class = {
4814     .class_name = "H.263 encoder",
4815     .item_name  = av_default_item_name,
4816     .option     = h263_options,
4817     .version    = LIBAVUTIL_VERSION_INT,
/* Registration entry for the baseline H.263 encoder; all callbacks are
 * the shared mpegvideo encode routines. Only YUV 4:2:0 input is accepted.
 * NOTE(review): the .name field and closing brace are outside this
 * excerpt. */
4820 AVCodec ff_h263_encoder = {
4822     .long_name      = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4823     .type           = AVMEDIA_TYPE_VIDEO,
4824     .id             = AV_CODEC_ID_H263,
4825     .priv_data_size = sizeof(MpegEncContext),
4826     .init           = ff_mpv_encode_init,
4827     .encode2        = ff_mpv_encode_picture,
4828     .close          = ff_mpv_encode_end,
4829     .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4830     .priv_class     = &h263_class,
/* Private AVOptions of the H.263+ (H.263v2) encoder: the optional annex
 * features it can switch on.
 * NOTE(review): the { NULL } terminator and closing brace fall outside
 * this excerpt. */
4833 static const AVOption h263p_options[] = {
4834     { "umv",        "Use unlimited motion vectors.",    OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4835     { "aiv",        "Use alternative inter VLC.",       OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4836     { "obmc",       "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4837     { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
/* AVClass exposing h263p_options on the H.263+ encoder's private
 * context. */
4841 static const AVClass h263p_class = {
4842     .class_name = "H.263p encoder",
4843     .item_name  = av_default_item_name,
4844     .option     = h263p_options,
4845     .version    = LIBAVUTIL_VERSION_INT,
/* Registration entry for the H.263+ encoder. Unlike plain H.263 it
 * advertises slice-threading capability. Only YUV 4:2:0 is accepted.
 * NOTE(review): the .name field and closing brace are outside this
 * excerpt. */
4848 AVCodec ff_h263p_encoder = {
4850     .long_name      = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4851     .type           = AVMEDIA_TYPE_VIDEO,
4852     .id             = AV_CODEC_ID_H263P,
4853     .priv_data_size = sizeof(MpegEncContext),
4854     .init           = ff_mpv_encode_init,
4855     .encode2        = ff_mpv_encode_picture,
4856     .close          = ff_mpv_encode_end,
4857     .capabilities   = AV_CODEC_CAP_SLICE_THREADS,
4858     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4859     .priv_class     = &h263p_class,
/* AVClass for the MS-MPEG4 v2 encoder; it has no codec-specific options
 * and reuses the generic mpegvideo option table. */
4862 static const AVClass msmpeg4v2_class = {
4863     .class_name = "msmpeg4v2 encoder",
4864     .item_name  = av_default_item_name,
4865     .option     = ff_mpv_generic_options,
4866     .version    = LIBAVUTIL_VERSION_INT,
/* Registration entry for the Microsoft MPEG-4 variant v2 encoder;
 * callbacks are the shared mpegvideo encode routines, YUV 4:2:0 only.
 * NOTE(review): the closing brace is outside this excerpt. */
4869 AVCodec ff_msmpeg4v2_encoder = {
4870     .name           = "msmpeg4v2",
4871     .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4872     .type           = AVMEDIA_TYPE_VIDEO,
4873     .id             = AV_CODEC_ID_MSMPEG4V2,
4874     .priv_data_size = sizeof(MpegEncContext),
4875     .init           = ff_mpv_encode_init,
4876     .encode2        = ff_mpv_encode_picture,
4877     .close          = ff_mpv_encode_end,
4878     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4879     .priv_class     = &msmpeg4v2_class,
/* AVClass for the MS-MPEG4 v3 encoder; generic mpegvideo options only. */
4882 static const AVClass msmpeg4v3_class = {
4883     .class_name = "msmpeg4v3 encoder",
4884     .item_name  = av_default_item_name,
4885     .option     = ff_mpv_generic_options,
4886     .version    = LIBAVUTIL_VERSION_INT,
/* Registration entry for the Microsoft MPEG-4 variant v3 encoder;
 * same shared mpegvideo callbacks, YUV 4:2:0 only.
 * NOTE(review): the .name field and closing brace are outside this
 * excerpt. */
4889 AVCodec ff_msmpeg4v3_encoder = {
4891     .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4892     .type           = AVMEDIA_TYPE_VIDEO,
4893     .id             = AV_CODEC_ID_MSMPEG4V3,
4894     .priv_data_size = sizeof(MpegEncContext),
4895     .init           = ff_mpv_encode_init,
4896     .encode2        = ff_mpv_encode_picture,
4897     .close          = ff_mpv_encode_end,
4898     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4899     .priv_class     = &msmpeg4v3_class,
/* AVClass for the WMV1 encoder; generic mpegvideo options only. */
4902 static const AVClass wmv1_class = {
4903     .class_name = "wmv1 encoder",
4904     .item_name  = av_default_item_name,
4905     .option     = ff_mpv_generic_options,
4906     .version    = LIBAVUTIL_VERSION_INT,
4909 AVCodec ff_wmv1_encoder = {
4911 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4912 .type = AVMEDIA_TYPE_VIDEO,
4913 .id = AV_CODEC_ID_WMV1,
4914 .priv_data_size = sizeof(MpegEncContext),
4915 .init = ff_mpv_encode_init,
4916 .encode2 = ff_mpv_encode_picture,
4917 .close = ff_mpv_encode_end,
4918 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4919 .priv_class = &wmv1_class,