2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/pixdesc.h"
40 #include "libavutil/opt.h"
41 #include "libavutil/timer.h"
46 #include "mpegvideo.h"
47 #include "mpegvideodata.h"
51 #include "mjpegenc_common.h"
53 #include "mpegutils.h"
56 #include "pixblockdsp.h"
60 #include "aandcttab.h"
62 #include "mpeg4video.h"
64 #include "bytestream.h"
70 #define QUANT_BIAS_SHIFT 8
72 #define QMAT_SHIFT_MMX 16
75 static int encode_picture(MpegEncContext *s, int picture_number);
76 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
77 static int sse_mb(MpegEncContext *s);
78 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
79 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
81 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
82 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
84 const AVOption ff_mpv_generic_options[] = {
/*
 * Build per-qscale quantizer multiplier tables from a DCT quantization
 * matrix: qmat[] holds high-precision reciprocals, qmat16[][0] holds
 * 16-bit reciprocals with qmat16[][1] a matching rounding bias, for every
 * qscale in [qmin, qmax]. The scaling depends on which fdct implementation
 * is in use (islow vs. ifast/AAN), because ifast folds ff_aanscales into
 * the transform output.
 * NOTE(review): this extract is missing interior lines; the comments below
 * describe only the code that is visible here.
 */
89 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
90 uint16_t (*qmat16)[2][64],
91 const uint16_t *quant_matrix,
92 int bias, int qmin, int qmax, int intra)
94 FDCTDSPContext *fdsp = &s->fdsp;
98 for (qscale = qmin; qscale <= qmax; qscale++) {
/* MPEG-2 non-linear qscale maps through a lookup table; the linear type
 * simply doubles qscale. */
102 if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
103 else qscale2 = qscale << 1;
/* Case 1: "islow" jpeg fdct (8- or 10-bit) and faandct produce unscaled
 * coefficients, so the divisor is just qscale2 * quant_matrix. */
105 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
107 fdsp->fdct == ff_faandct ||
108 #endif /* CONFIG_FAANDCT */
109 fdsp->fdct == ff_jpeg_fdct_islow_10) {
110 for (i = 0; i < 64; i++) {
/* idct_permutation maps natural order to the IDCT's coefficient layout. */
111 const int j = s->idsp.idct_permutation[i];
112 int64_t den = (int64_t) qscale2 * quant_matrix[j];
113 /* 16 <= qscale * quant_matrix[i] <= 7905
114 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
115 * 19952 <= x <= 249205026
116 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
117 * 3444240 >= (1 << 36) / (x) >= 275 */
119 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
/* Case 2: ifast (AAN) fdct leaves ff_aanscales folded into the output,
 * so the reciprocal must divide it back out (extra << 14 precision). */
121 } else if (fdsp->fdct == ff_fdct_ifast) {
122 for (i = 0; i < 64; i++) {
123 const int j = s->idsp.idct_permutation[i];
124 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
125 /* 16 <= qscale * quant_matrix[i] <= 7905
126 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
127 * 19952 <= x <= 249205026
128 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
129 * 3444240 >= (1 << 36) / (x) >= 275 */
131 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
/* Case 3 (fallthrough, opening lines elided in this extract): generic
 * path also fills the 16-bit tables used by SIMD quantizers. */
134 for (i = 0; i < 64; i++) {
135 const int j = s->idsp.idct_permutation[i];
136 int64_t den = (int64_t) qscale2 * quant_matrix[j];
137 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
138 * Assume x = qscale * quant_matrix[i]
140 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
141 * so 32768 >= (1 << 19) / (x) >= 67 */
142 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
143 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
144 // (qscale * quant_matrix[i]);
145 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
/* Clamp so the 16-bit multiplier never becomes 0 or overflows 128*256. */
147 if (qmat16[qscale][0][i] == 0 ||
148 qmat16[qscale][0][i] == 128 * 256)
149 qmat16[qscale][0][i] = 128 * 256 - 1;
/* Rounding bias scaled to match the 16-bit reciprocal's precision. */
150 qmat16[qscale][1][i] =
151 ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
152 qmat16[qscale][0][i]);
/* Overflow guard: shrink shift while max * qmat would exceed INT_MAX.
 * (Surrounding lines elided in this extract.) */
156 for (i = intra; i < 64; i++) {
158 if (fdsp->fdct == ff_fdct_ifast) {
159 max = (8191LL * ff_aanscales[i]) >> 14;
161 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
167 av_log(NULL, AV_LOG_INFO,
168 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/*
 * Derive s->qscale (and lambda2) from the current lambda.
 * The linear mapping qscale ~= lambda * 139 >> (FF_LAMBDA_SHIFT + 7) is
 * used; the table-search branch for the MPEG-2 non-linear qscale is
 * deliberately disabled by the "&& 0" below (dead code kept on purpose —
 * do not "fix" without understanding why it was turned off).
 */
173 static inline void update_qscale(MpegEncContext *s)
175 if (s->q_scale_type == 1 && 0) {
177 int bestdiff=INT_MAX;
/* Disabled path: pick the non-linear qscale entry whose implied lambda
 * is closest to s->lambda, honoring qmin/qmax (qmax may be ignored while
 * vbv_ignore_qmax is set). */
180 for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
181 int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
182 if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
183 (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
185 if (diff < bestdiff) {
/* Active path: linear lambda -> qscale conversion with rounding. */
192 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
193 (FF_LAMBDA_SHIFT + 7);
194 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
/* Keep lambda2 (squared lambda, rounded) in sync for RD decisions. */
197 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/*
 * Write a 64-entry quantization matrix to the bitstream, 8 bits per
 * coefficient, in zigzag scan order (as required by the MPEG headers).
 */
201 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
207 for (i = 0; i < 64; i++) {
208 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
215 * init s->current_picture.qscale_table from s->lambda_table
217 void ff_init_qscale_tab(MpegEncContext *s)
219 int8_t * const qscale_table = s->current_picture.qscale_table;
/* Convert each macroblock's lambda to a qscale (same linear mapping as
 * update_qscale) and clamp to the configured qmin (upper clamp elided
 * in this extract). mb_index2xy maps mb index to the table position. */
222 for (i = 0; i < s->mb_num; i++) {
223 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
224 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
225 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/*
 * Copy the fields that motion estimation may have changed from the main
 * context (src) into a per-slice duplicate context (dst), so slice
 * threads see consistent state. Only the listed fields are synced.
 */
230 static void update_duplicate_context_after_me(MpegEncContext *dst,
233 #define COPY(a) dst->a= src->a
235 COPY(current_picture);
241 COPY(picture_in_gop_number);
242 COPY(gop_picture_number);
243 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
244 COPY(progressive_frame); // FIXME don't set in encode_header
245 COPY(partitioned_frame); // FIXME don't set in encode_header
250 * Set the given MpegEncContext to defaults for encoding.
251 * the changed fields will not depend upon the prior state of the MpegEncContext.
253 static void mpv_encode_defaults(MpegEncContext *s)
256 ff_mpv_common_defaults(s);
/* Initialize the shared static fcode table (lines for the MV-penalty
 * table are elided in this extract); small MVs get fcode 1. */
258 for (i = -16; i < 16; i++) {
259 default_fcode_tab[i + MAX_MV] = 1;
261 s->me.mv_penalty = default_mv_penalty;
262 s->fcode_tab = default_fcode_tab;
/* Reset picture counters for a fresh encoding session. */
264 s->input_picture_number = 0;
265 s->picture_in_gop_number = 0;
/*
 * Install the DCT quantization function pointers: arch-specific (x86)
 * overrides first, then the C fallbacks. When trellis quantization is
 * requested, dct_quantize is swapped for the trellis implementation
 * while fast_dct_quantize keeps the plain one.
 */
268 av_cold int ff_dct_encode_init(MpegEncContext *s) {
270 ff_dct_encode_init_x86(s);
272 if (CONFIG_H263_ENCODER)
273 ff_h263dsp_init(&s->h263dsp);
274 if (!s->dct_quantize)
275 s->dct_quantize = ff_dct_quantize_c;
277 s->denoise_dct = denoise_dct_c;
278 s->fast_dct_quantize = s->dct_quantize;
279 if (s->avctx->trellis)
280 s->dct_quantize = dct_quantize_trellis_c;
285 /* init video encoder */
/*
 * ff_mpv_encode_init(): validate user options, configure the
 * MpegEncContext for the selected codec, allocate encoder tables, and
 * initialize the DSP/rate-control subsystems.
 * Returns 0 on success, a negative AVERROR on invalid parameters, or
 * jumps to the fail path (which calls ff_mpv_encode_end) on allocation
 * or sub-init failure.
 * NOTE(review): interior lines are elided in this extract; comments
 * describe only the visible code.
 */
286 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
288 MpegEncContext *s = avctx->priv_data;
289 AVCPBProperties *cpb_props;
290 int i, ret, format_supported;
292 mpv_encode_defaults(s);
/* --- Per-codec pixel-format validation --- */
294 switch (avctx->codec_id) {
295 case AV_CODEC_ID_MPEG2VIDEO:
296 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
297 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
298 av_log(avctx, AV_LOG_ERROR,
299 "only YUV420 and YUV422 are supported\n");
303 case AV_CODEC_ID_MJPEG:
304 case AV_CODEC_ID_AMV:
305 format_supported = 0;
306 /* JPEG color space */
307 if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
308 avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
309 avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
310 (avctx->color_range == AVCOL_RANGE_JPEG &&
311 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
312 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
313 avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
314 format_supported = 1;
315 /* MPEG color space */
316 else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
317 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
318 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
319 avctx->pix_fmt == AV_PIX_FMT_YUV444P))
320 format_supported = 1;
322 if (!format_supported) {
323 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
/* Default case (label elided): everything else is 4:2:0 only. */
328 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
329 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
/* Map pixel format to internal chroma_format enum. */
334 switch (avctx->pix_fmt) {
335 case AV_PIX_FMT_YUVJ444P:
336 case AV_PIX_FMT_YUV444P:
337 s->chroma_format = CHROMA_444;
339 case AV_PIX_FMT_YUVJ422P:
340 case AV_PIX_FMT_YUV422P:
341 s->chroma_format = CHROMA_422;
343 case AV_PIX_FMT_YUVJ420P:
344 case AV_PIX_FMT_YUV420P:
346 s->chroma_format = CHROMA_420;
/* --- Copy basic user settings into the context --- */
350 s->bit_rate = avctx->bit_rate;
351 s->width = avctx->width;
352 s->height = avctx->height;
/* Cap GOP size at 600 unless experimental compliance allows more. */
353 if (avctx->gop_size > 600 &&
354 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
355 av_log(avctx, AV_LOG_WARNING,
356 "keyframe interval too large!, reducing it from %d to %d\n",
357 avctx->gop_size, 600);
358 avctx->gop_size = 600;
360 s->gop_size = avctx->gop_size;
362 if (avctx->max_b_frames > MAX_B_FRAMES) {
363 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
364 "is %d.\n", MAX_B_FRAMES);
365 avctx->max_b_frames = MAX_B_FRAMES;
367 s->max_b_frames = avctx->max_b_frames;
368 s->codec_id = avctx->codec->id;
369 s->strict_std_compliance = avctx->strict_std_compliance;
370 s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
371 s->mpeg_quant = avctx->mpeg_quant;
372 s->rtp_mode = !!avctx->rtp_payload_size;
373 s->intra_dc_precision = avctx->intra_dc_precision;
375 // workaround some differences between how applications specify dc precision
/* Normalize: some apps use 0-based precision, some use 8-based. */
376 if (s->intra_dc_precision < 0) {
377 s->intra_dc_precision += 8;
378 } else if (s->intra_dc_precision >= 8)
379 s->intra_dc_precision -= 8;
381 if (s->intra_dc_precision < 0) {
382 av_log(avctx, AV_LOG_ERROR,
383 "intra dc precision must be positive, note some applications use"
384 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
385 return AVERROR(EINVAL);
/* Only MPEG-2 supports raised DC precision (up to 3, i.e. 11 bit). */
388 if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
389 av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
390 return AVERROR(EINVAL);
392 s->user_specified_pts = AV_NOPTS_VALUE;
394 if (s->gop_size <= 1) {
/* Deprecated option forwarding (bodies partially elided). */
401 #if FF_API_MOTION_EST
402 FF_DISABLE_DEPRECATION_WARNINGS
403 s->me_method = avctx->me_method;
404 FF_ENABLE_DEPRECATION_WARNINGS
408 s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
411 FF_DISABLE_DEPRECATION_WARNINGS
412 if (avctx->border_masking != 0.0)
413 s->border_masking = avctx->border_masking;
414 FF_ENABLE_DEPRECATION_WARNINGS
/* Adaptive quant is enabled when any masking option or QP-RD is on
 * (remaining condition lines elided). */
417 s->adaptive_quant = (s->avctx->lumi_masking ||
418 s->avctx->dark_masking ||
419 s->avctx->temporal_cplx_masking ||
420 s->avctx->spatial_cplx_masking ||
421 s->avctx->p_masking ||
423 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
426 s->loop_filter = !!(s->avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
/* --- Rate-control sanity checks; auto-derive VBV buffer size --- */
428 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
429 switch(avctx->codec_id) {
430 case AV_CODEC_ID_MPEG1VIDEO:
431 case AV_CODEC_ID_MPEG2VIDEO:
432 avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
434 case AV_CODEC_ID_MPEG4:
435 case AV_CODEC_ID_MSMPEG4V1:
436 case AV_CODEC_ID_MSMPEG4V2:
437 case AV_CODEC_ID_MSMPEG4V3:
/* Piecewise-linear VBV size (in 16 KiB units) by max rate band. */
438 if (avctx->rc_max_rate >= 15000000) {
439 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
440 } else if(avctx->rc_max_rate >= 2000000) {
441 avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
442 } else if(avctx->rc_max_rate >= 384000) {
443 avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
445 avctx->rc_buffer_size = 40;
446 avctx->rc_buffer_size *= 16384;
449 if (avctx->rc_buffer_size) {
450 av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
454 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
455 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
459 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
460 av_log(avctx, AV_LOG_INFO,
461 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
464 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
465 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
469 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
470 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
474 if (avctx->rc_max_rate &&
475 avctx->rc_max_rate == avctx->bit_rate &&
476 avctx->rc_max_rate != avctx->rc_min_rate) {
477 av_log(avctx, AV_LOG_INFO,
478 "impossible bitrate constraints, this will fail\n");
/* VBV buffer must hold at least one frame's worth of bits. */
481 if (avctx->rc_buffer_size &&
482 avctx->bit_rate * (int64_t)avctx->time_base.num >
483 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
484 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
488 if (!s->fixed_qscale &&
489 avctx->bit_rate * av_q2d(avctx->time_base) >
490 avctx->bit_rate_tolerance) {
491 av_log(avctx, AV_LOG_WARNING,
492 "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, (int64_t)avctx->bit_rate);
493 avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
/* CBR MPEG-1/2: warn when the VBV delay cannot be represented in the
 * 16-bit vbv_delay field (90 kHz clock), forcing the VBR sentinel. */
496 if (s->avctx->rc_max_rate &&
497 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
498 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
499 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
500 90000LL * (avctx->rc_buffer_size - 1) >
501 s->avctx->rc_max_rate * 0xFFFFLL) {
502 av_log(avctx, AV_LOG_INFO,
503 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
504 "specified vbv buffer is too large for the given bitrate!\n");
/* --- Feature/codec compatibility checks --- */
507 if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
508 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
509 s->codec_id != AV_CODEC_ID_FLV1) {
510 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
514 if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
515 av_log(avctx, AV_LOG_ERROR,
516 "OBMC is only supported with simple mb decision\n");
520 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
521 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
525 if (s->max_b_frames &&
526 s->codec_id != AV_CODEC_ID_MPEG4 &&
527 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
528 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
529 av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
532 if (s->max_b_frames < 0) {
533 av_log(avctx, AV_LOG_ERROR,
534 "max b frames must be 0 or positive for mpegvideo based encoders\n");
/* H.263/MPEG-4 headers store SAR in 8 bits per component. */
538 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
539 s->codec_id == AV_CODEC_ID_H263 ||
540 s->codec_id == AV_CODEC_ID_H263P) &&
541 (avctx->sample_aspect_ratio.num > 255 ||
542 avctx->sample_aspect_ratio.den > 255)) {
543 av_log(avctx, AV_LOG_WARNING,
544 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
545 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
546 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
547 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
/* --- Resolution limits per codec --- */
550 if ((s->codec_id == AV_CODEC_ID_H263 ||
551 s->codec_id == AV_CODEC_ID_H263P) &&
552 (avctx->width > 2048 ||
553 avctx->height > 1152 )) {
554 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
557 if ((s->codec_id == AV_CODEC_ID_H263 ||
558 s->codec_id == AV_CODEC_ID_H263P) &&
559 ((avctx->width &3) ||
560 (avctx->height&3) )) {
561 av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
565 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
566 (avctx->width > 4095 ||
567 avctx->height > 4095 )) {
568 av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
572 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
573 (avctx->width > 16383 ||
574 avctx->height > 16383 )) {
575 av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
579 if (s->codec_id == AV_CODEC_ID_RV10 &&
581 avctx->height&15 )) {
582 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
583 return AVERROR(EINVAL);
586 if (s->codec_id == AV_CODEC_ID_RV20 &&
589 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
590 return AVERROR(EINVAL);
593 if ((s->codec_id == AV_CODEC_ID_WMV1 ||
594 s->codec_id == AV_CODEC_ID_WMV2) &&
596 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
600 if ((s->avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
601 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
602 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
606 // FIXME mpeg2 uses that too
607 if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
608 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
609 av_log(avctx, AV_LOG_ERROR,
610 "mpeg2 style quantization not supported by codec\n");
614 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
615 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
619 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
620 s->avctx->mb_decision != FF_MB_DECISION_RD) {
621 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
625 if (s->avctx->scenechange_threshold < 1000000000 &&
626 (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
627 av_log(avctx, AV_LOG_ERROR,
628 "closed gop with scene change detection are not supported yet, "
629 "set threshold to 1000000000\n");
633 if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
634 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
635 av_log(avctx, AV_LOG_ERROR,
636 "low delay forcing is only available for mpeg2\n");
639 if (s->max_b_frames != 0) {
640 av_log(avctx, AV_LOG_ERROR,
641 "b frames cannot be used with low delay\n");
646 if (s->q_scale_type == 1) {
647 if (avctx->qmax > 28) {
648 av_log(avctx, AV_LOG_ERROR,
649 "non linear quant only supports qmax <= 28 currently\n");
654 if (avctx->slices > 1 &&
655 (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
656 av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
657 return AVERROR(EINVAL);
660 if (s->avctx->thread_count > 1 &&
661 s->codec_id != AV_CODEC_ID_MPEG4 &&
662 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
663 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
664 s->codec_id != AV_CODEC_ID_MJPEG &&
665 (s->codec_id != AV_CODEC_ID_H263P)) {
666 av_log(avctx, AV_LOG_ERROR,
667 "multi threaded encoding not supported by codec\n");
671 if (s->avctx->thread_count < 1) {
672 av_log(avctx, AV_LOG_ERROR,
673 "automatic thread number detection not supported by codec, "
678 if (!avctx->time_base.den || !avctx->time_base.num) {
679 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
683 if (avctx->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
684 av_log(avctx, AV_LOG_INFO,
685 "notice: b_frame_strategy only affects the first pass\n");
686 avctx->b_frame_strategy = 0;
/* Reduce the timebase to lowest terms. */
689 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
691 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
692 avctx->time_base.den /= i;
693 avctx->time_base.num /= i;
/* --- Default quantizer rounding biases --- */
697 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) {
698 // (a + x * 3 / 8) / x
699 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
700 s->inter_quant_bias = 0;
/* else branch (label elided): H.263-style rounding. */
702 s->intra_quant_bias = 0;
704 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
707 if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
708 av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
709 return AVERROR(EINVAL);
712 #if FF_API_QUANT_BIAS
713 FF_DISABLE_DEPRECATION_WARNINGS
714 if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
715 s->intra_quant_bias = avctx->intra_quant_bias;
716 if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
717 s->inter_quant_bias = avctx->inter_quant_bias;
718 FF_ENABLE_DEPRECATION_WARNINGS
721 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
/* MPEG-4 stores the timebase denominator in 16 bits. */
723 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
724 s->avctx->time_base.den > (1 << 16) - 1) {
725 av_log(avctx, AV_LOG_ERROR,
726 "timebase %d/%d not supported by MPEG 4 standard, "
727 "the maximum admitted value for the timebase denominator "
728 "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
732 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
/* --- Per-codec output-format configuration --- */
734 switch (avctx->codec->id) {
735 case AV_CODEC_ID_MPEG1VIDEO:
736 s->out_format = FMT_MPEG1;
737 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
738 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
740 case AV_CODEC_ID_MPEG2VIDEO:
741 s->out_format = FMT_MPEG1;
742 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
743 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
746 case AV_CODEC_ID_MJPEG:
747 case AV_CODEC_ID_AMV:
748 s->out_format = FMT_MJPEG;
749 s->intra_only = 1; /* force intra only for jpeg */
750 if (!CONFIG_MJPEG_ENCODER ||
751 ff_mjpeg_encode_init(s) < 0)
756 case AV_CODEC_ID_H261:
757 if (!CONFIG_H261_ENCODER)
759 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
760 av_log(avctx, AV_LOG_ERROR,
761 "The specified picture size of %dx%d is not valid for the "
762 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
763 s->width, s->height);
766 s->out_format = FMT_H261;
769 s->rtp_mode = 0; /* Sliced encoding not supported */
771 case AV_CODEC_ID_H263:
772 if (!CONFIG_H263_ENCODER)
774 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
775 s->width, s->height) == 8) {
776 av_log(avctx, AV_LOG_ERROR,
777 "The specified picture size of %dx%d is not valid for "
778 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
779 "352x288, 704x576, and 1408x1152. "
780 "Try H.263+.\n", s->width, s->height);
783 s->out_format = FMT_H263;
787 case AV_CODEC_ID_H263P:
788 s->out_format = FMT_H263;
791 s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
792 s->modified_quant = s->h263_aic;
793 s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
794 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
797 /* These are just to be sure */
801 case AV_CODEC_ID_FLV1:
802 s->out_format = FMT_H263;
803 s->h263_flv = 2; /* format = 1; 11-bit codes */
804 s->unrestricted_mv = 1;
805 s->rtp_mode = 0; /* don't allow GOB */
809 case AV_CODEC_ID_RV10:
810 s->out_format = FMT_H263;
814 case AV_CODEC_ID_RV20:
815 s->out_format = FMT_H263;
818 s->modified_quant = 1;
822 s->unrestricted_mv = 0;
824 case AV_CODEC_ID_MPEG4:
825 s->out_format = FMT_H263;
827 s->unrestricted_mv = 1;
828 s->low_delay = s->max_b_frames ? 0 : 1;
829 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
831 case AV_CODEC_ID_MSMPEG4V2:
832 s->out_format = FMT_H263;
834 s->unrestricted_mv = 1;
835 s->msmpeg4_version = 2;
839 case AV_CODEC_ID_MSMPEG4V3:
840 s->out_format = FMT_H263;
842 s->unrestricted_mv = 1;
843 s->msmpeg4_version = 3;
844 s->flipflop_rounding = 1;
848 case AV_CODEC_ID_WMV1:
849 s->out_format = FMT_H263;
851 s->unrestricted_mv = 1;
852 s->msmpeg4_version = 4;
853 s->flipflop_rounding = 1;
857 case AV_CODEC_ID_WMV2:
858 s->out_format = FMT_H263;
860 s->unrestricted_mv = 1;
861 s->msmpeg4_version = 5;
862 s->flipflop_rounding = 1;
870 avctx->has_b_frames = !s->low_delay;
874 s->progressive_frame =
875 s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
876 AV_CODEC_FLAG_INTERLACED_ME) ||
/* --- Allocate context, DSP init, encoder tables --- */
881 if (ff_mpv_common_init(s) < 0)
884 ff_fdctdsp_init(&s->fdsp, avctx);
885 ff_me_cmp_init(&s->mecc, avctx);
886 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
887 ff_pixblockdsp_init(&s->pdsp, avctx);
888 ff_qpeldsp_init(&s->qdsp);
890 if (s->msmpeg4_version) {
891 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
892 2 * 2 * (MAX_LEVEL + 1) *
893 (MAX_RUN + 1) * 2 * sizeof(int), fail);
895 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
/* 64 coefficients x 32 qscales per quantizer table. */
897 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
898 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail);
899 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
900 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
901 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
902 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
903 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
904 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
905 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
906 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
908 if (s->avctx->noise_reduction) {
909 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
910 2 * 64 * sizeof(uint16_t), fail);
913 ff_dct_encode_init(s);
915 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
916 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
918 if (s->slice_context_count > 1) {
921 if (avctx->codec_id == AV_CODEC_ID_H263P)
922 s->h263_slice_structured = 1;
925 s->quant_precision = 5;
927 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, s->avctx->ildct_cmp);
928 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->avctx->frame_skip_cmp);
/* Codec-family specific encoder init. */
930 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
931 ff_h261_encode_init(s);
932 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
933 ff_h263_encode_init(s);
934 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
935 if ((ret = ff_msmpeg4_encode_init(s)) < 0)
937 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
938 && s->out_format == FMT_MPEG1)
939 ff_mpeg1_encode_init(s);
/* Select default quant matrices (permuted for the IDCT), then let
 * user-supplied matrices override them. */
942 for (i = 0; i < 64; i++) {
943 int j = s->idsp.idct_permutation[i];
944 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
946 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
947 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
948 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
950 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
953 s->chroma_intra_matrix[j] =
954 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
955 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
957 if (s->avctx->intra_matrix)
958 s->intra_matrix[j] = s->avctx->intra_matrix[i];
959 if (s->avctx->inter_matrix)
960 s->inter_matrix[j] = s->avctx->inter_matrix[i];
963 /* precompute matrix */
964 /* for mjpeg, we do include qscale in the matrix */
965 if (s->out_format != FMT_MJPEG) {
966 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
967 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
969 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
970 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
974 if (ff_rate_control_init(s) < 0)
/* Forward deprecated rate-control options into the private context. */
977 #if FF_API_ERROR_RATE
978 FF_DISABLE_DEPRECATION_WARNINGS
979 if (avctx->error_rate)
980 s->error_rate = avctx->error_rate;
981 FF_ENABLE_DEPRECATION_WARNINGS;
984 #if FF_API_NORMALIZE_AQP
985 FF_DISABLE_DEPRECATION_WARNINGS
986 if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP)
987 s->mpv_flags |= FF_MPV_FLAG_NAQ;
988 FF_ENABLE_DEPRECATION_WARNINGS;
992 FF_DISABLE_DEPRECATION_WARNINGS
993 if (avctx->flags & CODEC_FLAG_MV0)
994 s->mpv_flags |= FF_MPV_FLAG_MV0;
995 FF_ENABLE_DEPRECATION_WARNINGS
999 FF_DISABLE_DEPRECATION_WARNINGS
1000 if (avctx->rc_qsquish != 0.0)
1001 s->rc_qsquish = avctx->rc_qsquish;
1002 if (avctx->rc_qmod_amp != 0.0)
1003 s->rc_qmod_amp = avctx->rc_qmod_amp;
1004 if (avctx->rc_qmod_freq)
1005 s->rc_qmod_freq = avctx->rc_qmod_freq;
1006 if (avctx->rc_buffer_aggressivity != 1.0)
1007 s->rc_buffer_aggressivity = avctx->rc_buffer_aggressivity;
1008 if (avctx->rc_initial_cplx != 0.0)
1009 s->rc_initial_cplx = avctx->rc_initial_cplx;
1011 s->lmin = avctx->lmin;
1013 s->lmax = avctx->lmax;
1016 av_freep(&s->rc_eq);
1017 s->rc_eq = av_strdup(avctx->rc_eq);
1019 return AVERROR(ENOMEM);
1021 FF_ENABLE_DEPRECATION_WARNINGS
/* b_frame_strategy 2: allocate scratch frames for B-frame lookahead
 * (downscaled by brd_scale). */
1024 if (avctx->b_frame_strategy == 2) {
1025 for (i = 0; i < s->max_b_frames + 2; i++) {
1026 s->tmp_frames[i] = av_frame_alloc();
1027 if (!s->tmp_frames[i])
1028 return AVERROR(ENOMEM);
1030 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1031 s->tmp_frames[i]->width = s->width >> avctx->brd_scale;
1032 s->tmp_frames[i]->height = s->height >> avctx->brd_scale;
1034 ret = av_frame_get_buffer(s->tmp_frames[i], 32);
/* Publish coded picture buffer parameters as side data. */
1040 cpb_props = ff_add_cpb_side_data(avctx);
1042 return AVERROR(ENOMEM);
1043 cpb_props->max_bitrate = avctx->rc_max_rate;
1044 cpb_props->min_bitrate = avctx->rc_min_rate;
1045 cpb_props->avg_bitrate = avctx->bit_rate;
1046 cpb_props->buffer_size = avctx->rc_buffer_size;
/* fail path: release everything allocated so far. */
1050 ff_mpv_encode_end(avctx);
1051 return AVERROR_UNKNOWN;
/*
 * Free everything allocated by ff_mpv_encode_init: rate control state,
 * the common context, codec-specific state (MJPEG), stats buffers,
 * quantizer tables and picture lists. Also used as the failure-path
 * cleanup, so it must tolerate partially-initialized state.
 */
1054 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1056 MpegEncContext *s = avctx->priv_data;
1059 ff_rate_control_uninit(s);
1061 ff_mpv_common_end(s);
1062 if (CONFIG_MJPEG_ENCODER &&
1063 s->out_format == FMT_MJPEG)
1064 ff_mjpeg_encode_close(s);
1066 av_freep(&avctx->extradata);
1068 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1069 av_frame_free(&s->tmp_frames[i]);
1071 ff_free_picture_tables(&s->new_picture);
1072 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1074 av_freep(&s->avctx->stats_out);
1075 av_freep(&s->ac_stats);
/* The chroma intra tables may alias the luma ones; only free them
 * separately when they are distinct allocations, then NULL both. */
1077 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1078 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1079 s->q_chroma_intra_matrix= NULL;
1080 s->q_chroma_intra_matrix16= NULL;
1081 av_freep(&s->q_intra_matrix);
1082 av_freep(&s->q_inter_matrix);
1083 av_freep(&s->q_intra_matrix16);
1084 av_freep(&s->q_inter_matrix16);
1085 av_freep(&s->input_picture);
1086 av_freep(&s->reordered_input_picture);
1087 av_freep(&s->dct_offset);
/*
 * Sum of absolute errors of a 16x16 block against a constant reference
 * value `ref` (typically the block mean) — a cheap flatness measure.
 */
1092 static int get_sae(uint8_t *src, int ref, int stride)
1097 for (y = 0; y < 16; y++) {
1098 for (x = 0; x < 16; x++) {
1099 acc += FFABS(src[x + y * stride] - ref);
/*
 * Count 16x16 blocks that look like intra candidates: a block votes
 * "intra" when its deviation from its own mean (sae + margin of 500)
 * is still below its SAD against the reference frame.
 */
1106 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1107 uint8_t *ref, int stride)
/* Truncate to whole 16-pixel macroblocks (width handling elided here). */
1113 h = s->height & ~15;
1115 for (y = 0; y < h; y += 16) {
1116 for (x = 0; x < w; x += 16) {
1117 int offset = x + y * stride;
1118 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
/* Block mean via pix_sum, rounded (256 pixels -> >> 8). */
1120 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1121 int sae = get_sae(src + offset, mean, stride);
1123 acc += sae + 500 < sad;
/*
 * Thin encoder-side wrapper around ff_alloc_picture, passing the
 * context's geometry (strides, chroma shifts, mb layout). `shared`
 * selects whether the picture wraps caller-owned buffers.
 */
1129 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1131 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1132 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1133 s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1134 &s->linesize, &s->uvlinesize);
/*
 * Take ownership of one user-supplied input frame: validate/derive its pts,
 * either reference it directly ("direct" path) or copy it into an internal
 * Picture (with INPLACE_OFFSET headroom and edge padding), and append it to
 * s->input_picture[] at the slot matching the encoding delay.
 *
 * NOTE(review): many interior lines (pts checks, direct-mode decision,
 * copy loop bodies, early returns) are missing from this chunk; the leading
 * numbers are the original file's line numbers. Comments below only state
 * what the visible lines establish.
 */
1137 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1139 Picture *pic = NULL;
1141 int i, display_picture_number = 0, ret;
/* delay between input and output order: max_b_frames, else 0/1 by low_delay */
1142 int encoding_delay = s->max_b_frames ? s->max_b_frames
1143 : (s->low_delay ? 0 : 1);
1144 int flush_offset = 1;
1149 display_picture_number = s->input_picture_number++;
/* --- pts validation: user pts must be strictly increasing --- */
1151 if (pts != AV_NOPTS_VALUE) {
1152 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1153 int64_t last = s->user_specified_pts;
1156 av_log(s->avctx, AV_LOG_ERROR,
1157 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1159 return AVERROR(EINVAL);
1162 if (!s->low_delay && display_picture_number == 1)
1163 s->dts_delta = pts - last;
1165 s->user_specified_pts = pts;
/* --- no pts given: extrapolate from the previous one, or use the index --- */
1167 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1168 s->user_specified_pts =
1169 pts = s->user_specified_pts + 1;
1170 av_log(s->avctx, AV_LOG_INFO,
1171 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1174 pts = display_picture_number;
/* --- direct-mode eligibility checks (buffer, strides, alignment) --- */
1178 if (!pic_arg->buf[0] ||
1179 pic_arg->linesize[0] != s->linesize ||
1180 pic_arg->linesize[1] != s->uvlinesize ||
1181 pic_arg->linesize[2] != s->uvlinesize)
1183 if ((s->width & 15) || (s->height & 15))
1185 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1187 if (s->linesize & (STRIDE_ALIGN-1))
1190 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1191 pic_arg->linesize[1], s->linesize, s->uvlinesize);
/* grab an unused internal Picture slot */
1193 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1197 pic = &s->picture[i];
/* direct path: reference the caller's frame instead of copying */
1201 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1204 ret = alloc_picture(s, pic, direct);
/* caller handed us a buffer already laid out at INPLACE_OFFSET? */
1209 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1210 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1211 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1214 int h_chroma_shift, v_chroma_shift;
1215 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
/* --- copy path: per-plane copy with stride/size derived from subsampling --- */
1219 for (i = 0; i < 3; i++) {
1220 int src_stride = pic_arg->linesize[i];
1221 int dst_stride = i ? s->uvlinesize : s->linesize;
1222 int h_shift = i ? h_chroma_shift : 0;
1223 int v_shift = i ? v_chroma_shift : 0;
1224 int w = s->width >> h_shift;
1225 int h = s->height >> v_shift;
1226 uint8_t *src = pic_arg->data[i];
1227 uint8_t *dst = pic->f->data[i];
/* interlaced MPEG-2 with tall alignment needs extra vertical padding */
1230 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1231 && !s->progressive_sequence
1232 && FFALIGN(s->height, 32) - s->height > 16)
1235 if (!s->avctx->rc_buffer_size)
1236 dst += INPLACE_OFFSET;
/* fast path: identical strides allow one bulk memcpy per plane */
1238 if (src_stride == dst_stride)
1239 memcpy(dst, src, src_stride * h);
1242 uint8_t *dst2 = dst;
1244 memcpy(dst2, src, w);
/* pad to macroblock size when dimensions are not already aligned */
1249 if ((s->width & 15) || (s->height & (vpad-1))) {
1250 s->mpvencdsp.draw_edges(dst, dst_stride,
1259 ret = av_frame_copy_props(pic->f, pic_arg);
1263 pic->f->display_picture_number = display_picture_number;
1264 pic->f->pts = pts; // we set this here to avoid modifiying pic_arg
1266 /* Flushing: When we have not received enough input frames,
1267 * ensure s->input_picture[0] contains the first picture */
1268 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1269 if (s->input_picture[flush_offset])
1272 if (flush_offset <= 1)
1275 encoding_delay = encoding_delay - flush_offset + 1;
1278 /* shift buffer entries */
1279 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1280 s->input_picture[i - flush_offset] = s->input_picture[i];
1282 s->input_picture[encoding_delay] = (Picture*) pic;
/*
 * Decide whether candidate frame p is similar enough to ref to be skipped.
 * Walks all three planes in 8x8 blocks, runs frame_skip_cmp on each block,
 * and folds the per-block values according to |frame_skip_exp|:
 *   0: max, 1: sum of |v|, 2: sum v^2, 3: sum |v^3|, 4: sum v^4.
 * A negative frame_skip_exp normalizes via pow() below. The final score is
 * tested against frame_skip_threshold and a lambda-scaled skip factor.
 *
 * NOTE(review): interior lines (declarations, score folding into score64,
 * return statements) are missing from this chunk; leading numbers are the
 * original file's line numbers.
 */
1287 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1291 int64_t score64 = 0;
1293 for (plane = 0; plane < 3; plane++) {
1294 const int stride = p->f->linesize[plane];
/* luma covers 2x2 8-pixel blocks per MB, chroma 1x1 (bw = blocks per MB dim) */
1295 const int bw = plane ? 1 : 2;
1296 for (y = 0; y < s->mb_height * bw; y++) {
1297 for (x = 0; x < s->mb_width * bw; x++) {
/* non-shared pictures carry a 16-byte INPLACE offset on the candidate */
1298 int off = p->shared ? 0 : 16;
1299 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1300 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1301 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1303 switch (FFABS(s->avctx->frame_skip_exp)) {
1304 case 0: score = FFMAX(score, v); break;
1305 case 1: score += FFABS(v); break;
1306 case 2: score64 += v * (int64_t)v; break;
1307 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1308 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
/* negative exponent: convert the power sum into a per-MB mean root */
1317 if (s->avctx->frame_skip_exp < 0)
1318 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1319 -1.0/s->avctx->frame_skip_exp);
1321 if (score64 < s->avctx->frame_skip_threshold)
1323 if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
/*
 * Helper for estimate_best_b_count(): encode one frame with the scratch
 * encoder context c via avcodec_encode_video2() and discard the packet.
 * Presumably returns the packet size (or a negative error) — the lines
 * between the encode call and av_packet_unref() are missing from this
 * chunk, so the exact return path cannot be confirmed here.
 */
1328 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1330 AVPacket pkt = { 0 };
1331 int ret, got_output;
1333 av_init_packet(&pkt);
1334 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1339 av_packet_unref(&pkt);
/*
 * B-frame strategy 2: brute-force search for the best number of B frames.
 * Builds a scratch encoder at reduced resolution (s->avctx->brd_scale),
 * shrinks the queued input pictures into s->tmp_frames[], then for each
 * candidate j (0..max_b_frames) trial-encodes an I frame followed by a
 * P/B pattern and accumulates a rate-distortion cost (bits * lambda2 plus
 * the encoder's reported error). The j with the lowest cost wins.
 *
 * NOTE(review): interior lines (the per-j rd bookkeeping, best_rd update,
 * avcodec_close/free of the scratch context, error paths) are missing from
 * this chunk; leading numbers are the original file's line numbers.
 */
1343 static int estimate_best_b_count(MpegEncContext *s)
1345 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1346 AVCodecContext *c = avcodec_alloc_context3(NULL);
1347 const int scale = s->avctx->brd_scale;
1348 int i, j, out_size, p_lambda, b_lambda, lambda2;
1349 int64_t best_rd = INT64_MAX;
1350 int best_b_count = -1;
1353 return AVERROR(ENOMEM);
1354 av_assert0(scale >= 0 && scale <= 3);
/* seed lambdas from the last encoded P/B frames */
1357 //s->next_picture_ptr->quality;
1358 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1359 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1360 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1361 if (!b_lambda) // FIXME we should do this somewhere else
1362 b_lambda = p_lambda;
1363 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* --- configure the downscaled scratch encoder --- */
1366 c->width = s->width >> scale;
1367 c->height = s->height >> scale;
1368 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1369 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1370 c->mb_decision = s->avctx->mb_decision;
1371 c->me_cmp = s->avctx->me_cmp;
1372 c->mb_cmp = s->avctx->mb_cmp;
1373 c->me_sub_cmp = s->avctx->me_sub_cmp;
1374 c->pix_fmt = AV_PIX_FMT_YUV420P;
1375 c->time_base = s->avctx->time_base;
1376 c->max_b_frames = s->max_b_frames;
1378 if (avcodec_open2(c, codec, NULL) < 0)
/* --- shrink next_picture + queued inputs into tmp_frames[] --- */
1381 for (i = 0; i < s->max_b_frames + 2; i++) {
1382 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1383 s->next_picture_ptr;
1386 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1387 pre_input = *pre_input_ptr;
1388 memcpy(data, pre_input_ptr->f->data, sizeof(data));
/* non-shared internal pictures carry the INPLACE offset */
1390 if (!pre_input.shared && i) {
1391 data[0] += INPLACE_OFFSET;
1392 data[1] += INPLACE_OFFSET;
1393 data[2] += INPLACE_OFFSET;
1396 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1397 s->tmp_frames[i]->linesize[0],
1399 pre_input.f->linesize[0],
1400 c->width, c->height);
1401 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1402 s->tmp_frames[i]->linesize[1],
1404 pre_input.f->linesize[1],
1405 c->width >> 1, c->height >> 1);
1406 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1407 s->tmp_frames[i]->linesize[2],
1409 pre_input.f->linesize[2],
1410 c->width >> 1, c->height >> 1);
/* --- trial-encode each candidate B-frame count j --- */
1414 for (j = 0; j < s->max_b_frames + 1; j++) {
1417 if (!s->input_picture[j])
1420 c->error[0] = c->error[1] = c->error[2] = 0;
/* intra anchor first, at near-lossless quality */
1422 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1423 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1425 out_size = encode_frame(c, s->tmp_frames[0]);
1427 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
/* then alternate: every (j+1)-th frame (and the last) is a P frame */
1429 for (i = 0; i < s->max_b_frames + 1; i++) {
1430 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1432 s->tmp_frames[i + 1]->pict_type = is_p ?
1433 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1434 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1436 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1438 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1441 /* get the delayed frames */
1443 out_size = encode_frame(c, NULL);
1444 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* distortion term: encoder-accumulated per-plane error */
1447 rd += c->error[0] + c->error[1] + c->error[2];
1458 return best_b_count;
/*
 * Pick the next picture to encode and establish coding order: shifts the
 * reordered queue, optionally skips similar frames, decides the picture
 * type (forced I on GOP start / no reference, else P with up to
 * max_b_frames B frames chosen by b_frame_strategy 0/1/2), then publishes
 * the choice into s->new_picture / s->current_picture(_ptr).
 *
 * NOTE(review): interior lines (several closing braces, early returns,
 * error paths) are missing from this chunk; leading numbers are the
 * original file's line numbers.
 */
1461 static int select_input_picture(MpegEncContext *s)
/* shift the reorder queue down by one */
1465 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1466 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1467 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1469 /* set next picture type & ordering */
1470 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
/* frame-skip: drop the input if it is close enough to the last reference */
1471 if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1472 if (s->picture_in_gop_number < s->gop_size &&
1473 s->next_picture_ptr &&
1474 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1475 // FIXME check that te gop check above is +-1 correct
1476 av_frame_unref(s->input_picture[0]->f);
1478 ff_vbv_update(s, 0);
/* force an intra frame when there is no reference or in intra-only mode */
1484 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1485 !s->next_picture_ptr || s->intra_only) {
1486 s->reordered_input_picture[0] = s->input_picture[0];
1487 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1488 s->reordered_input_picture[0]->f->coded_picture_number =
1489 s->coded_picture_number++;
/* two-pass: take picture types from the pass-1 ratecontrol log */
1493 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1494 for (i = 0; i < s->max_b_frames + 1; i++) {
1495 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1497 if (pict_num >= s->rc_context.num_entries)
1499 if (!s->input_picture[i]) {
1500 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1504 s->input_picture[i]->f->pict_type =
1505 s->rc_context.entry[pict_num].new_pict_type;
/* --- choose the number of B frames according to the strategy --- */
1509 if (s->avctx->b_frame_strategy == 0) {
/* strategy 0: always use max_b_frames (clamped to available inputs) */
1510 b_frames = s->max_b_frames;
1511 while (b_frames && !s->input_picture[b_frames])
1513 } else if (s->avctx->b_frame_strategy == 1) {
/* strategy 1: score each candidate via get_intra_count on neighbours */
1514 for (i = 1; i < s->max_b_frames + 1; i++) {
1515 if (s->input_picture[i] &&
1516 s->input_picture[i]->b_frame_score == 0) {
1517 s->input_picture[i]->b_frame_score =
1519 s->input_picture[i ]->f->data[0],
1520 s->input_picture[i - 1]->f->data[0],
1524 for (i = 0; i < s->max_b_frames + 1; i++) {
1525 if (!s->input_picture[i] ||
1526 s->input_picture[i]->b_frame_score - 1 >
1527 s->mb_num / s->avctx->b_sensitivity)
1531 b_frames = FFMAX(0, i - 1);
/* reset scores so the next window is re-evaluated */
1534 for (i = 0; i < b_frames + 1; i++) {
1535 s->input_picture[i]->b_frame_score = 0;
1537 } else if (s->avctx->b_frame_strategy == 2) {
/* strategy 2: exhaustive trial encodes at reduced resolution */
1538 b_frames = estimate_best_b_count(s);
1540 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
/* honour explicitly pre-set picture types from the caller */
1546 for (i = b_frames - 1; i >= 0; i--) {
1547 int type = s->input_picture[i]->f->pict_type;
1548 if (type && type != AV_PICTURE_TYPE_B)
1551 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1552 b_frames == s->max_b_frames) {
1553 av_log(s->avctx, AV_LOG_ERROR,
1554 "warning, too many b frames in a row\n");
/* GOP boundary handling: trim or re-type at the GOP edge */
1557 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1558 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1559 s->gop_size > s->picture_in_gop_number) {
1560 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1562 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1564 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1568 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1569 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
/* emit the anchor first, then the B frames, in coded order */
1572 s->reordered_input_picture[0] = s->input_picture[b_frames];
1573 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1574 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1575 s->reordered_input_picture[0]->f->coded_picture_number =
1576 s->coded_picture_number++;
1577 for (i = 0; i < b_frames; i++) {
1578 s->reordered_input_picture[i + 1] = s->input_picture[i];
1579 s->reordered_input_picture[i + 1]->f->pict_type =
1581 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1582 s->coded_picture_number++;
/* --- publish the selected picture --- */
1587 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1589 if (s->reordered_input_picture[0]) {
/* B frames are never used as references (3 = frame reference flags) */
1590 s->reordered_input_picture[0]->reference =
1591 s->reordered_input_picture[0]->f->pict_type !=
1592 AV_PICTURE_TYPE_B ? 3 : 0;
1594 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1597 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1598 // input is a shared pix, so we can't modifiy it -> alloc a new
1599 // one & ensure that the shared one is reuseable
1602 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1605 pic = &s->picture[i];
1607 pic->reference = s->reordered_input_picture[0]->reference;
1608 if (alloc_picture(s, pic, 0) < 0) {
1612 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1616 /* mark us unused / free shared pic */
1617 av_frame_unref(s->reordered_input_picture[0]->f);
1618 s->reordered_input_picture[0]->shared = 0;
1620 s->current_picture_ptr = pic;
1622 // input is not a shared pix -> reuse buffer for current_pix
1623 s->current_picture_ptr = s->reordered_input_picture[0];
1624 for (i = 0; i < 4; i++) {
1625 s->new_picture.f->data[i] += INPLACE_OFFSET;
1628 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1629 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1630 s->current_picture_ptr)) < 0)
1633 s->picture_number = s->new_picture.f->display_picture_number;
/*
 * Per-frame post-encoding bookkeeping: pad the edges of reference frames
 * (needed for unrestricted motion vectors), remember last picture type and
 * lambda, and mirror results into the deprecated coded_frame/error fields.
 *
 * NOTE(review): some interior lines (condition continuation at the top,
 * closing braces, #endif lines) are missing from this chunk; leading
 * numbers are the original file's line numbers.
 */
1638 static void frame_end(MpegEncContext *s)
/* only reference frames need padded edges for out-of-frame motion vectors */
1640 if (s->unrestricted_mv &&
1641 s->current_picture.reference &&
1643 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1644 int hshift = desc->log2_chroma_w;
1645 int vshift = desc->log2_chroma_h;
/* luma plane, then both chroma planes scaled by the subsampling shifts */
1646 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1647 s->current_picture.f->linesize[0],
1648 s->h_edge_pos, s->v_edge_pos,
1649 EDGE_WIDTH, EDGE_WIDTH,
1650 EDGE_TOP | EDGE_BOTTOM);
1651 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1652 s->current_picture.f->linesize[1],
1653 s->h_edge_pos >> hshift,
1654 s->v_edge_pos >> vshift,
1655 EDGE_WIDTH >> hshift,
1656 EDGE_WIDTH >> vshift,
1657 EDGE_TOP | EDGE_BOTTOM);
1658 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1659 s->current_picture.f->linesize[2],
1660 s->h_edge_pos >> hshift,
1661 s->v_edge_pos >> vshift,
1662 EDGE_WIDTH >> hshift,
1663 EDGE_WIDTH >> vshift,
1664 EDGE_TOP | EDGE_BOTTOM);
/* remember state used by the next frame's lambda/type decisions */
1669 s->last_pict_type = s->pict_type;
1670 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1671 if (s->pict_type!= AV_PICTURE_TYPE_B)
1672 s->last_non_b_pict_type = s->pict_type;
/* legacy API mirrors, compiled only while the deprecated fields exist */
1674 #if FF_API_CODED_FRAME
1675 FF_DISABLE_DEPRECATION_WARNINGS
1676 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1677 FF_ENABLE_DEPRECATION_WARNINGS
1679 #if FF_API_ERROR_FRAME
1680 FF_DISABLE_DEPRECATION_WARNINGS
1681 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1682 sizeof(s->current_picture.encoding_error));
1683 FF_ENABLE_DEPRECATION_WARNINGS
/*
 * Refresh the DCT denoising offsets from the running per-coefficient error
 * statistics, separately for intra and inter blocks. When a counter grows
 * past 2^16 both the counter and the error sums are halved (exponential
 * forgetting), then each of the 64 offsets is recomputed as
 * noise_reduction * count / error_sum (rounded).
 * NOTE(review): declarations and closing braces are missing from this
 * chunk; leading numbers are the original file's line numbers.
 */
1687 static void update_noise_reduction(MpegEncContext *s)
1691 for (intra = 0; intra < 2; intra++) {
/* decay history so recent frames dominate */
1692 if (s->dct_count[intra] > (1 << 16)) {
1693 for (i = 0; i < 64; i++) {
1694 s->dct_error_sum[intra][i] >>= 1;
1696 s->dct_count[intra] >>= 1;
/* offset per coefficient, rounded via +error_sum/2 and the +1 guard
 * against division by zero */
1699 for (i = 0; i < 64; i++) {
1700 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1701 s->dct_count[intra] +
1702 s->dct_error_sum[intra][i] / 2) /
1703 (s->dct_error_sum[intra][i] + 1);
/*
 * Per-frame pre-encoding setup: rotate the last/next/current reference
 * picture pointers, re-reference them, adjust data/linesize for field
 * pictures, select the dequantizer functions for the active codec, and
 * refresh the noise-reduction tables if enabled.
 *
 * NOTE(review): some interior lines (returns, closing braces) are missing
 * from this chunk; leading numbers are the original file's line numbers.
 */
1708 static int frame_start(MpegEncContext *s)
1712 /* mark & release old frames */
1713 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1714 s->last_picture_ptr != s->next_picture_ptr &&
1715 s->last_picture_ptr->f->buf[0]) {
1716 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1719 s->current_picture_ptr->f->pict_type = s->pict_type;
1720 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1722 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1723 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1724 s->current_picture_ptr)) < 0)
/* non-B frames become the next reference pair */
1727 if (s->pict_type != AV_PICTURE_TYPE_B) {
1728 s->last_picture_ptr = s->next_picture_ptr;
1730 s->next_picture_ptr = s->current_picture_ptr;
1733 if (s->last_picture_ptr) {
1734 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1735 if (s->last_picture_ptr->f->buf[0] &&
1736 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1737 s->last_picture_ptr)) < 0)
1740 if (s->next_picture_ptr) {
1741 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1742 if (s->next_picture_ptr->f->buf[0] &&
1743 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1744 s->next_picture_ptr)) < 0)
/* field pictures: double strides, bottom field starts one line down */
1748 if (s->picture_structure!= PICT_FRAME) {
1750 for (i = 0; i < 4; i++) {
1751 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1752 s->current_picture.f->data[i] +=
1753 s->current_picture.f->linesize[i];
1755 s->current_picture.f->linesize[i] *= 2;
1756 s->last_picture.f->linesize[i] *= 2;
1757 s->next_picture.f->linesize[i] *= 2;
/* pick the dequantizers matching the output format */
1761 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1762 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1763 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1764 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1765 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1766 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1768 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1769 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1772 if (s->dct_error_sum) {
1773 av_assert2(s->avctx->noise_reduction && s->encoding);
1774 update_noise_reduction(s);
/*
 * Public entry point: encode one video frame into pkt.
 * Pipeline: load_input_picture -> select_input_picture -> allocate the
 * output packet and per-slice bit writers -> frame_start/encode_picture ->
 * VBV check with possible re-encode at a higher lambda -> stuffing bytes,
 * mpeg1/2 vbv_delay patching for CBR -> pts/dts and packet finalization.
 *
 * NOTE(review): many interior lines (error paths, closing braces, several
 * statements) are missing from this chunk; leading numbers are the
 * original file's line numbers. Comments only describe what the visible
 * lines establish.
 */
1780 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1781 const AVFrame *pic_arg, int *got_packet)
1783 MpegEncContext *s = avctx->priv_data;
1784 int i, stuffing_count, ret;
1785 int context_count = s->slice_context_count;
1787 s->vbv_ignore_qmax = 0;
1789 s->picture_in_gop_number++;
1791 if (load_input_picture(s, pic_arg) < 0)
1794 if (select_input_picture(s) < 0) {
/* --- a picture was selected: encode it --- */
1799 if (s->new_picture.f->data[0]) {
/* single slice context without a caller buffer can grow the packet */
1800 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1801 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1803 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1804 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
/* optional H.263 macroblock-info side data (12 bytes per MB) */
1807 s->mb_info_ptr = av_packet_new_side_data(pkt,
1808 AV_PKT_DATA_H263_MB_INFO,
1809 s->mb_width*s->mb_height*12);
1810 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* partition the packet among slice threads proportional to mb rows */
1813 for (i = 0; i < context_count; i++) {
1814 int start_y = s->thread_context[i]->start_mb_y;
1815 int end_y = s->thread_context[i]-> end_mb_y;
1816 int h = s->mb_height;
1817 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1818 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1820 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1823 s->pict_type = s->new_picture.f->pict_type;
1825 ret = frame_start(s);
1829 ret = encode_picture(s, s->picture_number);
1830 if (growing_buffer) {
1831 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1832 pkt->data = s->pb.buf;
1833 pkt->size = avctx->internal->byte_buffer_size;
/* export per-frame bit statistics */
1838 avctx->header_bits = s->header_bits;
1839 avctx->mv_bits = s->mv_bits;
1840 avctx->misc_bits = s->misc_bits;
1841 avctx->i_tex_bits = s->i_tex_bits;
1842 avctx->p_tex_bits = s->p_tex_bits;
1843 avctx->i_count = s->i_count;
1844 // FIXME f/b_count in avctx
1845 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1846 avctx->skip_count = s->skip_count;
1850 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1851 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
/* --- VBV conformance: re-encode with a larger lambda if too big --- */
1853 if (avctx->rc_buffer_size) {
1854 RateControlContext *rcc = &s->rc_context;
1855 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1856 int hq = (s->avctx->mb_decision == FF_MB_DECISION_RD || s->avctx->trellis);
1857 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1859 if (put_bits_count(&s->pb) > max_size &&
1860 s->lambda < s->lmax) {
1861 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1862 (s->qscale + 1) / s->qscale);
1863 if (s->adaptive_quant) {
1865 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1866 s->lambda_table[i] =
1867 FFMAX(s->lambda_table[i] + min_step,
1868 s->lambda_table[i] * (s->qscale + 1) /
1871 s->mb_skipped = 0; // done in frame_start()
1872 // done in encode_picture() so we must undo it
1873 if (s->pict_type == AV_PICTURE_TYPE_P) {
1874 if (s->flipflop_rounding ||
1875 s->codec_id == AV_CODEC_ID_H263P ||
1876 s->codec_id == AV_CODEC_ID_MPEG4)
1877 s->no_rounding ^= 1;
1879 if (s->pict_type != AV_PICTURE_TYPE_B) {
1880 s->time_base = s->last_time_base;
1881 s->last_non_b_time = s->time - s->pp_time;
/* rewind every slice writer before the retry */
1883 for (i = 0; i < context_count; i++) {
1884 PutBitContext *pb = &s->thread_context[i]->pb;
1885 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1887 s->vbv_ignore_qmax = 1;
1888 av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1892 av_assert0(s->avctx->rc_max_rate);
1895 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1896 ff_write_pass1_stats(s);
/* propagate per-plane encoding error / PSNR stats */
1898 for (i = 0; i < 4; i++) {
1899 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1900 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1902 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1903 s->current_picture_ptr->encoding_error,
1904 (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1907 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1908 assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1909 avctx->i_tex_bits + avctx->p_tex_bits ==
1910 put_bits_count(&s->pb));
1911 flush_put_bits(&s->pb);
1912 s->frame_bits = put_bits_count(&s->pb);
/* --- stuffing bytes demanded by the VBV model --- */
1914 stuffing_count = ff_vbv_update(s, s->frame_bits);
1915 s->stuffing_bits = 8*stuffing_count;
1916 if (stuffing_count) {
1917 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1918 stuffing_count + 50) {
1919 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1923 switch (s->codec_id) {
1924 case AV_CODEC_ID_MPEG1VIDEO:
1925 case AV_CODEC_ID_MPEG2VIDEO:
/* mpeg1/2: zero stuffing bytes */
1926 while (stuffing_count--) {
1927 put_bits(&s->pb, 8, 0);
1930 case AV_CODEC_ID_MPEG4:
/* mpeg4: stuffing startcode (0x000001C3) then 0xFF filler */
1931 put_bits(&s->pb, 16, 0);
1932 put_bits(&s->pb, 16, 0x1C3);
1933 stuffing_count -= 4;
1934 while (stuffing_count--) {
1935 put_bits(&s->pb, 8, 0xFF);
1939 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1941 flush_put_bits(&s->pb);
1942 s->frame_bits = put_bits_count(&s->pb);
1945 /* update mpeg1/2 vbv_delay for CBR */
1946 if (s->avctx->rc_max_rate &&
1947 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1948 s->out_format == FMT_MPEG1 &&
1949 90000LL * (avctx->rc_buffer_size - 1) <=
1950 s->avctx->rc_max_rate * 0xFFFFLL) {
1951 AVCPBProperties *props;
1954 int vbv_delay, min_delay;
/* bits entering the buffer during one frame interval */
1955 double inbits = s->avctx->rc_max_rate *
1956 av_q2d(s->avctx->time_base);
1957 int minbits = s->frame_bits - 8 *
1958 (s->vbv_delay_ptr - s->pb.buf - 1);
1959 double bits = s->rc_context.buffer_index + minbits - inbits;
1962 av_log(s->avctx, AV_LOG_ERROR,
1963 "Internal error, negative bits\n");
1965 assert(s->repeat_first_field == 0);
/* vbv_delay is expressed in 90 kHz clock ticks */
1967 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1968 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1969 s->avctx->rc_max_rate;
1971 vbv_delay = FFMAX(vbv_delay, min_delay);
1973 av_assert0(vbv_delay < 0xFFFF);
/* patch the 16-bit vbv_delay field in-place in the written header */
1975 s->vbv_delay_ptr[0] &= 0xF8;
1976 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1977 s->vbv_delay_ptr[1] = vbv_delay >> 5;
1978 s->vbv_delay_ptr[2] &= 0x07;
1979 s->vbv_delay_ptr[2] |= vbv_delay << 3;
1981 props = av_cpb_properties_alloc(&props_size);
1983 return AVERROR(ENOMEM);
/* 27 MHz units: 90 kHz ticks * 300 */
1984 props->vbv_delay = vbv_delay * 300;
1986 #if FF_API_VBV_DELAY
1987 FF_DISABLE_DEPRECATION_WARNINGS
1988 avctx->vbv_delay = vbv_delay * 300;
1989 FF_ENABLE_DEPRECATION_WARNINGS
1992 s->total_bits += s->frame_bits;
1993 avctx->frame_bits = s->frame_bits;
/* --- timestamps: dts lags pts for reordered (B-frame) streams --- */
1995 pkt->pts = s->current_picture.f->pts;
1996 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
1997 if (!s->current_picture.f->coded_picture_number)
1998 pkt->dts = pkt->pts - s->dts_delta;
2000 pkt->dts = s->reordered_pts;
2001 s->reordered_pts = pkt->pts;
2003 pkt->dts = pkt->pts;
2004 if (s->current_picture.f->key_frame)
2005 pkt->flags |= AV_PKT_FLAG_KEY;
2007 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2012 /* release non-reference frames */
2013 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2014 if (!s->picture[i].reference)
2015 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2018 av_assert1((s->frame_bits & 7) == 0);
2020 pkt->size = s->frame_bits / 8;
2021 *got_packet = !!pkt->size;
/*
 * Zero out block n entirely when it contains only a few small coefficients
 * concentrated at low frequencies: each nonzero level-1 coefficient adds a
 * position-dependent score from tab[] (higher weight near DC), any level>1
 * coefficient makes the block ineligible, and if the total stays below
 * 'threshold' the block's last_index is reset (0 when the DC coefficient
 * is kept, -1 when everything is dropped).
 *
 * NOTE(review): interior lines (skip_dc setup, score accumulation,
 * block[j] zeroing, closing braces) are missing from this chunk; leading
 * numbers are the original file's line numbers.
 */
2025 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2026 int n, int threshold)
/* per-scan-position weight: DC and low frequencies count the most */
2028 static const char tab[64] = {
2029 3, 2, 2, 1, 1, 1, 1, 1,
2030 1, 1, 1, 1, 1, 1, 1, 1,
2031 1, 1, 1, 1, 1, 1, 1, 1,
2032 0, 0, 0, 0, 0, 0, 0, 0,
2033 0, 0, 0, 0, 0, 0, 0, 0,
2034 0, 0, 0, 0, 0, 0, 0, 0,
2035 0, 0, 0, 0, 0, 0, 0, 0,
2036 0, 0, 0, 0, 0, 0, 0, 0
2041 int16_t *block = s->block[n];
2042 const int last_index = s->block_last_index[n];
/* negative threshold encodes "keep the DC coefficient" */
2045 if (threshold < 0) {
2047 threshold = -threshold;
2051 /* Are all we could set to zero already zero? */
2052 if (last_index <= skip_dc - 1)
2055 for (i = 0; i <= last_index; i++) {
2056 const int j = s->intra_scantable.permutated[i];
2057 const int level = FFABS(block[j]);
2059 if (skip_dc && i == 0)
2063 } else if (level > 1) {
/* any coefficient larger than 1 disqualifies the block */
2069 if (score >= threshold)
/* eliminate: clear all (non-DC) coefficients */
2071 for (i = skip_dc; i <= last_index; i++) {
2072 const int j = s->intra_scantable.permutated[i];
2076 s->block_last_index[n] = 0;
2078 s->block_last_index[n] = -1;
/*
 * Clamp quantized coefficients to the codec's representable range
 * [min_qcoeff, max_qcoeff], counting how many were out of range; the intra
 * DC coefficient is exempt. Logs a warning in simple MB-decision mode,
 * since there the clipping is not compensated by rate-distortion search.
 * NOTE(review): interior lines (overflow init/increment, the clamp
 * assignments, the skip-DC condition) are missing from this chunk; leading
 * numbers are the original file's line numbers.
 */
2081 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2085 const int maxlevel = s->max_qcoeff;
2086 const int minlevel = s->min_qcoeff;
2090 i = 1; // skip clipping of intra dc
2094 for (; i <= last_index; i++) {
2095 const int j = s->intra_scantable.permutated[i];
2096 int level = block[j];
2098 if (level > maxlevel) {
2101 } else if (level < minlevel) {
2109 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2110 av_log(s->avctx, AV_LOG_INFO,
2111 "warning, clipping %d dct coefficients to %d..%d\n",
2112 overflow, minlevel, maxlevel);
/*
 * Compute a perceptual weight for each of the 64 positions of an 8x8
 * block from the local pixel variance: for every pixel, sum and squared
 * sum are gathered over its 3x3 neighbourhood (clipped to the block), and
 * the weight is 36 * sqrt(count*sqr - sum^2) / count — i.e. proportional
 * to the neighbourhood's standard deviation. Used by the noise-shaping
 * quantizer (dct_quantize_refine).
 * NOTE(review): declarations, the sum/sqr/count accumulation lines and
 * closing braces are missing from this chunk; leading numbers are the
 * original file's line numbers.
 */
2115 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2119 for (y = 0; y < 8; y++) {
2120 for (x = 0; x < 8; x++) {
/* 3x3 neighbourhood clipped at the block border */
2126 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2127 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2128 int v = ptr[x2 + y2 * stride];
2134 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2139 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2140 int motion_x, int motion_y,
2141 int mb_block_height,
2145 int16_t weight[12][64];
2146 int16_t orig[12][64];
2147 const int mb_x = s->mb_x;
2148 const int mb_y = s->mb_y;
2151 int dct_offset = s->linesize * 8; // default for progressive frames
2152 int uv_dct_offset = s->uvlinesize * 8;
2153 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2154 ptrdiff_t wrap_y, wrap_c;
2156 for (i = 0; i < mb_block_count; i++)
2157 skip_dct[i] = s->skipdct;
2159 if (s->adaptive_quant) {
2160 const int last_qp = s->qscale;
2161 const int mb_xy = mb_x + mb_y * s->mb_stride;
2163 s->lambda = s->lambda_table[mb_xy];
2166 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2167 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2168 s->dquant = s->qscale - last_qp;
2170 if (s->out_format == FMT_H263) {
2171 s->dquant = av_clip(s->dquant, -2, 2);
2173 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2175 if (s->pict_type == AV_PICTURE_TYPE_B) {
2176 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2179 if (s->mv_type == MV_TYPE_8X8)
2185 ff_set_qscale(s, last_qp + s->dquant);
2186 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2187 ff_set_qscale(s, s->qscale + s->dquant);
2189 wrap_y = s->linesize;
2190 wrap_c = s->uvlinesize;
2191 ptr_y = s->new_picture.f->data[0] +
2192 (mb_y * 16 * wrap_y) + mb_x * 16;
2193 ptr_cb = s->new_picture.f->data[1] +
2194 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2195 ptr_cr = s->new_picture.f->data[2] +
2196 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2198 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2199 uint8_t *ebuf = s->sc.edge_emu_buffer + 36 * wrap_y;
2200 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2201 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2202 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2204 16, 16, mb_x * 16, mb_y * 16,
2205 s->width, s->height);
2207 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2209 mb_block_width, mb_block_height,
2210 mb_x * mb_block_width, mb_y * mb_block_height,
2212 ptr_cb = ebuf + 16 * wrap_y;
2213 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2215 mb_block_width, mb_block_height,
2216 mb_x * mb_block_width, mb_y * mb_block_height,
2218 ptr_cr = ebuf + 16 * wrap_y + 16;
2222 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2223 int progressive_score, interlaced_score;
2225 s->interlaced_dct = 0;
2226 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2227 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2228 NULL, wrap_y, 8) - 400;
2230 if (progressive_score > 0) {
2231 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2232 NULL, wrap_y * 2, 8) +
2233 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2234 NULL, wrap_y * 2, 8);
2235 if (progressive_score > interlaced_score) {
2236 s->interlaced_dct = 1;
2238 dct_offset = wrap_y;
2239 uv_dct_offset = wrap_c;
2241 if (s->chroma_format == CHROMA_422 ||
2242 s->chroma_format == CHROMA_444)
2248 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2249 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2250 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2251 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2253 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2257 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2258 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2259 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2260 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2261 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2262 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2263 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2264 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2265 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2266 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2267 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2268 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2272 op_pixels_func (*op_pix)[4];
2273 qpel_mc_func (*op_qpix)[16];
2274 uint8_t *dest_y, *dest_cb, *dest_cr;
2276 dest_y = s->dest[0];
2277 dest_cb = s->dest[1];
2278 dest_cr = s->dest[2];
2280 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2281 op_pix = s->hdsp.put_pixels_tab;
2282 op_qpix = s->qdsp.put_qpel_pixels_tab;
2284 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2285 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2288 if (s->mv_dir & MV_DIR_FORWARD) {
2289 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2290 s->last_picture.f->data,
2292 op_pix = s->hdsp.avg_pixels_tab;
2293 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2295 if (s->mv_dir & MV_DIR_BACKWARD) {
2296 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2297 s->next_picture.f->data,
2301 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2302 int progressive_score, interlaced_score;
2304 s->interlaced_dct = 0;
2305 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2306 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2310 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2311 progressive_score -= 400;
2313 if (progressive_score > 0) {
2314 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2316 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2320 if (progressive_score > interlaced_score) {
2321 s->interlaced_dct = 1;
2323 dct_offset = wrap_y;
2324 uv_dct_offset = wrap_c;
2326 if (s->chroma_format == CHROMA_422)
2332 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2333 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2334 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2335 dest_y + dct_offset, wrap_y);
2336 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2337 dest_y + dct_offset + 8, wrap_y);
2339 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2343 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2344 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2345 if (!s->chroma_y_shift) { /* 422 */
2346 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2347 dest_cb + uv_dct_offset, wrap_c);
2348 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2349 dest_cr + uv_dct_offset, wrap_c);
2352 /* pre quantization */
2353 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2354 2 * s->qscale * s->qscale) {
2356 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2358 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2360 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2361 wrap_y, 8) < 20 * s->qscale)
2363 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2364 wrap_y, 8) < 20 * s->qscale)
2366 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2368 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2370 if (!s->chroma_y_shift) { /* 422 */
2371 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2372 dest_cb + uv_dct_offset,
2373 wrap_c, 8) < 20 * s->qscale)
2375 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2376 dest_cr + uv_dct_offset,
2377 wrap_c, 8) < 20 * s->qscale)
2383 if (s->quantizer_noise_shaping) {
2385 get_visual_weight(weight[0], ptr_y , wrap_y);
2387 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2389 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2391 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2393 get_visual_weight(weight[4], ptr_cb , wrap_c);
2395 get_visual_weight(weight[5], ptr_cr , wrap_c);
2396 if (!s->chroma_y_shift) { /* 422 */
2398 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2401 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2404 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2407 /* DCT & quantize */
2408 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2410 for (i = 0; i < mb_block_count; i++) {
2413 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2414 // FIXME we could decide to change to quantizer instead of
2416 // JS: I don't think that would be a good idea it could lower
2417 // quality instead of improve it. Just INTRADC clipping
2418 // deserves changes in quantizer
2420 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2422 s->block_last_index[i] = -1;
2424 if (s->quantizer_noise_shaping) {
2425 for (i = 0; i < mb_block_count; i++) {
2427 s->block_last_index[i] =
2428 dct_quantize_refine(s, s->block[i], weight[i],
2429 orig[i], i, s->qscale);
2434 if (s->luma_elim_threshold && !s->mb_intra)
2435 for (i = 0; i < 4; i++)
2436 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2437 if (s->chroma_elim_threshold && !s->mb_intra)
2438 for (i = 4; i < mb_block_count; i++)
2439 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2441 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2442 for (i = 0; i < mb_block_count; i++) {
2443 if (s->block_last_index[i] == -1)
2444 s->coded_score[i] = INT_MAX / 256;
2449 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2450 s->block_last_index[4] =
2451 s->block_last_index[5] = 0;
2453 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2454 if (!s->chroma_y_shift) { /* 422 / 444 */
2455 for (i=6; i<12; i++) {
2456 s->block_last_index[i] = 0;
2457 s->block[i][0] = s->block[4][0];
2462 // non c quantize code returns incorrect block_last_index FIXME
2463 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2464 for (i = 0; i < mb_block_count; i++) {
2466 if (s->block_last_index[i] > 0) {
2467 for (j = 63; j > 0; j--) {
2468 if (s->block[i][s->intra_scantable.permutated[j]])
2471 s->block_last_index[i] = j;
2476 /* huffman encode */
2477 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2478 case AV_CODEC_ID_MPEG1VIDEO:
2479 case AV_CODEC_ID_MPEG2VIDEO:
2480 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2481 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2483 case AV_CODEC_ID_MPEG4:
2484 if (CONFIG_MPEG4_ENCODER)
2485 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2487 case AV_CODEC_ID_MSMPEG4V2:
2488 case AV_CODEC_ID_MSMPEG4V3:
2489 case AV_CODEC_ID_WMV1:
2490 if (CONFIG_MSMPEG4_ENCODER)
2491 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2493 case AV_CODEC_ID_WMV2:
2494 if (CONFIG_WMV2_ENCODER)
2495 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2497 case AV_CODEC_ID_H261:
2498 if (CONFIG_H261_ENCODER)
2499 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2501 case AV_CODEC_ID_H263:
2502 case AV_CODEC_ID_H263P:
2503 case AV_CODEC_ID_FLV1:
2504 case AV_CODEC_ID_RV10:
2505 case AV_CODEC_ID_RV20:
2506 if (CONFIG_H263_ENCODER)
2507 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2509 case AV_CODEC_ID_MJPEG:
2510 case AV_CODEC_ID_AMV:
2511 if (CONFIG_MJPEG_ENCODER)
2512 ff_mjpeg_encode_mb(s, s->block);
/* Encode one macroblock, dispatching on the chroma subsampling format:
 * 4:2:0 -> 8x8 chroma blocks, 6 blocks per MB;
 * 4:2:2 -> 16x8 chroma, 8 blocks per MB;
 * otherwise (4:4:4) -> 16x16 chroma, 12 blocks per MB. */
2519 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2521     if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2522     else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2523     else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/* Snapshot the encoder state of s into d before a trial encode of one MB,
 * so the trial can later be undone/compared (see encode_mb_hq()).
 * Copies prediction state (last MVs, last DC values, skip run), the
 * bit-accounting counters, and quantizer state. */
2526 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2529     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2532     d->mb_skip_run= s->mb_skip_run;
2534         d->last_dc[i] = s->last_dc[i];
    /* statistics counters (used for rate accounting / 2-pass logs) */
2537     d->mv_bits= s->mv_bits;
2538     d->i_tex_bits= s->i_tex_bits;
2539     d->p_tex_bits= s->p_tex_bits;
2540     d->i_count= s->i_count;
2541     d->f_count= s->f_count;
2542     d->b_count= s->b_count;
2543     d->skip_count= s->skip_count;
2544     d->misc_bits= s->misc_bits;
    /* quantizer state */
2548     d->qscale= s->qscale;
2549     d->dquant= s->dquant;
2551     d->esc3_level_length= s->esc3_level_length;
/* Copy the encoder state of s into d after a trial encode of one MB.
 * Compared with copy_context_before_encode() this additionally copies the
 * chosen MVs, MB mode flags (mb_intra/mb_skipped/mv_type/mv_dir), the
 * partitioned-data PutBitContexts, block_last_index[] and interlaced_dct,
 * i.e. everything needed to commit the winning trial as the final result. */
2554 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2557     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2558     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2561     d->mb_skip_run= s->mb_skip_run;
2563         d->last_dc[i] = s->last_dc[i];
    /* statistics counters */
2566     d->mv_bits= s->mv_bits;
2567     d->i_tex_bits= s->i_tex_bits;
2568     d->p_tex_bits= s->p_tex_bits;
2569     d->i_count= s->i_count;
2570     d->f_count= s->f_count;
2571     d->b_count= s->b_count;
2572     d->skip_count= s->skip_count;
2573     d->misc_bits= s->misc_bits;
    /* macroblock coding decision */
2575     d->mb_intra= s->mb_intra;
2576     d->mb_skipped= s->mb_skipped;
2577     d->mv_type= s->mv_type;
2578     d->mv_dir= s->mv_dir;
2580     if(s->data_partitioning){
2582         d->tex_pb= s->tex_pb;
2586         d->block_last_index[i]= s->block_last_index[i];
2587     d->interlaced_dct= s->interlaced_dct;
2588     d->qscale= s->qscale;
2590     d->esc3_level_length= s->esc3_level_length;
/* Trial-encode one macroblock with the candidate MB type given in 'type'
 * and keep it if it beats the best score so far (*dmin).
 *
 * The trial writes into the double-buffered PutBitContexts pb/pb2/tex_pb
 * indexed by *next_block, and decodes into the rd_scratchpad instead of the
 * real destination so the reconstruction can be discarded.  The score is the
 * bit count, or bits*lambda2 + SSE (RD cost) when mb_decision is
 * FF_MB_DECISION_RD.  On a win, the state is saved into *best via
 * copy_context_after_encode(). */
2593 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2594                            PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2595                            int *dmin, int *next_block, int motion_x, int motion_y)
2598     uint8_t *dest_backup[3];
    /* restore pristine pre-trial state, then select the scratch bit buffers */
2600     copy_context_before_encode(s, backup, type);
2602     s->block= s->blocks[*next_block];
2603     s->pb= pb[*next_block];
2604     if(s->data_partitioning){
2605         s->pb2   = pb2   [*next_block];
2606         s->tex_pb= tex_pb[*next_block];
    /* redirect reconstruction into the scratchpad so it can be thrown away */
2610         memcpy(dest_backup, s->dest, sizeof(s->dest));
2611         s->dest[0] = s->sc.rd_scratchpad;
2612         s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2613         s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2614         av_assert0(s->linesize >= 32); //FIXME
2617     encode_mb(s, motion_x, motion_y);
    /* base score: number of bits produced (all partitions) */
2619     score= put_bits_count(&s->pb);
2620     if(s->data_partitioning){
2621         score+= put_bits_count(&s->pb2);
2622         score+= put_bits_count(&s->tex_pb);
2625     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2626         ff_mpv_decode_mb(s, s->block);
        /* rate-distortion cost: bits * lambda2 + distortion (SSE) */
2628         score *= s->lambda2;
2629         score += sse_mb(s) << FF_LAMBDA_SHIFT;
2633         memcpy(s->dest, dest_backup, sizeof(s->dest));
2640         copy_context_after_encode(best, s, type);
/* Sum of squared errors between two w x h pixel regions.
 * Uses the optimized mecc.sse[] functions for the common 16x16 and 8x8
 * sizes; otherwise falls back to a scalar loop using the squared-value
 * lookup table (ff_square_tab is biased by 256 so negative differences
 * index correctly). */
2644 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2645     uint32_t *sq = ff_square_tab + 256;
2650         return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2651     else if(w==8 && h==8)
2652         return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2656             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/* Distortion of the current macroblock: compares the reconstructed MB
 * (s->dest[]) against the source frame (s->new_picture) for luma and both
 * chroma planes.  w/h are clipped at the right/bottom picture edge.
 * Uses NSSE (noise-preserving SSE) when mb_cmp selects it, plain SSE via
 * the fast 16x16/8x8 paths otherwise, and the generic sse() helper for
 * edge macroblocks.  Chroma is assumed 8x8 here (4:2:0 fast paths). */
2665 static int sse_mb(MpegEncContext *s){
2669     if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2670     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2673         if(s->avctx->mb_cmp == FF_CMP_NSSE){
2674             return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize   * 16, s->dest[0], s->linesize,   16) +
2675                    s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[1], s->uvlinesize,  8) +
2676                    s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[2], s->uvlinesize,  8);
2678             return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize   * 16, s->dest[0], s->linesize,   16) +
2679                    s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[1], s->uvlinesize,  8) +
2680                    s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[2], s->uvlinesize,  8);
        /* edge MB: clipped sizes, use the generic helper */
2683         return  sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2684                +sse(s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2685                +sse(s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/* Slice-thread worker: coarse pre-pass motion estimation for P-frames.
 * Iterates the MB rows of this slice bottom-up, right-to-left (the pre-pass
 * direction), using the pre_dia_size search diamond. */
2688 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2689     MpegEncContext *s= *(void**)arg;
2693     s->me.dia_size= s->avctx->pre_dia_size;
2694     s->first_slice_line=1;
2695     for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2696         for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2697             ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2699         s->first_slice_line=0;
/* Slice-thread worker: full motion estimation pass.
 * Walks the slice's MBs in raster order, keeping block_index[] up to date
 * (each luma block index advances by 2 per MB), and computes the motion
 * vectors and MB type for each MB (B-frame or P-frame estimator),
 * storing the results in the context tables. */
2707 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2708     MpegEncContext *s= *(void**)arg;
2710     ff_check_alignment();
2712     s->me.dia_size= s->avctx->dia_size;
2713     s->first_slice_line=1;
2714     for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2715         s->mb_x=0; //for block init below
2716         ff_init_block_index(s);
2717         for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2718             s->block_index[0]+=2;
2719             s->block_index[1]+=2;
2720             s->block_index[2]+=2;
2721             s->block_index[3]+=2;
2723             /* compute motion vector & mb_type and store in context */
2724             if(s->pict_type==AV_PICTURE_TYPE_B)
2725                 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2727                 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2729         s->first_slice_line=0;
/* Slice-thread worker: per-macroblock luma variance and mean.
 * For each 16x16 luma block of the source frame, computes
 *   mean = sum/256 (rounded) and var = (sum_sq - sum^2/256)/256 (rounded,
 *   +500 bias before the shift), stores them in current_picture.mb_var /
 * mb_mean and accumulates the per-thread variance total used by rate
 * control / adaptive quantization. */
2734 static int mb_var_thread(AVCodecContext *c, void *arg){
2735     MpegEncContext *s= *(void**)arg;
2738     ff_check_alignment();
2740     for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2741         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2744             uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2746             int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2748             varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2749                     (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2751             s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2752             s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2753             s->me.mb_var_sum_temp    += varc;
/* Terminate the current slice in the output bitstream:
 * merge MPEG-4 data partitions and write stuffing, or MJPEG stuffing,
 * then byte-align and flush the PutBitContext.  For 2-pass encoding
 * (PASS1) the alignment bits are charged to misc_bits. */
2759 static void write_slice_end(MpegEncContext *s){
2760     if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2761         if(s->partitioned_frame){
2762             ff_mpeg4_merge_partitions(s);
2765         ff_mpeg4_stuffing(&s->pb);
2766     }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2767         ff_mjpeg_encode_stuffing(s);
2770     avpriv_align_put_bits(&s->pb);
2771     flush_put_bits(&s->pb);
2773     if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2774         s->misc_bits+= get_bits_diff(s);
/* Append one 12-byte macroblock-info record (H.263 RFC 2190 style side
 * data) into the mb_info buffer: bit offset of the MB in the packet, the
 * quantizer, GOB number, MB address within the GOB, and the H.263 motion
 * vector predictors (hmv1/vmv1).  The second MV pair is always 0 since
 * 4MV is not implemented.  The record is written at the end of the buffer
 * (mb_info_size was already advanced by 12 in update_mb_info()). */
2777 static void write_mb_info(MpegEncContext *s)
2779     uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2780     int offset = put_bits_count(&s->pb);
2781     int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2782     int gobn = s->mb_y / s->gob_index;
2784     if (CONFIG_H263_ENCODER)
2785         ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2786     bytestream_put_le32(&ptr, offset);
2787     bytestream_put_byte(&ptr, s->qscale);
2788     bytestream_put_byte(&ptr, gobn);
2789     bytestream_put_le16(&ptr, mba);
2790     bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2791     bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2792     /* 4MV not implemented */
2793     bytestream_put_byte(&ptr, 0); /* hmv2 */
2794     bytestream_put_byte(&ptr, 0); /* vmv2 */
/* Bookkeeping for the mb_info side-data records (see write_mb_info()).
 * Called once per MB (startcode=0) and again right after a resync/startcode
 * is written (startcode=1).  Grows mb_info_size by one 12-byte record when
 * at least mb_info bytes of bitstream have been produced since the last
 * record; at a startcode it records the byte position so the info can be
 * written for the MB that follows the startcode.
 * NOTE(review): several control-flow lines are elided in this excerpt, so
 * the exact startcode/non-startcode paths should be confirmed against the
 * full source. */
2797 static void update_mb_info(MpegEncContext *s, int startcode)
2801     if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2802         s->mb_info_size += 12;
2803         s->prev_mb_info = s->last_mb_info;
2806         s->prev_mb_info = put_bits_count(&s->pb)/8;
2807         /* This might have incremented mb_info_size above, and we return without
2808          * actually writing any info into that slot yet. But in that case,
2809          * this will be called again at the start of the after writing the
2810          * start code, actually writing the mb info. */
2814     s->last_mb_info = put_bits_count(&s->pb)/8;
2815     if (!s->mb_info_size)
2816         s->mb_info_size += 12;
/* Grow the shared output bitstream buffer when fewer than 'threshold' bytes
 * remain, enlarging it by at least 'size_increase'.  Only possible with a
 * single slice context writing directly into avctx->internal->byte_buffer.
 * On success the PutBitContext is rebased onto the new buffer and the
 * ptr_lastgob / vbv_delay_ptr pointers into it are relocated.
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure, or
 * AVERROR(EINVAL) if the buffer could not be grown enough (e.g. multiple
 * slice contexts or a user-supplied buffer). */
2820 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2822     if (   s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2823         && s->slice_context_count == 1
2824         && s->pb.buf == s->avctx->internal->byte_buffer) {
        /* byte offsets survive the reallocation; raw pointers do not */
2825         int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2826         int vbv_pos     = s->vbv_delay_ptr - s->pb.buf;
2828         uint8_t *new_buffer = NULL;
2829         int new_buffer_size = 0;
2831         av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2832                               s->avctx->internal->byte_buffer_size + size_increase);
2834             return AVERROR(ENOMEM);
2836         memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2837         av_free(s->avctx->internal->byte_buffer);
2838         s->avctx->internal->byte_buffer      = new_buffer;
2839         s->avctx->internal->byte_buffer_size = new_buffer_size;
2840         rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2841         s->ptr_lastgob   = s->pb.buf + lastgob_pos;
2842         s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2844     if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2845         return AVERROR(EINVAL);
/* Slice-thread worker: encode all macroblocks of this slice into the
 * bitstream.  Handles slice/GOB/video-packet headers and resync markers,
 * output-buffer growth, and per-MB mode decision:
 *  - when more than one candidate MB type exists (or QP_RD is enabled),
 *    each candidate is trial-encoded via encode_mb_hq() into double-buffered
 *    scratch PutBitContexts and the cheapest (bits or RD cost) is kept;
 *  - otherwise the single candidate type is encoded directly.
 * Also updates motion-value tables, optional PSNR error accumulation, and
 * the H.263 loop filter. */
2849 static int encode_thread(AVCodecContext *c, void *arg){
2850     MpegEncContext *s= *(void**)arg;
2851     int mb_x, mb_y, pdif = 0;
2852     int chr_h= 16>>s->chroma_y_shift;
2854     MpegEncContext best_s = { 0 }, backup_s;
2855     uint8_t bit_buf[2][MAX_MB_BYTES];
2856     uint8_t bit_buf2[2][MAX_MB_BYTES];
2857     uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2858     PutBitContext pb[2], pb2[2], tex_pb[2];
2860     ff_check_alignment();
    /* two scratch bit buffers for A/B trial encodes (see encode_mb_hq) */
2863         init_put_bits(&pb    [i], bit_buf    [i], MAX_MB_BYTES);
2864         init_put_bits(&pb2   [i], bit_buf2   [i], MAX_MB_BYTES);
2865         init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2868     s->last_bits= put_bits_count(&s->pb);
2879     /* init last dc values */
2880     /* note: quant matrix value (8) is implied here */
2881     s->last_dc[i] = 128 << s->intra_dc_precision;
2883     s->current_picture.encoding_error[i] = 0;
2885     if(s->codec_id==AV_CODEC_ID_AMV){
2886         s->last_dc[0] = 128*8/13;
2887         s->last_dc[1] = 128*8/14;
2888         s->last_dc[2] = 128*8/14;
2891     memset(s->last_mv, 0, sizeof(s->last_mv));
2895     switch(s->codec_id){
2896     case AV_CODEC_ID_H263:
2897     case AV_CODEC_ID_H263P:
2898     case AV_CODEC_ID_FLV1:
2899         if (CONFIG_H263_ENCODER)
2900             s->gob_index = H263_GOB_HEIGHT(s->height);
2902     case AV_CODEC_ID_MPEG4:
2903         if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2904             ff_mpeg4_init_partitions(s);
2910     s->first_slice_line = 1;
2911     s->ptr_lastgob = s->pb.buf;
2912     for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2916         ff_set_qscale(s, s->qscale);
2917         ff_init_block_index(s);
2919         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2920             int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2921             int mb_type= s->mb_type[xy];
            /* make sure the output buffer can hold at least one more MB */
2925             int size_increase =  s->avctx->internal->byte_buffer_size/4
2926                                + s->mb_width*MAX_MB_BYTES;
2928             ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
2929             if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2930                 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2933             if(s->data_partitioning){
2934                 if(   s->pb2   .buf_end - s->pb2   .buf - (put_bits_count(&s->    pb2)>>3) < MAX_MB_BYTES
2935                    || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2936                     av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2942             s->mb_y = mb_y;  // moved into loop, can get changed by H.261
2943             ff_update_block_index(s);
2945             if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2946                 ff_h261_reorder_mb_index(s);
2947                 xy= s->mb_y*s->mb_stride + s->mb_x;
2948                 mb_type= s->mb_type[xy];
2951             /* write gob / video packet header  */
2953                 int current_packet_size, is_gob_start;
2955                 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2957                 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2959                 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2961                 switch(s->codec_id){
2962                 case AV_CODEC_ID_H263:
2963                 case AV_CODEC_ID_H263P:
2964                     if(!s->h263_slice_structured)
2965                         if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2967                 case AV_CODEC_ID_MPEG2VIDEO:
2968                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2969                 case AV_CODEC_ID_MPEG1VIDEO:
2970                     if(s->mb_skip_run) is_gob_start=0;
2972                 case AV_CODEC_ID_MJPEG:
2973                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2978                     if(s->start_mb_y != mb_y || mb_x!=0){
2981                         if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2982                             ff_mpeg4_init_partitions(s);
2986                     av_assert2((put_bits_count(&s->pb)&7) == 0);
2987                     current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
                    /* error_rate: deliberately drop packets for error-resilience testing */
2989                     if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
2990                         int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2991                         int d = 100 / s->error_rate;
2993                             current_packet_size=0;
2994                             s->pb.buf_ptr= s->ptr_lastgob;
2995                             assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2999 #if FF_API_RTP_CALLBACK
3000 FF_DISABLE_DEPRECATION_WARNINGS
3001                     if (s->avctx->rtp_callback){
3002                         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3003                         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3005 FF_ENABLE_DEPRECATION_WARNINGS
3007                     update_mb_info(s, 1);
3009                     switch(s->codec_id){
3010                     case AV_CODEC_ID_MPEG4:
3011                         if (CONFIG_MPEG4_ENCODER) {
3012                             ff_mpeg4_encode_video_packet_header(s);
3013                             ff_mpeg4_clean_buffers(s);
3016                     case AV_CODEC_ID_MPEG1VIDEO:
3017                     case AV_CODEC_ID_MPEG2VIDEO:
3018                         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3019                             ff_mpeg1_encode_slice_header(s);
3020                             ff_mpeg1_clean_buffers(s);
3023                     case AV_CODEC_ID_H263:
3024                     case AV_CODEC_ID_H263P:
3025                         if (CONFIG_H263_ENCODER)
3026                             ff_h263_encode_gob_header(s, mb_y);
3030                     if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3031                         int bits= put_bits_count(&s->pb);
3032                         s->misc_bits+= bits - s->last_bits;
3036                     s->ptr_lastgob += current_packet_size;
3037                     s->first_slice_line=1;
3038                     s->resync_mb_x=mb_x;
3039                     s->resync_mb_y=mb_y;
3043             if(  (s->resync_mb_x   == s->mb_x)
3044                && s->resync_mb_y+1 == s->mb_y){
3045                 s->first_slice_line=0;
3049             s->dquant=0; //only for QP_RD
3051             update_mb_info(s, 0);
3053             if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3055                 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3057                 copy_context_before_encode(&backup_s, s, -1);
3059                 best_s.data_partitioning= s->data_partitioning;
3060                 best_s.partitioned_frame= s->partitioned_frame;
3061                 if(s->data_partitioning){
3062                     backup_s.pb2= s->pb2;
3063                     backup_s.tex_pb= s->tex_pb;
                /* trial-encode every candidate MB type; cheapest wins (dmin) */
3066                 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3067                     s->mv_dir = MV_DIR_FORWARD;
3068                     s->mv_type = MV_TYPE_16X16;
3070                     s->mv[0][0][0] = s->p_mv_table[xy][0];
3071                     s->mv[0][0][1] = s->p_mv_table[xy][1];
3072                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3073                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3075                 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3076                     s->mv_dir = MV_DIR_FORWARD;
3077                     s->mv_type = MV_TYPE_FIELD;
3080                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3081                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3082                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3084                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3085                                  &dmin, &next_block, 0, 0);
3087                 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3088                     s->mv_dir = MV_DIR_FORWARD;
3089                     s->mv_type = MV_TYPE_16X16;
3093                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3094                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3096                 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3097                     s->mv_dir = MV_DIR_FORWARD;
3098                     s->mv_type = MV_TYPE_8X8;
3101                         s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3102                         s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3104                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3105                                  &dmin, &next_block, 0, 0);
3107                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3108                     s->mv_dir = MV_DIR_FORWARD;
3109                     s->mv_type = MV_TYPE_16X16;
3111                     s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3112                     s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3113                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3114                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3116                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3117                     s->mv_dir = MV_DIR_BACKWARD;
3118                     s->mv_type = MV_TYPE_16X16;
3120                     s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3121                     s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3122                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3123                                  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3125                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3126                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3127                     s->mv_type = MV_TYPE_16X16;
3129                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3130                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3131                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3132                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3133                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3134                                  &dmin, &next_block, 0, 0);
3136                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3137                     s->mv_dir = MV_DIR_FORWARD;
3138                     s->mv_type = MV_TYPE_FIELD;
3141                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3142                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3143                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3145                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3146                                  &dmin, &next_block, 0, 0);
3148                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3149                     s->mv_dir = MV_DIR_BACKWARD;
3150                     s->mv_type = MV_TYPE_FIELD;
3153                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3154                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3155                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3157                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3158                                  &dmin, &next_block, 0, 0);
3160                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3161                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3162                     s->mv_type = MV_TYPE_FIELD;
3164                     for(dir=0; dir<2; dir++){
3166                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3167                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3168                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3171                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3172                                  &dmin, &next_block, 0, 0);
3174                 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3176                     s->mv_type = MV_TYPE_16X16;
3180                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3181                                  &dmin, &next_block, 0, 0);
3182                     if(s->h263_pred || s->h263_aic){
3184                             s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3186                             ff_clean_intra_table_entries(s); //old mode?
                /* QP_RD: additionally try dquant offsets around the best mode's QP */
3190                 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3191                     if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3192                         const int last_qp= backup_s.qscale;
3195                         const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3196                         static const int dquant_tab[4]={-1,1,-2,2};
3197                         int storecoefs = s->mb_intra && s->dc_val[0];
3199                         av_assert2(backup_s.dquant == 0);
3202                         s->mv_dir= best_s.mv_dir;
3203                         s->mv_type = MV_TYPE_16X16;
3204                         s->mb_intra= best_s.mb_intra;
3205                         s->mv[0][0][0] = best_s.mv[0][0][0];
3206                         s->mv[0][0][1] = best_s.mv[0][0][1];
3207                         s->mv[1][0][0] = best_s.mv[1][0][0];
3208                         s->mv[1][0][1] = best_s.mv[1][0][1];
3210                         qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3211                         for(; qpi<4; qpi++){
3212                             int dquant= dquant_tab[qpi];
3213                             qp= last_qp + dquant;
3214                             if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3216                             backup_s.dquant= dquant;
                            /* intra: save DC/AC prediction state so a losing trial can be undone */
3219                                     dc[i]= s->dc_val[0][ s->block_index[i] ];
3220                                     memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3224                             encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3225                                          &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3226                             if(best_s.qscale != qp){
3229                                         s->dc_val[0][ s->block_index[i] ]= dc[i];
3230                                         memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3237                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3238                     int mx= s->b_direct_mv_table[xy][0];
3239                     int my= s->b_direct_mv_table[xy][1];
3241                     backup_s.dquant = 0;
3242                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3244                     ff_mpeg4_set_direct_mv(s, mx, my);
3245                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3246                                  &dmin, &next_block, mx, my);
3248                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3249                     backup_s.dquant = 0;
3250                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3252                     ff_mpeg4_set_direct_mv(s, 0, 0);
3253                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3254                                  &dmin, &next_block, 0, 0);
                /* SKIP_RD: if the best mode coded nothing, retry as skipped */
3256                 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3259                         coded |= s->block_last_index[i];
3262                         memcpy(s->mv, best_s.mv, sizeof(s->mv));
3263                         if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3264                             mx=my=0; //FIXME find the one we actually used
3265                             ff_mpeg4_set_direct_mv(s, mx, my);
3266                         }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3274                         s->mv_dir= best_s.mv_dir;
3275                         s->mv_type = best_s.mv_type;
3277 /*                        s->mv[0][0][0] = best_s.mv[0][0][0];
3278                         s->mv[0][0][1] = best_s.mv[0][0][1];
3279                         s->mv[1][0][0] = best_s.mv[1][0][0];
3280                         s->mv[1][0][1] = best_s.mv[1][0][1];*/
3283                         encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3284                                      &dmin, &next_block, mx, my);
                /* commit the winning trial: state, then its bits */
3289                 s->current_picture.qscale_table[xy] = best_s.qscale;
3291                 copy_context_after_encode(s, &best_s, -1);
3293                 pb_bits_count= put_bits_count(&s->pb);
3294                 flush_put_bits(&s->pb);
3295                 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3298                 if(s->data_partitioning){
3299                     pb2_bits_count= put_bits_count(&s->pb2);
3300                     flush_put_bits(&s->pb2);
3301                     avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3302                     s->pb2= backup_s.pb2;
3304                     tex_pb_bits_count= put_bits_count(&s->tex_pb);
3305                     flush_put_bits(&s->tex_pb);
3306                     avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3307                     s->tex_pb= backup_s.tex_pb;
3309                 s->last_bits= put_bits_count(&s->pb);
3311                 if (CONFIG_H263_ENCODER &&
3312                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3313                     ff_h263_update_motion_val(s);
3315                 if(next_block==0){ //FIXME 16 vs linesize16
3316                     s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad                     , s->linesize  ,16);
3317                     s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize    , s->uvlinesize, 8);
3318                     s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3321                 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3322                     ff_mpv_decode_mb(s, s->block);
            /* single candidate MB type: encode it directly */
3324                 int motion_x = 0, motion_y = 0;
3325                 s->mv_type=MV_TYPE_16X16;
3326                 // only one MB-Type possible
3329                 case CANDIDATE_MB_TYPE_INTRA:
3332                     motion_x= s->mv[0][0][0] = 0;
3333                     motion_y= s->mv[0][0][1] = 0;
3335                 case CANDIDATE_MB_TYPE_INTER:
3336                     s->mv_dir = MV_DIR_FORWARD;
3338                     motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3339                     motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3341                 case CANDIDATE_MB_TYPE_INTER_I:
3342                     s->mv_dir = MV_DIR_FORWARD;
3343                     s->mv_type = MV_TYPE_FIELD;
3346                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3347                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3348                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3351                 case CANDIDATE_MB_TYPE_INTER4V:
3352                     s->mv_dir = MV_DIR_FORWARD;
3353                     s->mv_type = MV_TYPE_8X8;
3356                         s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3357                         s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3360                 case CANDIDATE_MB_TYPE_DIRECT:
3361                     if (CONFIG_MPEG4_ENCODER) {
3362                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3364                         motion_x=s->b_direct_mv_table[xy][0];
3365                         motion_y=s->b_direct_mv_table[xy][1];
3366                         ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3369                 case CANDIDATE_MB_TYPE_DIRECT0:
3370                     if (CONFIG_MPEG4_ENCODER) {
3371                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3373                         ff_mpeg4_set_direct_mv(s, 0, 0);
3376                 case CANDIDATE_MB_TYPE_BIDIR:
3377                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3379                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3380                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3381                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3382                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3384                 case CANDIDATE_MB_TYPE_BACKWARD:
3385                     s->mv_dir = MV_DIR_BACKWARD;
3387                     motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3388                     motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3390                 case CANDIDATE_MB_TYPE_FORWARD:
3391                     s->mv_dir = MV_DIR_FORWARD;
3393                     motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3394                     motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3396                 case CANDIDATE_MB_TYPE_FORWARD_I:
3397                     s->mv_dir = MV_DIR_FORWARD;
3398                     s->mv_type = MV_TYPE_FIELD;
3401                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3402                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3403                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3406                 case CANDIDATE_MB_TYPE_BACKWARD_I:
3407                     s->mv_dir = MV_DIR_BACKWARD;
3408                     s->mv_type = MV_TYPE_FIELD;
3411                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3412                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3413                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3416                 case CANDIDATE_MB_TYPE_BIDIR_I:
3417                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3418                     s->mv_type = MV_TYPE_FIELD;
3420                     for(dir=0; dir<2; dir++){
3422                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3423                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3424                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3429                     av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3432                 encode_mb(s, motion_x, motion_y);
3434                 // RAL: Update last macroblock type
3435                 s->last_mv_dir = s->mv_dir;
3437                 if (CONFIG_H263_ENCODER &&
3438                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3439                     ff_h263_update_motion_val(s);
3441                     ff_mpv_decode_mb(s, s->block);
3444             /* clean the MV table in IPS frames for direct mode in B frames */
3445             if(s->mb_intra /* && I,P,S_TYPE */){
3446                 s->p_mv_table[xy][0]=0;
3447                 s->p_mv_table[xy][1]=0;
3450             if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3454                 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3455                 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3457                 s->current_picture.encoding_error[0] += sse(
3458                     s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3459                     s->dest[0], w, h, s->linesize);
3460                 s->current_picture.encoding_error[1] += sse(
3461                     s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
3462                     s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3463                 s->current_picture.encoding_error[2] += sse(
3464                     s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
3465                     s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3468             if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3469                 ff_h263_loop_filter(s);
3471             ff_dlog(s->avctx, "MB %d %d bits\n",
3472                     s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3476     //not beautiful here but we must write it before flushing so it has to be here
3477     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3478         ff_msmpeg4_encode_ext_header(s);
3482 #if FF_API_RTP_CALLBACK
3483 FF_DISABLE_DEPRECATION_WARNINGS
3484     /* Send the last GOB if RTP */
3485     if (s->avctx->rtp_callback) {
3486         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3487         pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3488         /* Call the RTP callback to send the last GOB */
3490         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3492 FF_ENABLE_DEPRECATION_WARNINGS
/* Merge helper used by the context-merge functions below: adds src->field
 * into dst->field and zeroes the source so a second merge cannot
 * double-count.  Note: intentionally two statements, not do{}while(0) --
 * callers always use it as a standalone statement. */
3498 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold the motion-estimation statistics gathered by one slice-thread
 * context back into the main context; MERGE() zeroes the source fields
 * so repeated merges are safe. */
3499 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3500 MERGE(me.scene_change_score);
3501 MERGE(me.mc_mb_var_sum_temp);
3502 MERGE(me.mb_var_sum_temp);
/* Merge per-slice encoder state into the main context after encoding:
 * noise-reduction DCT statistics, error-resilience counters, the PSNR
 * error sums, and finally the slice's bitstream, which is appended to
 * the main PutBitContext. */
3505 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3508 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3509 MERGE(dct_count[1]);
3518 MERGE(er.error_count);
3519 MERGE(padding_bug_score);
3520 MERGE(current_picture.encoding_error[0]);
3521 MERGE(current_picture.encoding_error[1]);
3522 MERGE(current_picture.encoding_error[2]);
/* Noise reduction keeps running per-coefficient DCT error sums for both
 * intra ([0]) and inter ([1]) blocks. */
3524 if(dst->avctx->noise_reduction){
3525 for(i=0; i<64; i++){
3526 MERGE(dct_error_sum[0][i]);
3527 MERGE(dct_error_sum[1][i]);
/* Each slice ends byte-aligned, so the bitstream copy below may operate
 * on whole bytes. */
3531 assert(put_bits_count(&src->pb) % 8 ==0);
3532 assert(put_bits_count(&dst->pb) % 8 ==0);
3533 avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3534 flush_put_bits(&dst->pb);
/* Select the picture-level quality (qscale/lambda) for the frame about
 * to be encoded.  A pending next_lambda (set by the rate controller for
 * the following frame) takes priority; otherwise the rate controller is
 * consulted unless a fixed qscale is in use.  When dry_run is nonzero,
 * persistent state is not consumed (next_lambda is kept for the real
 * pass).  For adaptive quantisation the per-MB qscale table is cleaned
 * up per codec afterwards. */
3537 static int estimate_qp(MpegEncContext *s, int dry_run){
3538 if (s->next_lambda){
3539 s->current_picture_ptr->f->quality =
3540 s->current_picture.f->quality = s->next_lambda;
3541 if(!dry_run) s->next_lambda= 0;
3542 } else if (!s->fixed_qscale) {
3543 s->current_picture_ptr->f->quality =
3544 s->current_picture.f->quality = ff_rate_estimate_qscale(s, dry_run);
/* A negative quality from the rate controller signals an error. */
3545 if (s->current_picture.f->quality < 0)
3549 if(s->adaptive_quant){
3550 switch(s->codec_id){
3551 case AV_CODEC_ID_MPEG4:
3552 if (CONFIG_MPEG4_ENCODER)
3553 ff_clean_mpeg4_qscales(s);
3555 case AV_CODEC_ID_H263:
3556 case AV_CODEC_ID_H263P:
3557 case AV_CODEC_ID_FLV1:
3558 if (CONFIG_H263_ENCODER)
3559 ff_clean_h263_qscales(s);
3562 ff_init_qscale_tab(s);
3565 s->lambda= s->lambda_table[0];
3568 s->lambda = s->current_picture.f->quality;
3573 /* must be called before writing the header */
/* Derive the temporal distances used for MV prediction/scaling from the
 * frame PTS: pp_time is the distance between the two reference (non-B)
 * frames surrounding the current position, pb_time the distance from
 * the previous non-B frame to the current B frame. */
3574 static void set_frame_distances(MpegEncContext * s){
3575 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3576 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3578 if(s->pict_type==AV_PICTURE_TYPE_B){
3579 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3580 assert(s->pb_time > 0 && s->pb_time < s->pp_time);
/* Non-B frame: it becomes the new reference point in time. */
3582 s->pp_time= s->time - s->last_non_b_time;
3583 s->last_non_b_time= s->time;
3584 assert(s->picture_number==0 || s->pp_time > 0);
/*
 * Encode one picture: run motion estimation across all slice contexts,
 * derive the MV range codes (f_code/b_code), set up the quantisation
 * matrices, write the codec-specific picture header, then run the
 * per-slice encoders and merge their output back into the main context.
 * Returns 0 on success or a negative error code (several early-return
 * error paths exist).
 */
3588 static int encode_picture(MpegEncContext *s, int picture_number)
3592 int context_count = s->slice_context_count;
3594 s->picture_number = picture_number;
3596 /* Reset the average MB variance */
3597 s->me.mb_var_sum_temp =
3598 s->me.mc_mb_var_sum_temp = 0;
3600 /* we need to initialize some time vars before we can encode b-frames */
3601 // RAL: Condition added for MPEG1VIDEO
3602 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3603 set_frame_distances(s);
3604 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3605 ff_set_mpeg4_time(s);
3607 s->me.scene_change_score=0;
3609 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* Rounding control: MS-MPEG4 v3+ forces no_rounding on I-frames; codecs
 * with flip-flop rounding toggle it on every non-B frame. */
3611 if(s->pict_type==AV_PICTURE_TYPE_I){
3612 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3613 else s->no_rounding=0;
3614 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3615 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3616 s->no_rounding ^= 1;
/* Lambda for motion estimation: pass 2 takes it from the stats file,
 * otherwise the last lambda for the same picture type is reused. */
3619 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3620 if (estimate_qp(s,1) < 0)
3622 ff_get_2pass_fcode(s);
3623 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3624 if(s->pict_type==AV_PICTURE_TYPE_B)
3625 s->lambda= s->last_lambda_for[s->pict_type];
3627 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* Outside MJPEG/AMV the chroma quant matrices alias the luma ones; free
 * any separate chroma matrices before pointing them at the luma ones. */
3631 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3632 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3633 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3634 s->q_chroma_intra_matrix = s->q_intra_matrix;
3635 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3638 s->mb_intra=0; //for the rate distortion & bit compare functions
3639 for(i=1; i<context_count; i++){
3640 ret = ff_update_duplicate_context(s->thread_context[i], s);
3648 /* Estimate motion for every MB */
3649 if(s->pict_type != AV_PICTURE_TYPE_I){
3650 s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3651 s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3652 if (s->pict_type != AV_PICTURE_TYPE_B) {
3653 if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3654 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3658 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3659 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
/* I-frame: mark every MB intra; no motion search needed. */
3661 for(i=0; i<s->mb_stride*s->mb_height; i++)
3662 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3664 if(!s->fixed_qscale){
3665 /* finding spatial complexity for I-frame rate control */
3666 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3669 for(i=1; i<context_count; i++){
3670 merge_context_after_me(s, s->thread_context[i]);
3672 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3673 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* Scene-change detection: promote the P-frame to an I-frame. */
3676 if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3677 s->pict_type= AV_PICTURE_TYPE_I;
3678 for(i=0; i<s->mb_stride*s->mb_height; i++)
3679 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3680 if(s->msmpeg4_version >= 3)
3682 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3683 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* Pick f_code from the P-frame MV distribution and clip MVs that would
 * not fit the chosen range. */
3687 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3688 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3690 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3692 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3693 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3694 s->f_code= FFMAX3(s->f_code, a, b);
3697 ff_fix_long_p_mvs(s);
3698 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3699 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3703 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3704 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
/* B-frames need both a forward (f_code) and backward (b_code) range. */
3709 if(s->pict_type==AV_PICTURE_TYPE_B){
3712 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3713 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3714 s->f_code = FFMAX(a, b);
3716 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3717 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3718 s->b_code = FFMAX(a, b);
3720 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3721 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3722 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3723 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3724 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3726 for(dir=0; dir<2; dir++){
3729 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3730 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3731 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3732 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* Real (non-dry-run) quantiser estimation for this frame. */
3740 if (estimate_qp(s, 0) < 0)
3743 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3744 s->pict_type == AV_PICTURE_TYPE_I &&
3745 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3746 s->qscale= 3; //reduce clipping problems
3748 if (s->out_format == FMT_MJPEG) {
3749 const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3750 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3752 if (s->avctx->intra_matrix) {
3754 luma_matrix = s->avctx->intra_matrix;
3756 if (s->avctx->chroma_intra_matrix)
3757 chroma_matrix = s->avctx->chroma_intra_matrix;
3759 /* for mjpeg, we do include qscale in the matrix */
3761 int j = s->idsp.idct_permutation[i];
3763 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3764 s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3766 s->y_dc_scale_table=
3767 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3768 s->chroma_intra_matrix[0] =
3769 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3770 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3771 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3772 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3773 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV uses fixed sp5x quantisation tables and constant DC scales
 * (13 for luma, 14 for chroma). */
3776 if(s->codec_id == AV_CODEC_ID_AMV){
3777 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3778 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3780 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3782 s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3783 s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3785 s->y_dc_scale_table= y;
3786 s->c_dc_scale_table= c;
3787 s->intra_matrix[0] = 13;
3788 s->chroma_intra_matrix[0] = 14;
3789 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3790 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3791 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3792 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3796 //FIXME var duplication
3797 s->current_picture_ptr->f->key_frame =
3798 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3799 s->current_picture_ptr->f->pict_type =
3800 s->current_picture.f->pict_type = s->pict_type;
3802 if (s->current_picture.f->key_frame)
3803 s->picture_in_gop_number=0;
3805 s->mb_x = s->mb_y = 0;
3806 s->last_bits= put_bits_count(&s->pb);
/* Emit the codec-specific picture header. */
3807 switch(s->out_format) {
3809 if (CONFIG_MJPEG_ENCODER)
3810 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3811 s->intra_matrix, s->chroma_intra_matrix);
3814 if (CONFIG_H261_ENCODER)
3815 ff_h261_encode_picture_header(s, picture_number);
3818 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3819 ff_wmv2_encode_picture_header(s, picture_number);
3820 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3821 ff_msmpeg4_encode_picture_header(s, picture_number);
3822 else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
3823 ff_mpeg4_encode_picture_header(s, picture_number);
3824 else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3825 ret = ff_rv10_encode_picture_header(s, picture_number);
3829 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3830 ff_rv20_encode_picture_header(s, picture_number);
3831 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3832 ff_flv_encode_picture_header(s, picture_number);
3833 else if (CONFIG_H263_ENCODER)
3834 ff_h263_encode_picture_header(s, picture_number);
3837 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3838 ff_mpeg1_encode_picture_header(s, picture_number);
3843 bits= put_bits_count(&s->pb);
3844 s->header_bits= bits - s->last_bits;
/* Run the per-slice encoders in parallel, then splice their bitstreams
 * and statistics back into the main context. */
3846 for(i=1; i<context_count; i++){
3847 update_duplicate_context_after_me(s->thread_context[i], s);
3849 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3850 for(i=1; i<context_count; i++){
3851 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3852 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
3853 merge_context_after_encode(s, s->thread_context[i]);
/* Noise-reduction pre-pass over one DCT block (in place): accumulates
 * per-coefficient magnitudes into dct_error_sum (separately for
 * intra/inter via mb_intra) and appears to pull each coefficient
 * towards zero by the running dct_offset, clamping at zero -- the
 * surrounding branch conditions are not fully visible here, so confirm
 * against the complete source. */
3859 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3860 const int intra= s->mb_intra;
3863 s->dct_count[intra]++;
3865 for(i=0; i<64; i++){
3866 int level= block[i];
/* Positive coefficient: shrink towards zero, clamped at zero. */
3870 s->dct_error_sum[intra][i] += level;
3871 level -= s->dct_offset[intra][i];
3872 if(level<0) level=0;
/* Negative coefficient: mirror of the branch above. */
3874 s->dct_error_sum[intra][i] -= level;
3875 level += s->dct_offset[intra][i];
3876 if(level>0) level=0;
/*
 * Trellis (rate-distortion optimised) quantisation of one 8x8 block.
 * After the forward DCT, up to two candidate quantised levels are built
 * for every scan position, then a dynamic program over "survivor"
 * predecessor positions picks the run/level chain that minimises
 * distortion + lambda * VLC bit cost.  The chosen levels are written
 * back into block[] in IDCT-permuted order.
 * Returns the index of the last nonzero coefficient (or -1 / a value
 * below start_i for an all-zero block); *overflow is set when a level
 * exceeds max_qcoeff.
 */
3883 static int dct_quantize_trellis_c(MpegEncContext *s,
3884 int16_t *block, int n,
3885 int qscale, int *overflow){
3887 const uint16_t *matrix;
3888 const uint8_t *scantable= s->intra_scantable.scantable;
3889 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3891 unsigned int threshold1, threshold2;
3903 int coeff_count[64];
3904 int qmul, qadd, start_i, last_non_zero, i, dc;
3905 const int esc_length= s->ac_esc_length;
3907 uint8_t * last_length;
3908 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
/* Forward DCT, then optional noise shaping on the raw coefficients. */
3911 s->fdsp.fdct(block);
3913 if(s->dct_error_sum)
3914 s->denoise_dct(s, block);
3916 qadd= ((qscale-1)|1)*8;
/* MPEG-2 may use the non-linear qscale mapping. */
3918 if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3919 else mpeg2_qscale = qscale << 1;
3930 /* For AIC we skip quant/dequant of INTRADC */
3935 /* note: block[0] is assumed to be positive */
3936 block[0] = (block[0] + (q >> 1)) / q;
/* Intra: luma (n < 4) and chroma use separate matrices and AC tables. */
3939 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
3940 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
3941 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
3942 bias= 1<<(QMAT_SHIFT-1);
3944 if (n > 3 && s->intra_chroma_ac_vlc_length) {
3945 length = s->intra_chroma_ac_vlc_length;
3946 last_length= s->intra_chroma_ac_vlc_last_length;
3948 length = s->intra_ac_vlc_length;
3949 last_length= s->intra_ac_vlc_last_length;
3954 qmat = s->q_inter_matrix[qscale];
3955 matrix = s->inter_matrix;
3956 length = s->inter_ac_vlc_length;
3957 last_length= s->inter_ac_vlc_last_length;
3961 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3962 threshold2= (threshold1<<1);
/* Scan backwards to find the last coefficient surviving quantisation. */
3964 for(i=63; i>=start_i; i--) {
3965 const int j = scantable[i];
3966 int level = block[j] * qmat[j];
3968 if(((unsigned)(level+threshold1))>threshold2){
/* Build up to two candidate levels (L and L-1) per scan position. */
3974 for(i=start_i; i<=last_non_zero; i++) {
3975 const int j = scantable[i];
3976 int level = block[j] * qmat[j];
3978 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3979 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
3980 if(((unsigned)(level+threshold1))>threshold2){
3982 level= (bias + level)>>QMAT_SHIFT;
3984 coeff[1][i]= level-1;
3985 // coeff[2][k]= level-2;
3987 level= (bias - level)>>QMAT_SHIFT;
3988 coeff[0][i]= -level;
3989 coeff[1][i]= -level+1;
3990 // coeff[2][k]= -level+2;
3992 coeff_count[i]= FFMIN(level, 2);
3993 av_assert2(coeff_count[i]);
3996 coeff[0][i]= (level>>31)|1;
4001 *overflow= s->max_qcoeff < max; //overflow might have happened
4003 if(last_non_zero < start_i){
4004 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4005 return last_non_zero;
4008 score_tab[start_i]= 0;
4009 survivor[0]= start_i;
/* Dynamic program: extend each surviving predecessor path with every
 * candidate level, scoring distortion + lambda * bits. */
4012 for(i=start_i; i<=last_non_zero; i++){
4013 int level_index, j, zero_distortion;
4014 int dct_coeff= FFABS(block[ scantable[i] ]);
4015 int best_score=256*256*256*120;
/* The ifast FDCT leaves AAN scaling in the coefficients; undo it. */
4017 if (s->fdsp.fdct == ff_fdct_ifast)
4018 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4019 zero_distortion= dct_coeff*dct_coeff;
4021 for(level_index=0; level_index < coeff_count[i]; level_index++){
4023 int level= coeff[level_index][i];
4024 const int alevel= FFABS(level);
/* Reconstruct the dequantised value the decoder would see, per codec. */
4029 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4030 unquant_coeff= alevel*qmul + qadd;
4031 } else if(s->out_format == FMT_MJPEG) {
4032 j = s->idsp.idct_permutation[scantable[i]];
4033 unquant_coeff = alevel * matrix[j] * 8;
4035 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4037 unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4038 unquant_coeff = (unquant_coeff - 1) | 1;
4040 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4041 unquant_coeff = (unquant_coeff - 1) | 1;
4046 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* Levels that fit the VLC tables use table bit costs... */
4048 if((level&(~127)) == 0){
4049 for(j=survivor_count-1; j>=0; j--){
4050 int run= i - survivor[j];
4051 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4052 score += score_tab[i-run];
4054 if(score < best_score){
4057 level_tab[i+1]= level-64;
4061 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4062 for(j=survivor_count-1; j>=0; j--){
4063 int run= i - survivor[j];
4064 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4065 score += score_tab[i-run];
4066 if(score < last_score){
4069 last_level= level-64;
/* ...larger levels fall back to the fixed-size escape code. */
4075 distortion += esc_length*lambda;
4076 for(j=survivor_count-1; j>=0; j--){
4077 int run= i - survivor[j];
4078 int score= distortion + score_tab[i-run];
4080 if(score < best_score){
4083 level_tab[i+1]= level-64;
4087 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4088 for(j=survivor_count-1; j>=0; j--){
4089 int run= i - survivor[j];
4090 int score= distortion + score_tab[i-run];
4091 if(score < last_score){
4094 last_level= level-64;
4102 score_tab[i+1]= best_score;
4104 //Note: there is a vlc code in mpeg4 which is 1 bit shorter than another one with a shorter run and the same level
4105 if(last_non_zero <= 27){
4106 for(; survivor_count; survivor_count--){
4107 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4111 for(; survivor_count; survivor_count--){
4112 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4117 survivor[ survivor_count++ ]= i+1;
/* Non-H.26x codecs: pick the best end-of-block position explicitly. */
4120 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4121 last_score= 256*256*256*120;
4122 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4123 int score= score_tab[i];
4124 if(i) score += lambda*2; //FIXME be more exact?
4126 if(score < last_score){
4129 last_level= level_tab[i];
4130 last_run= run_tab[i];
4135 s->coded_score[n] = last_score;
4137 dc= FFABS(block[0]);
4138 last_non_zero= last_i - 1;
4139 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4141 if(last_non_zero < start_i)
4142 return last_non_zero;
/* DC-only block: choose the DC level directly against dc*dc. */
4144 if(last_non_zero == 0 && start_i == 0){
4146 int best_score= dc * dc;
4148 for(i=0; i<coeff_count[0]; i++){
4149 int level= coeff[i][0];
4150 int alevel= FFABS(level);
4151 int unquant_coeff, score, distortion;
4153 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4154 unquant_coeff= (alevel*qmul + qadd)>>3;
4156 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4157 unquant_coeff = (unquant_coeff - 1) | 1;
4159 unquant_coeff = (unquant_coeff + 4) >> 3;
4160 unquant_coeff<<= 3 + 3;
4162 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4164 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4165 else score= distortion + esc_length*lambda;
4167 if(score < best_score){
4169 best_level= level - 64;
4172 block[0]= best_level;
4173 s->coded_score[n] = best_score - dc*dc;
4174 if(best_level == 0) return -1;
4175 else return last_non_zero;
/* Walk the chosen run/level chain backwards into block[]. */
4179 av_assert2(last_level);
4181 block[ perm_scantable[last_non_zero] ]= last_level;
4184 for(; i>start_i; i -= run_tab[i] + 1){
4185 block[ perm_scantable[i-1] ]= level_tab[i];
4188 return last_non_zero;
//#define REFINE_STATS 1
/* 64 8x8 DCT basis functions in idct_permutation order, scaled by
 * 2^BASIS_SHIFT; lazily initialised by build_basis() on first use by
 * dct_quantize_refine(). */
4192 static int16_t basis[64][64];
/* Fill basis[][] with the 2-D DCT basis vectors, rows stored under the
 * given coefficient permutation (the enclosing loops over i/j/x/y are
 * not visible in this excerpt). */
4194 static void build_basis(uint8_t *perm){
4201 double s= 0.25*(1<<BASIS_SHIFT);
4203 int perm_index= perm[index];
/* Orthonormalisation factor for the DC row/column. */
4204 if(i==0) s*= sqrt(0.5);
4205 if(j==0) s*= sqrt(0.5);
4206 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/*
 * Quantiser-noise-shaping refinement: starting from an already
 * quantised block, iteratively try +/-1 changes to single coefficients
 * and keep the change with the best score, where the score combines the
 * weighted reconstruction error (mpvencdsp.try_8x8basis over the
 * residual rem[]) with the VLC bit-cost delta of the resulting
 * run/level sequence.  orig[] holds the target (pre-quantisation)
 * pixels, weight[] the perceptual weights.
 * Returns the index of the last nonzero coefficient.
 */
4213 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4214 int16_t *block, int16_t *weight, int16_t *orig,
4217 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4218 const uint8_t *scantable= s->intra_scantable.scantable;
4219 const uint8_t *perm_scantable= s->intra_scantable.permutated;
4220 // unsigned int threshold1, threshold2;
4225 int qmul, qadd, start_i, last_non_zero, i, dc;
4227 uint8_t * last_length;
4229 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* Statistics counters, only compiled in with REFINE_STATS. */
4232 static int after_last=0;
4233 static int to_zero=0;
4234 static int from_zero=0;
4237 static int messed_sign=0;
/* Lazily build the DCT basis table on first call. */
4240 if(basis[0][0] == 0)
4241 build_basis(s->idsp.idct_permutation);
4252 /* For AIC we skip quant/dequant of INTRADC */
4256 q <<= RECON_SHIFT-3;
4257 /* note: block[0] is assumed to be positive */
4259 // block[0] = (block[0] + (q >> 1)) / q;
4261 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4262 // bias= 1<<(QMAT_SHIFT-1);
/* Select the AC VLC length tables matching this block's type. */
4263 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4264 length = s->intra_chroma_ac_vlc_length;
4265 last_length= s->intra_chroma_ac_vlc_last_length;
4267 length = s->intra_ac_vlc_length;
4268 last_length= s->intra_ac_vlc_last_length;
4273 length = s->inter_ac_vlc_length;
4274 last_length= s->inter_ac_vlc_last_length;
4276 last_non_zero = s->block_last_index[n];
/* Initialise the residual rem[] = reconstruction - original target. */
4281 dc += (1<<(RECON_SHIFT-1));
4282 for(i=0; i<64; i++){
4283 rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig directly instead of copying to rem[]
4286 STOP_TIMER("memset rem[]")}
/* Derive per-coefficient weights (16..63) from weight[] and the qns
 * strength, and the lambda used for the bit-cost term. */
4289 for(i=0; i<64; i++){
4294 w= FFABS(weight[i]) + qns*one;
4295 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4298 // w=weight[i] = (63*qns + (w/2)) / w;
4301 av_assert2(w<(1<<6));
4304 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* Add the dequantised value of every nonzero coefficient into rem[]
 * while recording the run lengths for the bit-cost deltas. */
4310 for(i=start_i; i<=last_non_zero; i++){
4311 int j= perm_scantable[i];
4312 const int level= block[j];
4316 if(level<0) coeff= qmul*level - qadd;
4317 else coeff= qmul*level + qadd;
4318 run_tab[rle_index++]=run;
4321 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
4327 if(last_non_zero>0){
4328 STOP_TIMER("init rem[]")
/* Main refinement loop: baseline score of "change nothing". */
4335 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4338 int run2, best_unquant_change=0, analyze_gradient;
4342 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
/* Optional gradient d1[] = weighted residual, used to reject changes
 * that push a zero coefficient against the residual's sign. */
4344 if(analyze_gradient){
4348 for(i=0; i<64; i++){
4351 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
4354 STOP_TIMER("rem*w*w")}
/* Intra DC coefficient: try +/-1 separately (no VLC cost model). */
4364 const int level= block[0];
4365 int change, old_coeff;
4367 av_assert2(s->mb_intra);
4371 for(change=-1; change<=1; change+=2){
4372 int new_level= level + change;
4373 int score, new_coeff;
4375 new_coeff= q*new_level;
4376 if(new_coeff >= 2048 || new_coeff < 0)
4379 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4380 new_coeff - old_coeff);
4381 if(score<best_score){
4384 best_change= change;
4385 best_unquant_change= new_coeff - old_coeff;
4392 run2= run_tab[rle_index++];
/* AC coefficients: try +/-1 on every position, scoring the VLC bit
 * delta of the run/level changes plus the basis-projection error. */
4396 for(i=start_i; i<64; i++){
4397 int j= perm_scantable[i];
4398 const int level= block[j];
4399 int change, old_coeff;
4401 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4405 if(level<0) old_coeff= qmul*level - qadd;
4406 else old_coeff= qmul*level + qadd;
4407 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4411 av_assert2(run2>=0 || i >= last_non_zero );
4414 for(change=-1; change<=1; change+=2){
4415 int new_level= level + change;
4416 int score, new_coeff, unquant_change;
/* Mild shaping (<2) may only shrink magnitudes, never grow them. */
4419 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4423 if(new_level<0) new_coeff= qmul*new_level - qadd;
4424 else new_coeff= qmul*new_level + qadd;
4425 if(new_coeff >= 2048 || new_coeff <= -2048)
4427 //FIXME check for overflow
/* Nonzero -> nonzero: only the level VLC length changes. */
4430 if(level < 63 && level > -63){
4431 if(i < last_non_zero)
4432 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4433 - length[UNI_AC_ENC_INDEX(run, level+64)];
4435 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4436 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* Zero -> +/-1: a run is split; account for the extra code. */
4439 av_assert2(FFABS(new_level)==1);
4441 if(analyze_gradient){
4442 int g= d1[ scantable[i] ];
4443 if(g && (g^new_level) >= 0)
4447 if(i < last_non_zero){
4448 int next_i= i + run2 + 1;
4449 int next_level= block[ perm_scantable[next_i] ] + 64;
4451 if(next_level&(~127))
4454 if(next_i < last_non_zero)
4455 score += length[UNI_AC_ENC_INDEX(run, 65)]
4456 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4457 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4459 score += length[UNI_AC_ENC_INDEX(run, 65)]
4460 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4461 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4463 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4465 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4466 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* +/-1 -> zero: two runs are merged; the inverse accounting. */
4472 av_assert2(FFABS(level)==1);
4474 if(i < last_non_zero){
4475 int next_i= i + run2 + 1;
4476 int next_level= block[ perm_scantable[next_i] ] + 64;
4478 if(next_level&(~127))
4481 if(next_i < last_non_zero)
4482 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4483 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4484 - length[UNI_AC_ENC_INDEX(run, 65)];
4486 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4487 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4488 - length[UNI_AC_ENC_INDEX(run, 65)];
4490 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4492 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4493 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4500 unquant_change= new_coeff - old_coeff;
4501 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4503 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4505 if(score<best_score){
4508 best_change= change;
4509 best_unquant_change= unquant_change;
4513 prev_level= level + 64;
4514 if(prev_level&(~127))
4523 STOP_TIMER("iterative step")}
/* Apply the winning change and update last_non_zero / run_tab. */
4527 int j= perm_scantable[ best_coeff ];
4529 block[j] += best_change;
4531 if(best_coeff > last_non_zero){
4532 last_non_zero= best_coeff;
4533 av_assert2(block[j]);
4540 if(block[j] - best_change){
4541 if(FFABS(block[j]) > FFABS(block[j] - best_change)){
/* A coefficient went to zero: last_non_zero may shrink. */
4553 for(; last_non_zero>=start_i; last_non_zero--){
4554 if(block[perm_scantable[last_non_zero]])
4560 if(256*256*256*64 % count == 0){
4561 av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
/* Rebuild the run table for the next iteration... */
4566 for(i=start_i; i<=last_non_zero; i++){
4567 int j= perm_scantable[i];
4568 const int level= block[j];
4571 run_tab[rle_index++]=run;
/* ...and fold the accepted change into the residual. */
4578 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4584 if(last_non_zero>0){
4585 STOP_TIMER("iterative search")
4590 return last_non_zero;
4594 * Permute an 8x8 block according to permutation.
4595 * @param block the block which will be permuted according to
4596 * the given permutation vector
4597 * @param permutation the permutation vector
4598 * @param last the last non zero coefficient in scantable order, used to
4599 * speed the permutation up
4600 * @param scantable the used scantable, this is only used to speed the
4601 * permutation up, the block is not (inverse) permutated
4602 * to scantable order!
4604 void ff_block_permute(int16_t *block, uint8_t *permutation,
4605 const uint8_t *scantable, int last)
4612 //FIXME it is ok but not clean and might fail for some permutations
4613 // if (permutation[1] == 1)
/* Gather pass: stash the coefficients addressed by the scantable. */
4616 for (i = 0; i <= last; i++) {
4617 const int j = scantable[i];
/* Scatter pass: write each stashed coefficient to its permuted slot. */
4622 for (i = 0; i <= last; i++) {
4623 const int j = scantable[i];
4624 const int perm_j = permutation[j];
4625 block[perm_j] = temp[j];
/*
 * Reference C implementation of plain (non-trellis) quantisation of one
 * 8x8 block: forward DCT, optional noise shaping, biased division by
 * the precomputed quant matrix, then a permutation of the nonzero
 * coefficients into IDCT order.  Returns the index of the last nonzero
 * coefficient; *overflow is set when a level exceeds max_qcoeff.
 */
4629 int ff_dct_quantize_c(MpegEncContext *s,
4630 int16_t *block, int n,
4631 int qscale, int *overflow)
4633 int i, j, level, last_non_zero, q, start_i;
4635 const uint8_t *scantable= s->intra_scantable.scantable;
4638 unsigned int threshold1, threshold2;
4640 s->fdsp.fdct(block);
4642 if(s->dct_error_sum)
4643 s->denoise_dct(s, block);
4653 /* For AIC we skip quant/dequant of INTRADC */
4656 /* note: block[0] is assumed to be positive */
4657 block[0] = (block[0] + (q >> 1)) / q;
/* Intra blocks: luma (n < 4) vs chroma matrix, intra bias. */
4660 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4661 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4665 qmat = s->q_inter_matrix[qscale];
4666 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4668 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4669 threshold2= (threshold1<<1);
/* Find the last coefficient that survives quantisation. */
4670 for(i=63;i>=start_i;i--) {
4672 level = block[j] * qmat[j];
4674 if(((unsigned)(level+threshold1))>threshold2){
/* Quantise everything up to it with the rounding bias. */
4681 for(i=start_i; i<=last_non_zero; i++) {
4683 level = block[j] * qmat[j];
4685 // if( bias+level >= (1<<QMAT_SHIFT)
4686 // || bias-level >= (1<<QMAT_SHIFT)){
4687 if(((unsigned)(level+threshold1))>threshold2){
4689 level= (bias + level)>>QMAT_SHIFT;
4692 level= (bias - level)>>QMAT_SHIFT;
4700 *overflow= s->max_qcoeff < max; //overflow might have happened
4702 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4703 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4704 ff_block_permute(block, s->idsp.idct_permutation,
4705 scantable, last_non_zero);
4707 return last_non_zero;
/* Helpers for the AVOption tables below: field offset into the private
 * context, and the common video/encoding option flags. */
4710 #define OFFSET(x) offsetof(MpegEncContext, x)
4711 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options of the H.263 encoder. */
4712 static const AVOption h263_options[] = {
4713 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4714 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4719 static const AVClass h263_class = {
4720 .class_name = "H.263 encoder",
4721 .item_name = av_default_item_name,
4722 .option = h263_options,
4723 .version = LIBAVUTIL_VERSION_INT,
/* H.263 (baseline, H.263-1996) encoder registration. */
4726 AVCodec ff_h263_encoder = {
4728 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4729 .type = AVMEDIA_TYPE_VIDEO,
4730 .id = AV_CODEC_ID_H263,
4731 .priv_data_size = sizeof(MpegEncContext),
4732 .init = ff_mpv_encode_init,
4733 .encode2 = ff_mpv_encode_picture,
4734 .close = ff_mpv_encode_end,
4735 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4736 .priv_class = &h263_class,
/* Private options of the H.263+ (H.263-1998) encoder. */
4739 static const AVOption h263p_options[] = {
4740 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4741 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4742 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4743 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
4747 static const AVClass h263p_class = {
4748 .class_name = "H.263p encoder",
4749 .item_name = av_default_item_name,
4750 .option = h263p_options,
4751 .version = LIBAVUTIL_VERSION_INT,
/* H.263+ encoder registration; supports slice threading. */
4754 AVCodec ff_h263p_encoder = {
4756 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4757 .type = AVMEDIA_TYPE_VIDEO,
4758 .id = AV_CODEC_ID_H263P,
4759 .priv_data_size = sizeof(MpegEncContext),
4760 .init = ff_mpv_encode_init,
4761 .encode2 = ff_mpv_encode_picture,
4762 .close = ff_mpv_encode_end,
4763 .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4764 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4765 .priv_class = &h263p_class,
/* MS-MPEG4 v2 encoder: generic mpegvideo options only. */
4768 static const AVClass msmpeg4v2_class = {
4769 .class_name = "msmpeg4v2 encoder",
4770 .item_name = av_default_item_name,
4771 .option = ff_mpv_generic_options,
4772 .version = LIBAVUTIL_VERSION_INT,
4775 AVCodec ff_msmpeg4v2_encoder = {
4776 .name = "msmpeg4v2",
4777 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4778 .type = AVMEDIA_TYPE_VIDEO,
4779 .id = AV_CODEC_ID_MSMPEG4V2,
4780 .priv_data_size = sizeof(MpegEncContext),
4781 .init = ff_mpv_encode_init,
4782 .encode2 = ff_mpv_encode_picture,
4783 .close = ff_mpv_encode_end,
4784 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4785 .priv_class = &msmpeg4v2_class,
/* MS-MPEG4 v3 encoder: generic mpegvideo options only. */
4788 static const AVClass msmpeg4v3_class = {
4789 .class_name = "msmpeg4v3 encoder",
4790 .item_name = av_default_item_name,
4791 .option = ff_mpv_generic_options,
4792 .version = LIBAVUTIL_VERSION_INT,
4795 AVCodec ff_msmpeg4v3_encoder = {
4797 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4798 .type = AVMEDIA_TYPE_VIDEO,
4799 .id = AV_CODEC_ID_MSMPEG4V3,
4800 .priv_data_size = sizeof(MpegEncContext),
4801 .init = ff_mpv_encode_init,
4802 .encode2 = ff_mpv_encode_picture,
4803 .close = ff_mpv_encode_end,
4804 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4805 .priv_class = &msmpeg4v3_class,
/* WMV1 encoder option class: generic mpegvideo options only. */
4808 static const AVClass wmv1_class = {
4809 .class_name = "wmv1 encoder",
4810 .item_name = av_default_item_name,
4811 .option = ff_mpv_generic_options,
4812 .version = LIBAVUTIL_VERSION_INT,
4815 AVCodec ff_wmv1_encoder = {
4817 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4818 .type = AVMEDIA_TYPE_VIDEO,
4819 .id = AV_CODEC_ID_WMV1,
4820 .priv_data_size = sizeof(MpegEncContext),
4821 .init = ff_mpv_encode_init,
4822 .encode2 = ff_mpv_encode_picture,
4823 .close = ff_mpv_encode_end,
4824 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4825 .priv_class = &wmv1_class,