2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/pixdesc.h"
40 #include "libavutil/opt.h"
41 #include "libavutil/timer.h"
46 #include "mpegvideo.h"
47 #include "mpegvideodata.h"
51 #include "mjpegenc_common.h"
53 #include "mpegutils.h"
56 #include "pixblockdsp.h"
60 #include "aandcttab.h"
62 #include "mpeg4video.h"
64 #include "bytestream.h"
/* Fixed-point precision constants used when building the quantizer matrices
 * in ff_convert_matrix() below.  NOTE(review): this dump is line-sampled;
 * lines between the embedded original line numbers are elided. */
70 #define QUANT_BIAS_SHIFT 8
72 #define QMAT_SHIFT_MMX 16
/* Forward declarations for encoder-internal helpers defined later in this file. */
75 static int encode_picture(MpegEncContext *s, int picture_number);
76 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
77 static int sse_mb(MpegEncContext *s);
78 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
79 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
/* Shared default motion-estimation tables; default_fcode_tab is filled in by
 * mpv_encode_defaults() and both are installed into each context there. */
81 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
82 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
/* Generic AVOption table shared by the mpegvideo-family encoders
 * (initializer body elided in this view). */
84 const AVOption ff_mpv_generic_options[] = {
/**
 * Convert an 8x8 quantization matrix into the per-qscale fixed-point
 * reciprocal tables (qmat / qmat16) used by the fast quantizers, for every
 * qscale in [qmin, qmax].  The divider choice depends on which forward-DCT
 * implementation is in use, since ifast bakes the AAN scale factors into
 * its output.  NOTE(review): several lines are elided in this view.
 *
 * @param qmat         per-qscale 32-bit reciprocal matrix (trellis/C path)
 * @param qmat16       per-qscale 16-bit reciprocal + bias pair (MMX path)
 * @param quant_matrix the source 8x8 quantization matrix
 * @param bias         rounding bias, in QUANT_BIAS_SHIFT fixed point
 * @param intra        nonzero to skip the DC coefficient in the overflow scan
 */
89 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
90 uint16_t (*qmat16)[2][64],
91 const uint16_t *quant_matrix,
92 int bias, int qmin, int qmax, int intra)
94 FDCTDSPContext *fdsp = &s->fdsp;
98 for (qscale = qmin; qscale <= qmax; qscale++) {
/* qscale2 is the doubled / non-linear-mapped quantizer actually used. */
102 if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
103 else qscale2 = qscale << 1;
/* islow DCTs produce unscaled coefficients: plain reciprocal. */
105 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
107 fdsp->fdct == ff_faandct ||
108 #endif /* CONFIG_FAANDCT */
109 fdsp->fdct == ff_jpeg_fdct_islow_10) {
110 for (i = 0; i < 64; i++) {
/* j = permuted index so the table matches the IDCT's coefficient order. */
111 const int j = s->idsp.idct_permutation[i];
112 int64_t den = (int64_t) qscale2 * quant_matrix[j];
113 /* 16 <= qscale * quant_matrix[i] <= 7905
114 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
115 * 19952 <= x <= 249205026
116 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
117 * 3444240 >= (1 << 36) / (x) >= 275 */
119 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
/* ifast DCT output carries the AAN scale factors; fold them into den. */
121 } else if (fdsp->fdct == ff_fdct_ifast) {
122 for (i = 0; i < 64; i++) {
123 const int j = s->idsp.idct_permutation[i];
124 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
125 /* 16 <= qscale * quant_matrix[i] <= 7905
126 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
127 * 19952 <= x <= 249205026
128 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
129 * 3444240 >= (1 << 36) / (x) >= 275 */
131 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
/* Fallback path: also builds the 16-bit tables for the SIMD quantizer. */
134 for (i = 0; i < 64; i++) {
135 const int j = s->idsp.idct_permutation[i];
136 int64_t den = (int64_t) qscale2 * quant_matrix[j];
137 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
138 * Assume x = qscale * quant_matrix[i]
140 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
141 * so 32768 >= (1 << 19) / (x) >= 67 */
142 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
143 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
144 // (qscale * quant_matrix[i]);
145 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
/* Clamp: 0 and 128*256 would break the 16-bit multiply in the asm. */
147 if (qmat16[qscale][0][i] == 0 ||
148 qmat16[qscale][0][i] == 128 * 256)
149 qmat16[qscale][0][i] = 128 * 256 - 1;
150 qmat16[qscale][1][i] =
151 ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
152 qmat16[qscale][0][i]);
/* Overflow guard: shrink the effective shift until max*qmat fits in int.
 * Starts at `intra` so the DC coefficient is skipped for intra matrices. */
156 for (i = intra; i < 64; i++) {
158 if (fdsp->fdct == ff_fdct_ifast) {
159 max = (8191LL * ff_aanscales[i]) >> 14;
161 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
167 av_log(NULL, AV_LOG_INFO,
168 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/**
 * Derive s->qscale (and s->lambda2) from the current rate-control lambda.
 * NOTE(review): the first branch is explicitly disabled by the `&& 0` — the
 * non-linear-qscale search below it is dead code kept for reference; the
 * live path is the linear mapping at original lines 192-194.
 */
173 static inline void update_qscale(MpegEncContext *s)
175 if (s->q_scale_type == 1 && 0) {
176 int bestdiff=INT_MAX;
/* (dead) search the MPEG-2 non-linear qscale table for the entry closest
 * to lambda, skipping entries outside [qmin, qmax]. */
180 for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
181 int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
182 if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
183 (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
185 if (diff < bestdiff) {
/* Live path: linear lambda -> qscale mapping with rounding. */
192 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
193 (FF_LAMBDA_SHIFT + 7);
/* When VBV forces us past qmax, allow up to 31 (the bitstream maximum). */
194 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
197 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/**
 * Write all 64 entries of a quantization matrix to the bitstream,
 * 8 bits each, in zigzag scan order.
 */
201 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
207 for (i = 0; i < 64; i++) {
208 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
/**
215 * init s->current_picture.qscale_table from s->lambda_table
 * Each macroblock's lambda is converted to a qp with the same linear
 * mapping used in update_qscale(), then clipped to [qmin, qmax].
 */
217 void ff_init_qscale_tab(MpegEncContext *s)
219 int8_t * const qscale_table = s->current_picture.qscale_table;
222 for (i = 0; i < s->mb_num; i++) {
/* mb_index2xy maps scan order to the picture's mb layout. */
223 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
224 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
225 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/**
 * Copy the fields that motion estimation may have changed from src back
 * into a duplicated (slice-thread) context dst, so all slice contexts
 * agree before the encode pass.
 */
230 static void update_duplicate_context_after_me(MpegEncContext *dst,
233 #define COPY(a) dst->a= src->a
235 COPY(current_picture);
241 COPY(picture_in_gop_number);
242 COPY(gop_picture_number);
243 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
244 COPY(progressive_frame); // FIXME don't set in encode_header
245 COPY(partitioned_frame); // FIXME don't set in encode_header
/**
250 * Set the given MpegEncContext to defaults for encoding.
251 * The changed fields will not depend upon the prior state of the MpegEncContext.
 */
253 static void mpv_encode_defaults(MpegEncContext *s)
256 ff_mpv_common_defaults(s);
/* Fill the small-MV region of the shared fcode table; install the shared
 * default ME tables into this context. */
258 for (i = -16; i < 16; i++) {
259 default_fcode_tab[i + MAX_MV] = 1;
261 s->me.mv_penalty = default_mv_penalty;
262 s->fcode_tab = default_fcode_tab;
/* Reset per-stream picture counters. */
264 s->input_picture_number = 0;
265 s->picture_in_gop_number = 0;
/**
 * Select the DCT/quantization function pointers for this context:
 * C defaults, x86 overrides where available, and the trellis quantizer
 * when avctx->trellis is requested (fast_dct_quantize keeps the non-trellis
 * version for callers that want the cheap path).
 */
268 av_cold int ff_dct_encode_init(MpegEncContext *s) {
270 ff_dct_encode_init_x86(s);
272 if (CONFIG_H263_ENCODER)
273 ff_h263dsp_init(&s->h263dsp);
/* Only install the C quantizer if the arch-specific init left it unset. */
274 if (!s->dct_quantize)
275 s->dct_quantize = ff_dct_quantize_c;
277 s->denoise_dct = denoise_dct_c;
278 s->fast_dct_quantize = s->dct_quantize;
279 if (s->avctx->trellis)
280 s->dct_quantize = dct_quantize_trellis_c;
285 /* init video encoder */
/**
 * Initialize an mpegvideo-family encoder: validate user options against the
 * selected codec's capabilities, configure per-codec output format and
 * features, allocate quantization/ME tables, and set up rate control.
 * Returns 0 on success or a negative AVERROR; errors after allocation jump
 * to the `fail` label (elided here) which tears down via ff_mpv_encode_end().
 * NOTE(review): this view is line-sampled — braces, `break`s and several
 * statements between the numbered lines are elided.
 */
286 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
288 MpegEncContext *s = avctx->priv_data;
289 AVCPBProperties *cpb_props;
290 int i, ret, format_supported;
292 mpv_encode_defaults(s);
/* --- pixel format validation per codec --- */
294 switch (avctx->codec_id) {
295 case AV_CODEC_ID_MPEG2VIDEO:
296 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
297 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
298 av_log(avctx, AV_LOG_ERROR,
299 "only YUV420 and YUV422 are supported\n");
303 case AV_CODEC_ID_MJPEG:
304 case AV_CODEC_ID_AMV:
305 format_supported = 0;
306 /* JPEG color space */
307 if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
308 avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
309 avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
310 (avctx->color_range == AVCOL_RANGE_JPEG &&
311 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
312 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
313 avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
314 format_supported = 1;
315 /* MPEG color space */
316 else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
317 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
318 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
319 avctx->pix_fmt == AV_PIX_FMT_YUV444P))
320 format_supported = 1;
322 if (!format_supported) {
323 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
/* default case (elided): all other codecs require YUV420P. */
328 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
329 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
/* --- derive internal chroma format from the pixel format --- */
334 switch (avctx->pix_fmt) {
335 case AV_PIX_FMT_YUVJ444P:
336 case AV_PIX_FMT_YUV444P:
337 s->chroma_format = CHROMA_444;
339 case AV_PIX_FMT_YUVJ422P:
340 case AV_PIX_FMT_YUV422P:
341 s->chroma_format = CHROMA_422;
343 case AV_PIX_FMT_YUVJ420P:
344 case AV_PIX_FMT_YUV420P:
346 s->chroma_format = CHROMA_420;
/* --- copy basic user settings into the context --- */
350 s->bit_rate = avctx->bit_rate;
351 s->width = avctx->width;
352 s->height = avctx->height;
353 if (avctx->gop_size > 600 &&
354 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
355 av_log(avctx, AV_LOG_WARNING,
356 "keyframe interval too large!, reducing it from %d to %d\n",
357 avctx->gop_size, 600);
358 avctx->gop_size = 600;
360 s->gop_size = avctx->gop_size;
362 if (avctx->max_b_frames > MAX_B_FRAMES) {
363 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
364 "is %d.\n", MAX_B_FRAMES);
365 avctx->max_b_frames = MAX_B_FRAMES;
367 s->max_b_frames = avctx->max_b_frames;
368 s->codec_id = avctx->codec->id;
369 s->strict_std_compliance = avctx->strict_std_compliance;
370 s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
371 s->mpeg_quant = avctx->mpeg_quant;
372 s->rtp_mode = !!avctx->rtp_payload_size;
373 s->intra_dc_precision = avctx->intra_dc_precision;
375 // workaround some differences between how applications specify dc precision
/* Normalize: some apps pass 8..11 (absolute bits), some pass 0..3. */
376 if (s->intra_dc_precision < 0) {
377 s->intra_dc_precision += 8;
378 } else if (s->intra_dc_precision >= 8)
379 s->intra_dc_precision -= 8;
381 if (s->intra_dc_precision < 0) {
382 av_log(avctx, AV_LOG_ERROR,
383 "intra dc precision must be positive, note some applications use"
384 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
385 return AVERROR(EINVAL);
/* Only MPEG-2 supports non-zero DC precision (up to 3 == 11 bits). */
388 if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
389 av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
390 return AVERROR(EINVAL);
392 s->user_specified_pts = AV_NOPTS_VALUE;
394 if (s->gop_size <= 1) {
/* --- deprecated-API shims --- */
401 #if FF_API_MOTION_EST
402 FF_DISABLE_DEPRECATION_WARNINGS
403 s->me_method = avctx->me_method;
404 FF_ENABLE_DEPRECATION_WARNINGS
408 s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
411 FF_DISABLE_DEPRECATION_WARNINGS
412 if (avctx->border_masking != 0.0)
413 s->border_masking = avctx->border_masking;
414 FF_ENABLE_DEPRECATION_WARNINGS
/* Adaptive quant is enabled when any masking option or QP_RD asks for it. */
417 s->adaptive_quant = (s->avctx->lumi_masking ||
418 s->avctx->dark_masking ||
419 s->avctx->temporal_cplx_masking ||
420 s->avctx->spatial_cplx_masking ||
421 s->avctx->p_masking ||
423 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
426 s->loop_filter = !!(s->avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
/* --- rate control / VBV parameter validation --- */
/* Auto-pick a VBV buffer size when max rate is given without one. */
428 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
429 switch(avctx->codec_id) {
430 case AV_CODEC_ID_MPEG1VIDEO:
431 case AV_CODEC_ID_MPEG2VIDEO:
432 avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
434 case AV_CODEC_ID_MPEG4:
435 case AV_CODEC_ID_MSMPEG4V1:
436 case AV_CODEC_ID_MSMPEG4V2:
437 case AV_CODEC_ID_MSMPEG4V3:
/* Piecewise-linear interpolation between VBV sizes at standard rates. */
438 if (avctx->rc_max_rate >= 15000000) {
439 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
440 } else if(avctx->rc_max_rate >= 2000000) {
441 avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
442 } else if(avctx->rc_max_rate >= 384000) {
443 avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
445 avctx->rc_buffer_size = 40;
446 avctx->rc_buffer_size *= 16384;
449 if (avctx->rc_buffer_size) {
450 av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
454 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
455 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
459 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
460 av_log(avctx, AV_LOG_INFO,
461 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
464 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
465 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
469 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
470 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
474 if (avctx->rc_max_rate &&
475 avctx->rc_max_rate == avctx->bit_rate &&
476 avctx->rc_max_rate != avctx->rc_min_rate) {
477 av_log(avctx, AV_LOG_INFO,
478 "impossible bitrate constraints, this will fail\n");
/* VBV buffer must hold at least one frame's worth of bits. */
481 if (avctx->rc_buffer_size &&
482 avctx->bit_rate * (int64_t)avctx->time_base.num >
483 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
484 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
488 if (!s->fixed_qscale &&
489 avctx->bit_rate * av_q2d(avctx->time_base) >
490 avctx->bit_rate_tolerance) {
491 av_log(avctx, AV_LOG_WARNING,
492 "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, (int64_t)avctx->bit_rate);
493 avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
/* CBR MPEG-1/2: warn when vbv_delay would overflow its 16-bit field. */
496 if (s->avctx->rc_max_rate &&
497 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
498 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
499 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
500 90000LL * (avctx->rc_buffer_size - 1) >
501 s->avctx->rc_max_rate * 0xFFFFLL) {
502 av_log(avctx, AV_LOG_INFO,
503 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
504 "specified vbv buffer is too large for the given bitrate!\n");
/* --- feature-vs-codec capability checks --- */
507 if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
508 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
509 s->codec_id != AV_CODEC_ID_FLV1) {
510 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
514 if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
515 av_log(avctx, AV_LOG_ERROR,
516 "OBMC is only supported with simple mb decision\n");
520 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
521 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
525 if (s->max_b_frames &&
526 s->codec_id != AV_CODEC_ID_MPEG4 &&
527 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
528 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
529 av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
532 if (s->max_b_frames < 0) {
533 av_log(avctx, AV_LOG_ERROR,
534 "max b frames must be 0 or positive for mpegvideo based encoders\n");
/* --- size / aspect-ratio limits per codec --- */
538 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
539 s->codec_id == AV_CODEC_ID_H263 ||
540 s->codec_id == AV_CODEC_ID_H263P) &&
541 (avctx->sample_aspect_ratio.num > 255 ||
542 avctx->sample_aspect_ratio.den > 255)) {
543 av_log(avctx, AV_LOG_WARNING,
544 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
545 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
546 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
547 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
550 if ((s->codec_id == AV_CODEC_ID_H263 ||
551 s->codec_id == AV_CODEC_ID_H263P) &&
552 (avctx->width > 2048 ||
553 avctx->height > 1152 )) {
554 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
557 if ((s->codec_id == AV_CODEC_ID_H263 ||
558 s->codec_id == AV_CODEC_ID_H263P) &&
559 ((avctx->width &3) ||
560 (avctx->height&3) )) {
561 av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
565 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
566 (avctx->width > 4095 ||
567 avctx->height > 4095 )) {
568 av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
572 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
573 (avctx->width > 16383 ||
574 avctx->height > 16383 )) {
575 av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
579 if (s->codec_id == AV_CODEC_ID_RV10 &&
581 avctx->height&15 )) {
582 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
583 return AVERROR(EINVAL);
586 if (s->codec_id == AV_CODEC_ID_RV20 &&
589 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
590 return AVERROR(EINVAL);
593 if ((s->codec_id == AV_CODEC_ID_WMV1 ||
594 s->codec_id == AV_CODEC_ID_WMV2) &&
596 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
600 if ((s->avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
601 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
602 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
606 // FIXME mpeg2 uses that too
607 if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
608 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
609 av_log(avctx, AV_LOG_ERROR,
610 "mpeg2 style quantization not supported by codec\n");
614 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
615 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
619 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
620 s->avctx->mb_decision != FF_MB_DECISION_RD) {
621 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
625 if (s->avctx->scenechange_threshold < 1000000000 &&
626 (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
627 av_log(avctx, AV_LOG_ERROR,
628 "closed gop with scene change detection are not supported yet, "
629 "set threshold to 1000000000\n");
633 if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
634 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
635 av_log(avctx, AV_LOG_ERROR,
636 "low delay forcing is only available for mpeg2\n");
639 if (s->max_b_frames != 0) {
640 av_log(avctx, AV_LOG_ERROR,
641 "b frames cannot be used with low delay\n");
646 if (s->q_scale_type == 1) {
647 if (avctx->qmax > 28) {
648 av_log(avctx, AV_LOG_ERROR,
649 "non linear quant only supports qmax <= 28 currently\n");
654 if (avctx->slices > 1 &&
655 (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
656 av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
657 return AVERROR(EINVAL);
660 if (s->avctx->thread_count > 1 &&
661 s->codec_id != AV_CODEC_ID_MPEG4 &&
662 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
663 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
664 s->codec_id != AV_CODEC_ID_MJPEG &&
665 (s->codec_id != AV_CODEC_ID_H263P)) {
666 av_log(avctx, AV_LOG_ERROR,
667 "multi threaded encoding not supported by codec\n");
671 if (s->avctx->thread_count < 1) {
672 av_log(avctx, AV_LOG_ERROR,
673 "automatic thread number detection not supported by codec, "
678 if (!avctx->time_base.den || !avctx->time_base.num) {
679 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
683 if (avctx->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
684 av_log(avctx, AV_LOG_INFO,
685 "notice: b_frame_strategy only affects the first pass\n");
686 avctx->b_frame_strategy = 0;
/* Reduce the timebase to lowest terms. */
689 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
691 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
692 avctx->time_base.den /= i;
693 avctx->time_base.num /= i;
/* --- default quantizer rounding biases --- */
697 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) {
698 // (a + x * 3 / 8) / x
699 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
700 s->inter_quant_bias = 0;
702 s->intra_quant_bias = 0;
704 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
707 if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
708 av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
709 return AVERROR(EINVAL);
712 #if FF_API_QUANT_BIAS
713 FF_DISABLE_DEPRECATION_WARNINGS
714 if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
715 s->intra_quant_bias = avctx->intra_quant_bias;
716 if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
717 s->inter_quant_bias = avctx->inter_quant_bias;
718 FF_ENABLE_DEPRECATION_WARNINGS
721 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
/* MPEG-4 stores the timebase denominator in 16 bits. */
723 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
724 s->avctx->time_base.den > (1 << 16) - 1) {
725 av_log(avctx, AV_LOG_ERROR,
726 "timebase %d/%d not supported by MPEG 4 standard, "
727 "the maximum admitted value for the timebase denominator "
728 "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
732 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
/* --- per-codec output format / feature configuration --- */
734 switch (avctx->codec->id) {
735 case AV_CODEC_ID_MPEG1VIDEO:
736 s->out_format = FMT_MPEG1;
737 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
738 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
740 case AV_CODEC_ID_MPEG2VIDEO:
741 s->out_format = FMT_MPEG1;
742 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
743 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
746 case AV_CODEC_ID_MJPEG:
747 case AV_CODEC_ID_AMV:
748 s->out_format = FMT_MJPEG;
749 s->intra_only = 1; /* force intra only for jpeg */
750 if (!CONFIG_MJPEG_ENCODER ||
751 ff_mjpeg_encode_init(s) < 0)
756 case AV_CODEC_ID_H261:
757 if (!CONFIG_H261_ENCODER)
759 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
760 av_log(avctx, AV_LOG_ERROR,
761 "The specified picture size of %dx%d is not valid for the "
762 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
763 s->width, s->height);
766 s->out_format = FMT_H261;
769 s->rtp_mode = 0; /* Sliced encoding not supported */
771 case AV_CODEC_ID_H263:
772 if (!CONFIG_H263_ENCODER)
774 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
775 s->width, s->height) == 8) {
776 av_log(avctx, AV_LOG_ERROR,
777 "The specified picture size of %dx%d is not valid for "
778 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
779 "352x288, 704x576, and 1408x1152. "
780 "Try H.263+.\n", s->width, s->height);
783 s->out_format = FMT_H263;
787 case AV_CODEC_ID_H263P:
788 s->out_format = FMT_H263;
791 s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
792 s->modified_quant = s->h263_aic;
793 s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
794 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
797 /* These are just to be sure */
801 case AV_CODEC_ID_FLV1:
802 s->out_format = FMT_H263;
803 s->h263_flv = 2; /* format = 1; 11-bit codes */
804 s->unrestricted_mv = 1;
805 s->rtp_mode = 0; /* don't allow GOB */
809 case AV_CODEC_ID_RV10:
810 s->out_format = FMT_H263;
814 case AV_CODEC_ID_RV20:
815 s->out_format = FMT_H263;
818 s->modified_quant = 1;
822 s->unrestricted_mv = 0;
824 case AV_CODEC_ID_MPEG4:
825 s->out_format = FMT_H263;
827 s->unrestricted_mv = 1;
828 s->low_delay = s->max_b_frames ? 0 : 1;
829 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
831 case AV_CODEC_ID_MSMPEG4V2:
832 s->out_format = FMT_H263;
834 s->unrestricted_mv = 1;
835 s->msmpeg4_version = 2;
839 case AV_CODEC_ID_MSMPEG4V3:
840 s->out_format = FMT_H263;
842 s->unrestricted_mv = 1;
843 s->msmpeg4_version = 3;
844 s->flipflop_rounding = 1;
848 case AV_CODEC_ID_WMV1:
849 s->out_format = FMT_H263;
851 s->unrestricted_mv = 1;
852 s->msmpeg4_version = 4;
853 s->flipflop_rounding = 1;
857 case AV_CODEC_ID_WMV2:
858 s->out_format = FMT_H263;
860 s->unrestricted_mv = 1;
861 s->msmpeg4_version = 5;
862 s->flipflop_rounding = 1;
870 avctx->has_b_frames = !s->low_delay;
874 s->progressive_frame =
875 s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
876 AV_CODEC_FLAG_INTERLACED_ME) ||
/* --- allocation and DSP init; failures jump to `fail` (elided) --- */
881 if (ff_mpv_common_init(s) < 0)
884 ff_fdctdsp_init(&s->fdsp, avctx);
885 ff_me_cmp_init(&s->mecc, avctx);
886 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
887 ff_pixblockdsp_init(&s->pdsp, avctx);
888 ff_qpeldsp_init(&s->qdsp);
890 if (s->msmpeg4_version) {
891 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
892 2 * 2 * (MAX_LEVEL + 1) *
893 (MAX_RUN + 1) * 2 * sizeof(int), fail);
895 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
/* Quantizer tables: 32 qscales x 64 coefficients each. */
897 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
898 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail);
899 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
900 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
901 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
902 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
903 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
904 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
905 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
906 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
908 if (s->avctx->noise_reduction) {
909 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
910 2 * 64 * sizeof(uint16_t), fail);
913 ff_dct_encode_init(s);
915 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
916 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
918 if (s->slice_context_count > 1) {
921 if (avctx->codec_id == AV_CODEC_ID_H263P)
922 s->h263_slice_structured = 1;
925 s->quant_precision = 5;
927 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, s->avctx->ildct_cmp);
928 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->avctx->frame_skip_cmp);
/* Per-format bitstream-writer init. */
930 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
931 ff_h261_encode_init(s);
932 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
933 ff_h263_encode_init(s);
934 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
935 if ((ret = ff_msmpeg4_encode_init(s)) < 0)
937 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
938 && s->out_format == FMT_MPEG1)
939 ff_mpeg1_encode_init(s);
/* --- select default quantization matrices (user override wins) --- */
942 for (i = 0; i < 64; i++) {
943 int j = s->idsp.idct_permutation[i];
944 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
946 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
947 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
948 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
950 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
953 s->chroma_intra_matrix[j] =
954 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
955 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
957 if (s->avctx->intra_matrix)
958 s->intra_matrix[j] = s->avctx->intra_matrix[i];
959 if (s->avctx->inter_matrix)
960 s->inter_matrix[j] = s->avctx->inter_matrix[i];
963 /* precompute matrix */
964 /* for mjpeg, we do include qscale in the matrix */
965 if (s->out_format != FMT_MJPEG) {
966 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
967 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
969 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
970 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
974 if (ff_rate_control_init(s) < 0)
/* --- more deprecated-API shims: copy old AVCodecContext rc fields --- */
977 #if FF_API_ERROR_RATE
978 FF_DISABLE_DEPRECATION_WARNINGS
979 if (avctx->error_rate)
980 s->error_rate = avctx->error_rate;
981 FF_ENABLE_DEPRECATION_WARNINGS;
984 #if FF_API_NORMALIZE_AQP
985 FF_DISABLE_DEPRECATION_WARNINGS
986 if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP)
987 s->mpv_flags |= FF_MPV_FLAG_NAQ;
988 FF_ENABLE_DEPRECATION_WARNINGS;
992 FF_DISABLE_DEPRECATION_WARNINGS
993 if (avctx->flags & CODEC_FLAG_MV0)
994 s->mpv_flags |= FF_MPV_FLAG_MV0;
995 FF_ENABLE_DEPRECATION_WARNINGS
999 FF_DISABLE_DEPRECATION_WARNINGS
1000 if (avctx->rc_qsquish != 0.0)
1001 s->rc_qsquish = avctx->rc_qsquish;
1002 if (avctx->rc_qmod_amp != 0.0)
1003 s->rc_qmod_amp = avctx->rc_qmod_amp;
1004 if (avctx->rc_qmod_freq)
1005 s->rc_qmod_freq = avctx->rc_qmod_freq;
1006 if (avctx->rc_buffer_aggressivity != 1.0)
1007 s->rc_buffer_aggressivity = avctx->rc_buffer_aggressivity;
1008 if (avctx->rc_initial_cplx != 0.0)
1009 s->rc_initial_cplx = avctx->rc_initial_cplx;
1011 s->lmin = avctx->lmin;
1013 s->lmax = avctx->lmax;
1016 av_freep(&s->rc_eq);
1017 s->rc_eq = av_strdup(avctx->rc_eq);
1019 return AVERROR(ENOMEM);
1021 FF_ENABLE_DEPRECATION_WARNINGS
/* b_frame_strategy==2: allocate downscaled scratch frames for lookahead. */
1024 if (avctx->b_frame_strategy == 2) {
1025 for (i = 0; i < s->max_b_frames + 2; i++) {
1026 s->tmp_frames[i] = av_frame_alloc();
1027 if (!s->tmp_frames[i])
1028 return AVERROR(ENOMEM);
1030 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1031 s->tmp_frames[i]->width = s->width >> avctx->brd_scale;
1032 s->tmp_frames[i]->height = s->height >> avctx->brd_scale;
1034 ret = av_frame_get_buffer(s->tmp_frames[i], 32);
/* Export CPB properties as packet side data. */
1040 cpb_props = ff_add_cpb_side_data(avctx);
1042 return AVERROR(ENOMEM);
1043 cpb_props->max_bitrate = avctx->rc_max_rate;
1044 cpb_props->min_bitrate = avctx->rc_min_rate;
1045 cpb_props->avg_bitrate = avctx->bit_rate;
1046 cpb_props->buffer_size = avctx->rc_buffer_size;
/* fail: full teardown via the matching close function. */
1050 ff_mpv_encode_end(avctx);
1051 return AVERROR_UNKNOWN;
/**
 * Free everything allocated by ff_mpv_encode_init() (also used as the
 * failure-path teardown).  Safe on partially-initialized contexts because
 * av_freep()/av_frame_free() tolerate NULL.
 */
1054 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1056 MpegEncContext *s = avctx->priv_data;
1059 ff_rate_control_uninit(s);
1061 ff_mpv_common_end(s);
1062 if (CONFIG_MJPEG_ENCODER &&
1063 s->out_format == FMT_MJPEG)
1064 ff_mjpeg_encode_close(s);
1066 av_freep(&avctx->extradata);
1068 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1069 av_frame_free(&s->tmp_frames[i]);
1071 ff_free_picture_tables(&s->new_picture);
1072 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1074 av_freep(&s->avctx->stats_out);
1075 av_freep(&s->ac_stats);
/* The chroma tables may alias the luma tables (set elsewhere); only free
 * them when they are distinct allocations, then clear the pointers. */
1077 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1078 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1079 s->q_chroma_intra_matrix= NULL;
1080 s->q_chroma_intra_matrix16= NULL;
1081 av_freep(&s->q_intra_matrix);
1082 av_freep(&s->q_inter_matrix);
1083 av_freep(&s->q_intra_matrix16);
1084 av_freep(&s->q_inter_matrix16);
1085 av_freep(&s->input_picture);
1086 av_freep(&s->reordered_input_picture);
1087 av_freep(&s->dct_offset);
/**
 * Sum of absolute errors of a 16x16 block against a constant reference
 * value `ref` (used as a flatness/intra-likelihood measure).
 */
1092 static int get_sae(uint8_t *src, int ref, int stride)
1097 for (y = 0; y < 16; y++) {
1098 for (x = 0; x < 16; x++) {
1099 acc += FFABS(src[x + y * stride] - ref);
/**
 * Count 16x16 blocks that look cheaper to code as intra: for each block,
 * compare the SAD against the reference picture with the SAE around the
 * block's own mean; a block votes "intra" when sae + 500 < sad.
 * Only full 16x16 blocks are scanned (width/height rounded down).
 */
1106 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1107 uint8_t *ref, int stride)
1113 h = s->height & ~15;
1115 for (y = 0; y < h; y += 16) {
1116 for (x = 0; x < w; x += 16) {
1117 int offset = x + y * stride;
1118 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
/* pix_sum over 256 pixels; +128 rounds the >>8 mean. */
1120 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1121 int sae = get_sae(src + offset, mean, stride);
1123 acc += sae + 500 < sad;
/**
 * Thin wrapper around ff_alloc_picture() passing this encoder context's
 * geometry; `shared` selects shared (zero-copy) frame buffers.
 */
1129 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1131 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1132 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1133 s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1134 &s->linesize, &s->uvlinesize);
/* Take one user-supplied input frame, validate/normalize its timestamps,
 * copy or reference it into an internal Picture, and insert it into
 * s->input_picture[] at the slot matching the B-frame encoding delay.
 * Returns 0 on success, a negative AVERROR on failure. */
1137 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1139 Picture *pic = NULL;
1141 int i, display_picture_number = 0, ret;
/* frames of reordering delay: max_b_frames, else 1 unless low_delay */
1142 int encoding_delay = s->max_b_frames ? s->max_b_frames
1143 : (s->low_delay ? 0 : 1);
1144 int flush_offset = 1;
1149 display_picture_number = s->input_picture_number++;
/* --- pts validation: must be strictly increasing --- */
1151 if (pts != AV_NOPTS_VALUE) {
1152 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1153 int64_t last = s->user_specified_pts;
1156 av_log(s->avctx, AV_LOG_ERROR,
1157 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1159 return AVERROR(EINVAL);
1162 if (!s->low_delay && display_picture_number == 1)
1163 s->dts_delta = pts - last;
1165 s->user_specified_pts = pts;
/* --- no pts given: guess by extrapolating from the previous one --- */
1167 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1168 s->user_specified_pts =
1169 pts = s->user_specified_pts + 1;
1170 av_log(s->avctx, AV_LOG_INFO,
1171 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1174 pts = display_picture_number;
/* --- decide whether the input buffer can be used directly (zero-copy);
 * any stride/alignment mismatch forces a copy path --- */
1178 if (!pic_arg->buf[0] ||
1179 pic_arg->linesize[0] != s->linesize ||
1180 pic_arg->linesize[1] != s->uvlinesize ||
1181 pic_arg->linesize[2] != s->uvlinesize)
1183 if ((s->width & 15) || (s->height & 15))
1185 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1187 if (s->linesize & (STRIDE_ALIGN-1))
1190 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1191 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1193 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1197 pic = &s->picture[i];
1201 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1204 ret = alloc_picture(s, pic, direct);
/* input already sits exactly at our in-place offset: nothing to copy */
1209 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1210 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1211 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1214 int h_chroma_shift, v_chroma_shift;
1215 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
/* copy all three planes, adapting per-plane stride and subsampling */
1219 for (i = 0; i < 3; i++) {
1220 int src_stride = pic_arg->linesize[i];
1221 int dst_stride = i ? s->uvlinesize : s->linesize;
1222 int h_shift = i ? h_chroma_shift : 0;
1223 int v_shift = i ? v_chroma_shift : 0;
1224 int w = s->width >> h_shift;
1225 int h = s->height >> v_shift;
1226 uint8_t *src = pic_arg->data[i];
1227 uint8_t *dst = pic->f->data[i];
/* interlaced MPEG-2 with >16 lines of 32-alignment padding needs
 * special handling — NOTE(review): the branch body is not visible here */
1230 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1231 && !s->progressive_sequence
1232 && FFALIGN(s->height, 32) - s->height > 16)
1235 if (!s->avctx->rc_buffer_size)
1236 dst += INPLACE_OFFSET;
1238 if (src_stride == dst_stride)
/* identical strides: one bulk copy covers the whole plane */
1239 memcpy(dst, src, src_stride * h);
1242 uint8_t *dst2 = dst;
1244 memcpy(dst2, src, w);
/* pad non-multiple-of-16 dimensions by replicating edge pixels */
1249 if ((s->width & 15) || (s->height & (vpad-1))) {
1250 s->mpvencdsp.draw_edges(dst, dst_stride,
1259 ret = av_frame_copy_props(pic->f, pic_arg);
1263 pic->f->display_picture_number = display_picture_number;
1264 pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1266 /* Flushing: When we have not received enough input frames,
1267 * ensure s->input_picture[0] contains the first picture */
1268 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1269 if (s->input_picture[flush_offset])
1272 if (flush_offset <= 1)
1275 encoding_delay = encoding_delay - flush_offset + 1;
1278 /* shift buffer entries */
1279 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1280 s->input_picture[i - flush_offset] = s->input_picture[i];
1282 s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether picture 'p' is similar enough to reference 'ref' to be
 * skipped entirely. Accumulates a per-8x8-block comparison score over all
 * three planes (luma counted at double block density via 'bw'), applies
 * the frame_skip_exp scoring mode, and tests the result against
 * frame_skip_threshold and a lambda-scaled frame_skip_factor. */
1287 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1291 int64_t score64 = 0;
1293 for (plane = 0; plane < 3; plane++) {
1294 const int stride = p->f->linesize[plane];
/* luma plane (plane 0) has twice the 8x8 block count per dimension */
1295 const int bw = plane ? 1 : 2;
1296 for (y = 0; y < s->mb_height * bw; y++) {
1297 for (x = 0; x < s->mb_width * bw; x++) {
/* non-shared pictures carry a 16-byte in-place offset */
1298 int off = p->shared ? 0 : 16;
1299 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1300 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1301 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
/* scoring mode selected by |frame_skip_exp|: max, L1, L2, L3, L4 */
1303 switch (FFABS(s->avctx->frame_skip_exp)) {
1304 case 0: score = FFMAX(score, v); break;
1305 case 1: score += FFABS(v); break;
1306 case 2: score64 += v * (int64_t)v; break;
1307 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1308 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
/* negative exponent: normalize by block count and take the inverse root */
1317 if (s->avctx->frame_skip_exp < 0)
1318 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1319 -1.0/s->avctx->frame_skip_exp);
1321 if (score64 < s->avctx->frame_skip_threshold)
1323 if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
/* Encode one frame with a throwaway codec context and discard the output;
 * used by estimate_best_b_count() to measure coded size. The packet is
 * unreferenced before returning, so only the size/error stats survive. */
1328 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1330 AVPacket pkt = { 0 };
1331 int ret, got_output;
1333 av_init_packet(&pkt);
1334 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1339 av_packet_unref(&pkt);
/* b_frame_strategy == 2: brute-force search for the best number of
 * consecutive B-frames. Downscales the queued input pictures by
 * 2^brd_scale, encodes each candidate GOP pattern with a temporary
 * encoder, and picks the pattern with the lowest rate-distortion cost.
 * Returns the best B-frame count, or a negative error code. */
1343 static int estimate_best_b_count(MpegEncContext *s)
1345 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1346 AVCodecContext *c = avcodec_alloc_context3(NULL);
1347 const int scale = s->avctx->brd_scale;
1348 int i, j, out_size, p_lambda, b_lambda, lambda2;
1349 int64_t best_rd = INT64_MAX;
1350 int best_b_count = -1;
1353 return AVERROR(ENOMEM);
1354 av_assert0(scale >= 0 && scale <= 3);
1357 //s->next_picture_ptr->quality;
1358 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1359 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1360 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1361 if (!b_lambda) // FIXME we should do this somewhere else
1362 b_lambda = p_lambda;
1363 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* --- configure the scratch encoder to mirror the real one, downscaled --- */
1366 c->width = s->width >> scale;
1367 c->height = s->height >> scale;
1368 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1369 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1370 c->mb_decision = s->avctx->mb_decision;
1371 c->me_cmp = s->avctx->me_cmp;
1372 c->mb_cmp = s->avctx->mb_cmp;
1373 c->me_sub_cmp = s->avctx->me_sub_cmp;
1374 c->pix_fmt = AV_PIX_FMT_YUV420P;
1375 c->time_base = s->avctx->time_base;
1376 c->max_b_frames = s->max_b_frames;
1378 if (avcodec_open2(c, codec, NULL) < 0)
/* --- downscale the reference picture plus all queued input pictures --- */
1381 for (i = 0; i < s->max_b_frames + 2; i++) {
1382 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1383 s->next_picture_ptr;
1386 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1387 pre_input = *pre_input_ptr;
1388 memcpy(data, pre_input_ptr->f->data, sizeof(data));
/* non-shared pictures are stored with the in-place offset applied */
1390 if (!pre_input.shared && i) {
1391 data[0] += INPLACE_OFFSET;
1392 data[1] += INPLACE_OFFSET;
1393 data[2] += INPLACE_OFFSET;
1396 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1397 s->tmp_frames[i]->linesize[0],
1399 pre_input.f->linesize[0],
1400 c->width, c->height);
1401 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1402 s->tmp_frames[i]->linesize[1],
1404 pre_input.f->linesize[1],
1405 c->width >> 1, c->height >> 1);
1406 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1407 s->tmp_frames[i]->linesize[2],
1409 pre_input.f->linesize[2],
1410 c->width >> 1, c->height >> 1);
/* --- try every candidate B-run length j --- */
1414 for (j = 0; j < s->max_b_frames + 1; j++) {
1417 if (!s->input_picture[j])
1420 c->error[0] = c->error[1] = c->error[2] = 0;
/* frame 0 plays the role of the existing reference (I, qscale 1) */
1422 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1423 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1425 out_size = encode_frame(c, s->tmp_frames[0]);
1427 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1429 for (i = 0; i < s->max_b_frames + 1; i++) {
/* every (j+1)-th frame, and the last one, is coded as P */
1430 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1432 s->tmp_frames[i + 1]->pict_type = is_p ?
1433 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1434 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1436 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1438 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1441 /* get the delayed frames */
1443 out_size = encode_frame(c, NULL);
1444 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* add the accumulated SSE distortion to the rate term */
1447 rd += c->error[0] + c->error[1] + c->error[2];
1458 return best_b_count;
/* Pick the next picture to encode: applies frame skipping, decides the
 * picture type (I/P/B) and the number of B-frames via the configured
 * b_frame_strategy, reorders input pictures into coding order, and sets
 * up s->new_picture / s->current_picture_ptr for the actual encode. */
1461 static int select_input_picture(MpegEncContext *s)
/* advance the reordered queue by one slot */
1465 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1466 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1467 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1469 /* set next picture type & ordering */
1470 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1471 if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1472 if (s->picture_in_gop_number < s->gop_size &&
1473 s->next_picture_ptr &&
1474 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1475 // FIXME check that the gop check above is +-1 correct
1476 av_frame_unref(s->input_picture[0]->f);
/* skipped frame still counts toward VBV accounting */
1478 ff_vbv_update(s, 0);
/* no reference yet, or intra-only mode: force an I-frame */
1484 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1485 !s->next_picture_ptr || s->intra_only) {
1486 s->reordered_input_picture[0] = s->input_picture[0];
1487 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1488 s->reordered_input_picture[0]->f->coded_picture_number =
1489 s->coded_picture_number++;
/* two-pass mode: picture types come from the first-pass log */
1493 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1494 for (i = 0; i < s->max_b_frames + 1; i++) {
1495 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1497 if (pict_num >= s->rc_context.num_entries)
1499 if (!s->input_picture[i]) {
1500 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1504 s->input_picture[i]->f->pict_type =
1505 s->rc_context.entry[pict_num].new_pict_type;
/* strategy 0: always use the maximum available B-run */
1509 if (s->avctx->b_frame_strategy == 0) {
1510 b_frames = s->max_b_frames;
1511 while (b_frames && !s->input_picture[b_frames])
/* strategy 1: shrink the run where intra-count scores suggest a cut */
1513 } else if (s->avctx->b_frame_strategy == 1) {
1514 for (i = 1; i < s->max_b_frames + 1; i++) {
1515 if (s->input_picture[i] &&
1516 s->input_picture[i]->b_frame_score == 0) {
1517 s->input_picture[i]->b_frame_score =
1519 s->input_picture[i ]->f->data[0],
1520 s->input_picture[i - 1]->f->data[0],
1524 for (i = 0; i < s->max_b_frames + 1; i++) {
1525 if (!s->input_picture[i] ||
1526 s->input_picture[i]->b_frame_score - 1 >
1527 s->mb_num / s->avctx->b_sensitivity)
1531 b_frames = FFMAX(0, i - 1);
/* reset scores so the next call recomputes them */
1534 for (i = 0; i < b_frames + 1; i++) {
1535 s->input_picture[i]->b_frame_score = 0;
/* strategy 2: exhaustive RD search on downscaled frames */
1537 } else if (s->avctx->b_frame_strategy == 2) {
1538 b_frames = estimate_best_b_count(s);
1540 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
/* honor explicitly forced picture types from the caller */
1546 for (i = b_frames - 1; i >= 0; i--) {
1547 int type = s->input_picture[i]->f->pict_type;
1548 if (type && type != AV_PICTURE_TYPE_B)
1551 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1552 b_frames == s->max_b_frames) {
1553 av_log(s->avctx, AV_LOG_ERROR,
1554 "warning, too many b frames in a row\n");
/* GOP boundary handling: cut or realign the B-run at gop_size */
1557 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1558 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1559 s->gop_size > s->picture_in_gop_number) {
1560 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1562 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1564 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1568 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1569 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
/* the anchor frame (P or forced I) is coded first, B-frames after it */
1572 s->reordered_input_picture[0] = s->input_picture[b_frames];
1573 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1574 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1575 s->reordered_input_picture[0]->f->coded_picture_number =
1576 s->coded_picture_number++;
1577 for (i = 0; i < b_frames; i++) {
1578 s->reordered_input_picture[i + 1] = s->input_picture[i];
1579 s->reordered_input_picture[i + 1]->f->pict_type =
1581 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1582 s->coded_picture_number++;
1587 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1589 if (s->reordered_input_picture[0]) {
/* reference flag: 3 for I/P (both fields), 0 for B */
1590 s->reordered_input_picture[0]->reference =
1591 s->reordered_input_picture[0]->f->pict_type !=
1592 AV_PICTURE_TYPE_B ? 3 : 0;
1594 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1597 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1598 // input is a shared pix, so we can't modify it -> alloc a new
1599 // one & ensure that the shared one is reusable
1602 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1605 pic = &s->picture[i];
1607 pic->reference = s->reordered_input_picture[0]->reference;
1608 if (alloc_picture(s, pic, 0) < 0) {
1612 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1616 /* mark us unused / free shared pic */
1617 av_frame_unref(s->reordered_input_picture[0]->f);
1618 s->reordered_input_picture[0]->shared = 0;
1620 s->current_picture_ptr = pic;
1622 // input is not a shared pix -> reuse buffer for current_pix
1623 s->current_picture_ptr = s->reordered_input_picture[0];
1624 for (i = 0; i < 4; i++) {
1625 s->new_picture.f->data[i] += INPLACE_OFFSET;
1628 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1629 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1630 s->current_picture_ptr)) < 0)
1633 s->picture_number = s->new_picture.f->display_picture_number;
/* Post-encode bookkeeping for one frame: pad the edges of reference
 * pictures for unrestricted motion vectors, remember the last picture
 * type and lambda, and mirror stats into deprecated AVCodecContext/
 * AVFrame fields behind the FF_API_* compatibility guards. */
1638 static void frame_end(MpegEncContext *s)
/* edge padding is only needed for reference frames with unrestricted MVs */
1640 if (s->unrestricted_mv &&
1641 s->current_picture.reference &&
1643 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1644 int hshift = desc->log2_chroma_w;
1645 int vshift = desc->log2_chroma_h;
1646 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1647 s->current_picture.f->linesize[0],
1648 s->h_edge_pos, s->v_edge_pos,
1649 EDGE_WIDTH, EDGE_WIDTH,
1650 EDGE_TOP | EDGE_BOTTOM);
/* chroma planes: same padding, scaled by the chroma subsampling shifts */
1651 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1652 s->current_picture.f->linesize[1],
1653 s->h_edge_pos >> hshift,
1654 s->v_edge_pos >> vshift,
1655 EDGE_WIDTH >> hshift,
1656 EDGE_WIDTH >> vshift,
1657 EDGE_TOP | EDGE_BOTTOM);
1658 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1659 s->current_picture.f->linesize[2],
1660 s->h_edge_pos >> hshift,
1661 s->v_edge_pos >> vshift,
1662 EDGE_WIDTH >> hshift,
1663 EDGE_WIDTH >> vshift,
1664 EDGE_TOP | EDGE_BOTTOM);
1669 s->last_pict_type = s->pict_type;
1670 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1671 if (s->pict_type!= AV_PICTURE_TYPE_B)
1672 s->last_non_b_pict_type = s->pict_type;
1674 #if FF_API_CODED_FRAME
1675 FF_DISABLE_DEPRECATION_WARNINGS
1676 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1677 FF_ENABLE_DEPRECATION_WARNINGS
1679 #if FF_API_ERROR_FRAME
1680 FF_DISABLE_DEPRECATION_WARNINGS
1681 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1682 sizeof(s->current_picture.encoding_error));
1683 FF_ENABLE_DEPRECATION_WARNINGS
/* Recompute the per-coefficient DCT noise-reduction offsets from the
 * running error sums, separately for intra and inter blocks. Counters
 * are halved once they exceed 2^16 to keep a decaying average. */
1687 static void update_noise_reduction(MpegEncContext *s)
1691 for (intra = 0; intra < 2; intra++) {
1692 if (s->dct_count[intra] > (1 << 16)) {
1693 for (i = 0; i < 64; i++) {
1694 s->dct_error_sum[intra][i] >>= 1;
1696 s->dct_count[intra] >>= 1;
1699 for (i = 0; i < 64; i++) {
/* offset ~ noise_reduction * count / error_sum, rounded; +1 avoids /0 */
1700 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1701 s->dct_count[intra] +
1702 s->dct_error_sum[intra][i] / 2) /
1703 (s->dct_error_sum[intra][i] + 1);
/* Per-frame setup before encoding: rotate last/next/current reference
 * pictures, adjust plane pointers and linesizes for field pictures,
 * select the dequantizer functions for the target format, and refresh
 * the noise-reduction tables. Returns 0 or a negative error code. */
1708 static int frame_start(MpegEncContext *s)
1712 /* mark & release old frames */
1713 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1714 s->last_picture_ptr != s->next_picture_ptr &&
1715 s->last_picture_ptr->f->buf[0]) {
1716 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1719 s->current_picture_ptr->f->pict_type = s->pict_type;
1720 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1722 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1723 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1724 s->current_picture_ptr)) < 0)
/* I/P frames shift the reference window: next becomes last, current
 * becomes next */
1727 if (s->pict_type != AV_PICTURE_TYPE_B) {
1728 s->last_picture_ptr = s->next_picture_ptr;
1730 s->next_picture_ptr = s->current_picture_ptr;
1733 if (s->last_picture_ptr) {
1734 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1735 if (s->last_picture_ptr->f->buf[0] &&
1736 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1737 s->last_picture_ptr)) < 0)
1740 if (s->next_picture_ptr) {
1741 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1742 if (s->next_picture_ptr->f->buf[0] &&
1743 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1744 s->next_picture_ptr)) < 0)
/* field pictures: address every other line, double the strides */
1748 if (s->picture_structure!= PICT_FRAME) {
1750 for (i = 0; i < 4; i++) {
1751 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1752 s->current_picture.f->data[i] +=
1753 s->current_picture.f->linesize[i];
1755 s->current_picture.f->linesize[i] *= 2;
1756 s->last_picture.f->linesize[i] *= 2;
1757 s->next_picture.f->linesize[i] *= 2;
/* pick the dequantizer matching the output bitstream format */
1761 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1762 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1763 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1764 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1765 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1766 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1768 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1769 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1772 if (s->dct_error_sum) {
1773 av_assert2(s->avctx->noise_reduction && s->encoding);
1774 update_noise_reduction(s);
/* Public encode entry point: queue the input frame, pick/reorder the next
 * picture, encode it into 'pkt' (re-encoding at a higher lambda if the VBV
 * budget is exceeded), append stuffing bits, update mpeg1/2 vbv_delay for
 * CBR streams, and fill packet pts/dts/flags. *got_packet is set when a
 * non-empty packet was produced. Returns 0 or a negative error code. */
1780 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1781 const AVFrame *pic_arg, int *got_packet)
1783 MpegEncContext *s = avctx->priv_data;
1784 int i, stuffing_count, ret;
1785 int context_count = s->slice_context_count;
1787 s->vbv_ignore_qmax = 0;
1789 s->picture_in_gop_number++;
1791 if (load_input_picture(s, pic_arg) < 0)
1794 if (select_input_picture(s) < 0) {
/* output? (a picture was selected for encoding) */
1799 if (s->new_picture.f->data[0]) {
/* single-slice, caller-unallocated packet: use the growing byte buffer */
1800 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1801 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1803 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1804 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1807 s->mb_info_ptr = av_packet_new_side_data(pkt,
1808 AV_PKT_DATA_H263_MB_INFO,
1809 s->mb_width*s->mb_height*12);
1810 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* split the output buffer proportionally between slice threads */
1813 for (i = 0; i < context_count; i++) {
1814 int start_y = s->thread_context[i]->start_mb_y;
1815 int end_y = s->thread_context[i]-> end_mb_y;
1816 int h = s->mb_height;
1817 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1818 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1820 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1823 s->pict_type = s->new_picture.f->pict_type;
1825 ret = frame_start(s);
1829 ret = encode_picture(s, s->picture_number);
1830 if (growing_buffer) {
1831 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1832 pkt->data = s->pb.buf;
1833 pkt->size = avctx->internal->byte_buffer_size;
/* mirror bit-accounting stats into deprecated AVCodecContext fields */
1838 #if FF_API_STAT_BITS
1839 FF_DISABLE_DEPRECATION_WARNINGS
1840 avctx->header_bits = s->header_bits;
1841 avctx->mv_bits = s->mv_bits;
1842 avctx->misc_bits = s->misc_bits;
1843 avctx->i_tex_bits = s->i_tex_bits;
1844 avctx->p_tex_bits = s->p_tex_bits;
1845 avctx->i_count = s->i_count;
1846 // FIXME f/b_count in avctx
1847 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1848 avctx->skip_count = s->skip_count;
1849 FF_ENABLE_DEPRECATION_WARNINGS
1854 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1855 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
/* --- VBV check: if the frame is too large, raise lambda and re-encode --- */
1857 if (avctx->rc_buffer_size) {
1858 RateControlContext *rcc = &s->rc_context;
1859 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1860 int hq = (s->avctx->mb_decision == FF_MB_DECISION_RD || s->avctx->trellis);
1861 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1863 if (put_bits_count(&s->pb) > max_size &&
1864 s->lambda < s->lmax) {
1865 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1866 (s->qscale + 1) / s->qscale);
1867 if (s->adaptive_quant) {
1869 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1870 s->lambda_table[i] =
1871 FFMAX(s->lambda_table[i] + min_step,
1872 s->lambda_table[i] * (s->qscale + 1) /
1875 s->mb_skipped = 0; // done in frame_start()
1876 // done in encode_picture() so we must undo it
1877 if (s->pict_type == AV_PICTURE_TYPE_P) {
1878 if (s->flipflop_rounding ||
1879 s->codec_id == AV_CODEC_ID_H263P ||
1880 s->codec_id == AV_CODEC_ID_MPEG4)
1881 s->no_rounding ^= 1;
1883 if (s->pict_type != AV_PICTURE_TYPE_B) {
1884 s->time_base = s->last_time_base;
1885 s->last_non_b_time = s->time - s->pp_time;
/* rewind all slice bit writers before the retry */
1887 for (i = 0; i < context_count; i++) {
1888 PutBitContext *pb = &s->thread_context[i]->pb;
1889 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1891 s->vbv_ignore_qmax = 1;
1892 av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1896 av_assert0(s->avctx->rc_max_rate);
1899 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1900 ff_write_pass1_stats(s);
1902 for (i = 0; i < 4; i++) {
1903 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1904 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1906 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1907 s->current_picture_ptr->encoding_error,
1908 (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1911 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1912 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1913 s->misc_bits + s->i_tex_bits +
1915 flush_put_bits(&s->pb);
1916 s->frame_bits = put_bits_count(&s->pb);
/* --- stuffing bits required by the VBV model --- */
1918 stuffing_count = ff_vbv_update(s, s->frame_bits);
1919 s->stuffing_bits = 8*stuffing_count;
1920 if (stuffing_count) {
1921 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1922 stuffing_count + 50) {
1923 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1927 switch (s->codec_id) {
1928 case AV_CODEC_ID_MPEG1VIDEO:
1929 case AV_CODEC_ID_MPEG2VIDEO:
/* MPEG-1/2: zero bytes are legal stuffing */
1930 while (stuffing_count--) {
1931 put_bits(&s->pb, 8, 0);
/* MPEG-4: emit a stuffing start code (0x1C3) followed by 0xFF bytes */
1934 case AV_CODEC_ID_MPEG4:
1935 put_bits(&s->pb, 16, 0);
1936 put_bits(&s->pb, 16, 0x1C3);
1937 stuffing_count -= 4;
1938 while (stuffing_count--) {
1939 put_bits(&s->pb, 8, 0xFF);
1943 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1945 flush_put_bits(&s->pb);
1946 s->frame_bits = put_bits_count(&s->pb);
1949 /* update mpeg1/2 vbv_delay for CBR */
1950 if (s->avctx->rc_max_rate &&
1951 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1952 s->out_format == FMT_MPEG1 &&
1953 90000LL * (avctx->rc_buffer_size - 1) <=
1954 s->avctx->rc_max_rate * 0xFFFFLL) {
1955 AVCPBProperties *props;
1958 int vbv_delay, min_delay;
1959 double inbits = s->avctx->rc_max_rate *
1960 av_q2d(s->avctx->time_base);
1961 int minbits = s->frame_bits - 8 *
1962 (s->vbv_delay_ptr - s->pb.buf - 1);
1963 double bits = s->rc_context.buffer_index + minbits - inbits;
1966 av_log(s->avctx, AV_LOG_ERROR,
1967 "Internal error, negative bits\n");
1969 assert(s->repeat_first_field == 0);
/* vbv_delay is expressed in 90 kHz clock ticks */
1971 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1972 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1973 s->avctx->rc_max_rate;
1975 vbv_delay = FFMAX(vbv_delay, min_delay);
1977 av_assert0(vbv_delay < 0xFFFF);
/* patch the 16-bit vbv_delay field in-place in the written header */
1979 s->vbv_delay_ptr[0] &= 0xF8;
1980 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1981 s->vbv_delay_ptr[1] = vbv_delay >> 5;
1982 s->vbv_delay_ptr[2] &= 0x07;
1983 s->vbv_delay_ptr[2] |= vbv_delay << 3;
1985 props = av_cpb_properties_alloc(&props_size);
1987 return AVERROR(ENOMEM);
/* convert 90 kHz ticks to the 27 MHz clock used by AVCPBProperties */
1988 props->vbv_delay = vbv_delay * 300;
1990 #if FF_API_VBV_DELAY
1991 FF_DISABLE_DEPRECATION_WARNINGS
1992 avctx->vbv_delay = vbv_delay * 300;
1993 FF_ENABLE_DEPRECATION_WARNINGS
1996 s->total_bits += s->frame_bits;
1997 #if FF_API_STAT_BITS
1998 FF_DISABLE_DEPRECATION_WARNINGS
1999 avctx->frame_bits = s->frame_bits;
2000 FF_ENABLE_DEPRECATION_WARNINGS
/* --- fill packet timestamps; dts lags pts by the B-frame delay --- */
2004 pkt->pts = s->current_picture.f->pts;
2005 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2006 if (!s->current_picture.f->coded_picture_number)
2007 pkt->dts = pkt->pts - s->dts_delta;
2009 pkt->dts = s->reordered_pts;
2010 s->reordered_pts = pkt->pts;
2012 pkt->dts = pkt->pts;
2013 if (s->current_picture.f->key_frame)
2014 pkt->flags |= AV_PKT_FLAG_KEY;
2016 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2021 /* release non-reference frames */
2022 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2023 if (!s->picture[i].reference)
2024 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2027 av_assert1((s->frame_bits & 7) == 0);
2029 pkt->size = s->frame_bits / 8;
2030 *got_packet = !!pkt->size;
/* Zero out block 'n' entirely when it contains only a few small
 * coefficients whose weighted score (per-position weights in 'tab',
 * counted only for |level| == 1) stays below 'threshold'. A negative
 * threshold additionally permits skipping the DC coefficient. */
2034 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2035 int n, int threshold)
/* per-scan-position cost weight; positions past 24 cost nothing */
2037 static const char tab[64] = {
2038 3, 2, 2, 1, 1, 1, 1, 1,
2039 1, 1, 1, 1, 1, 1, 1, 1,
2040 1, 1, 1, 1, 1, 1, 1, 1,
2041 0, 0, 0, 0, 0, 0, 0, 0,
2042 0, 0, 0, 0, 0, 0, 0, 0,
2043 0, 0, 0, 0, 0, 0, 0, 0,
2044 0, 0, 0, 0, 0, 0, 0, 0,
2045 0, 0, 0, 0, 0, 0, 0, 0
2050 int16_t *block = s->block[n];
2051 const int last_index = s->block_last_index[n];
2054 if (threshold < 0) {
2056 threshold = -threshold;
2060 /* Are all we could set to zero already zero? */
2061 if (last_index <= skip_dc - 1)
2064 for (i = 0; i <= last_index; i++) {
2065 const int j = s->intra_scantable.permutated[i];
2066 const int level = FFABS(block[j]);
2068 if (skip_dc && i == 0)
/* any coefficient with |level| > 1 makes the block worth keeping */
2072 } else if (level > 1) {
2078 if (score >= threshold)
/* below threshold: clear the coefficients and mark the block empty */
2080 for (i = skip_dc; i <= last_index; i++) {
2081 const int j = s->intra_scantable.permutated[i];
2085 s->block_last_index[n] = 0;
2087 s->block_last_index[n] = -1;
/* Clamp quantized coefficients into [min_qcoeff, max_qcoeff] so they fit
 * the bitstream's representable range; intra DC (index 0) is exempt.
 * Logs a warning when clipping happened in simple MB-decision mode. */
2090 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2094 const int maxlevel = s->max_qcoeff;
2095 const int minlevel = s->min_qcoeff;
2099 i = 1; // skip clipping of intra dc
2103 for (; i <= last_index; i++) {
2104 const int j = s->intra_scantable.permutated[i];
2105 int level = block[j];
2107 if (level > maxlevel) {
2110 } else if (level < minlevel) {
2118 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2119 av_log(s->avctx, AV_LOG_INFO,
2120 "warning, clipping %d dct coefficients to %d..%d\n",
2121 overflow, minlevel, maxlevel);
/* Compute an 8x8 visual-masking weight table for one block: each weight
 * is derived from the local variance of the pixel's 3x3 neighbourhood
 * (36 * sqrt(count*sqr - sum^2) / count). Used by the quantizer
 * noise-shaping refinement. */
2124 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2128 for (y = 0; y < 8; y++) {
2129 for (x = 0; x < 8; x++) {
/* gather sum and sum-of-squares over the clamped 3x3 neighbourhood */
2135 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2136 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2137 int v = ptr[x2 + y2 * stride];
2143 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2148 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2149 int motion_x, int motion_y,
2150 int mb_block_height,
2154 int16_t weight[12][64];
2155 int16_t orig[12][64];
2156 const int mb_x = s->mb_x;
2157 const int mb_y = s->mb_y;
2160 int dct_offset = s->linesize * 8; // default for progressive frames
2161 int uv_dct_offset = s->uvlinesize * 8;
2162 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2163 ptrdiff_t wrap_y, wrap_c;
2165 for (i = 0; i < mb_block_count; i++)
2166 skip_dct[i] = s->skipdct;
2168 if (s->adaptive_quant) {
2169 const int last_qp = s->qscale;
2170 const int mb_xy = mb_x + mb_y * s->mb_stride;
2172 s->lambda = s->lambda_table[mb_xy];
2175 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2176 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2177 s->dquant = s->qscale - last_qp;
2179 if (s->out_format == FMT_H263) {
2180 s->dquant = av_clip(s->dquant, -2, 2);
2182 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2184 if (s->pict_type == AV_PICTURE_TYPE_B) {
2185 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2188 if (s->mv_type == MV_TYPE_8X8)
2194 ff_set_qscale(s, last_qp + s->dquant);
2195 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2196 ff_set_qscale(s, s->qscale + s->dquant);
2198 wrap_y = s->linesize;
2199 wrap_c = s->uvlinesize;
2200 ptr_y = s->new_picture.f->data[0] +
2201 (mb_y * 16 * wrap_y) + mb_x * 16;
2202 ptr_cb = s->new_picture.f->data[1] +
2203 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2204 ptr_cr = s->new_picture.f->data[2] +
2205 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2207 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2208 uint8_t *ebuf = s->sc.edge_emu_buffer + 36 * wrap_y;
2209 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2210 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2211 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2213 16, 16, mb_x * 16, mb_y * 16,
2214 s->width, s->height);
2216 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2218 mb_block_width, mb_block_height,
2219 mb_x * mb_block_width, mb_y * mb_block_height,
2221 ptr_cb = ebuf + 16 * wrap_y;
2222 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2224 mb_block_width, mb_block_height,
2225 mb_x * mb_block_width, mb_y * mb_block_height,
2227 ptr_cr = ebuf + 16 * wrap_y + 16;
2231 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2232 int progressive_score, interlaced_score;
2234 s->interlaced_dct = 0;
2235 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2236 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2237 NULL, wrap_y, 8) - 400;
2239 if (progressive_score > 0) {
2240 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2241 NULL, wrap_y * 2, 8) +
2242 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2243 NULL, wrap_y * 2, 8);
2244 if (progressive_score > interlaced_score) {
2245 s->interlaced_dct = 1;
2247 dct_offset = wrap_y;
2248 uv_dct_offset = wrap_c;
2250 if (s->chroma_format == CHROMA_422 ||
2251 s->chroma_format == CHROMA_444)
2257 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2258 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2259 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2260 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2262 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2266 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2267 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2268 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2269 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2270 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2271 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2272 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2273 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2274 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2275 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2276 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2277 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2281 op_pixels_func (*op_pix)[4];
2282 qpel_mc_func (*op_qpix)[16];
2283 uint8_t *dest_y, *dest_cb, *dest_cr;
2285 dest_y = s->dest[0];
2286 dest_cb = s->dest[1];
2287 dest_cr = s->dest[2];
2289 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2290 op_pix = s->hdsp.put_pixels_tab;
2291 op_qpix = s->qdsp.put_qpel_pixels_tab;
2293 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2294 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2297 if (s->mv_dir & MV_DIR_FORWARD) {
2298 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2299 s->last_picture.f->data,
2301 op_pix = s->hdsp.avg_pixels_tab;
2302 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2304 if (s->mv_dir & MV_DIR_BACKWARD) {
2305 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2306 s->next_picture.f->data,
2310 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2311 int progressive_score, interlaced_score;
2313 s->interlaced_dct = 0;
2314 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2315 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2319 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2320 progressive_score -= 400;
2322 if (progressive_score > 0) {
2323 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2325 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2329 if (progressive_score > interlaced_score) {
2330 s->interlaced_dct = 1;
2332 dct_offset = wrap_y;
2333 uv_dct_offset = wrap_c;
2335 if (s->chroma_format == CHROMA_422)
2341 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2342 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2343 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2344 dest_y + dct_offset, wrap_y);
2345 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2346 dest_y + dct_offset + 8, wrap_y);
2348 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2352 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2353 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2354 if (!s->chroma_y_shift) { /* 422 */
2355 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2356 dest_cb + uv_dct_offset, wrap_c);
2357 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2358 dest_cr + uv_dct_offset, wrap_c);
2361 /* pre quantization */
2362 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2363 2 * s->qscale * s->qscale) {
2365 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2367 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2369 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2370 wrap_y, 8) < 20 * s->qscale)
2372 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2373 wrap_y, 8) < 20 * s->qscale)
2375 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2377 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2379 if (!s->chroma_y_shift) { /* 422 */
2380 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2381 dest_cb + uv_dct_offset,
2382 wrap_c, 8) < 20 * s->qscale)
2384 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2385 dest_cr + uv_dct_offset,
2386 wrap_c, 8) < 20 * s->qscale)
2392 if (s->quantizer_noise_shaping) {
2394 get_visual_weight(weight[0], ptr_y , wrap_y);
2396 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2398 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2400 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2402 get_visual_weight(weight[4], ptr_cb , wrap_c);
2404 get_visual_weight(weight[5], ptr_cr , wrap_c);
2405 if (!s->chroma_y_shift) { /* 422 */
2407 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2410 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2413 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2416 /* DCT & quantize */
2417 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2419 for (i = 0; i < mb_block_count; i++) {
2422 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2423 // FIXME we could decide to change to quantizer instead of
2425 // JS: I don't think that would be a good idea it could lower
2426 // quality instead of improve it. Just INTRADC clipping
2427 // deserves changes in quantizer
2429 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2431 s->block_last_index[i] = -1;
2433 if (s->quantizer_noise_shaping) {
2434 for (i = 0; i < mb_block_count; i++) {
2436 s->block_last_index[i] =
2437 dct_quantize_refine(s, s->block[i], weight[i],
2438 orig[i], i, s->qscale);
2443 if (s->luma_elim_threshold && !s->mb_intra)
2444 for (i = 0; i < 4; i++)
2445 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2446 if (s->chroma_elim_threshold && !s->mb_intra)
2447 for (i = 4; i < mb_block_count; i++)
2448 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2450 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2451 for (i = 0; i < mb_block_count; i++) {
2452 if (s->block_last_index[i] == -1)
2453 s->coded_score[i] = INT_MAX / 256;
2458 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2459 s->block_last_index[4] =
2460 s->block_last_index[5] = 0;
2462 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2463 if (!s->chroma_y_shift) { /* 422 / 444 */
2464 for (i=6; i<12; i++) {
2465 s->block_last_index[i] = 0;
2466 s->block[i][0] = s->block[4][0];
2471 // non c quantize code returns incorrect block_last_index FIXME
2472 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2473 for (i = 0; i < mb_block_count; i++) {
2475 if (s->block_last_index[i] > 0) {
2476 for (j = 63; j > 0; j--) {
2477 if (s->block[i][s->intra_scantable.permutated[j]])
2480 s->block_last_index[i] = j;
2485 /* huffman encode */
2486 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2487 case AV_CODEC_ID_MPEG1VIDEO:
2488 case AV_CODEC_ID_MPEG2VIDEO:
2489 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2490 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2492 case AV_CODEC_ID_MPEG4:
2493 if (CONFIG_MPEG4_ENCODER)
2494 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2496 case AV_CODEC_ID_MSMPEG4V2:
2497 case AV_CODEC_ID_MSMPEG4V3:
2498 case AV_CODEC_ID_WMV1:
2499 if (CONFIG_MSMPEG4_ENCODER)
2500 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2502 case AV_CODEC_ID_WMV2:
2503 if (CONFIG_WMV2_ENCODER)
2504 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2506 case AV_CODEC_ID_H261:
2507 if (CONFIG_H261_ENCODER)
2508 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2510 case AV_CODEC_ID_H263:
2511 case AV_CODEC_ID_H263P:
2512 case AV_CODEC_ID_FLV1:
2513 case AV_CODEC_ID_RV10:
2514 case AV_CODEC_ID_RV20:
2515 if (CONFIG_H263_ENCODER)
2516 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2518 case AV_CODEC_ID_MJPEG:
2519 case AV_CODEC_ID_AMV:
2520 if (CONFIG_MJPEG_ENCODER)
2521 ff_mjpeg_encode_mb(s, s->block);
2528 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2530 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2531 else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2532 else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/* Snapshot into *d the encoder state that encoding a single macroblock
 * mutates (MV prediction history, skip-run, DC predictors, bit-count
 * statistics, quantizer state), so a trial encode starting from *s can be
 * rolled back or compared.  'type' selects which candidate MB type the
 * snapshot is taken for; the type-dependent copy logic is not fully
 * visible in this excerpt. */
2535 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* motion vector prediction state */
2538     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2541     d->mb_skip_run= s->mb_skip_run;
/* DC predictors (loop header elided in this excerpt) */
2543         d->last_dc[i] = s->last_dc[i];
/* per-category bit statistics */
2546     d->mv_bits= s->mv_bits;
2547     d->i_tex_bits= s->i_tex_bits;
2548     d->p_tex_bits= s->p_tex_bits;
2549     d->i_count= s->i_count;
2550     d->f_count= s->f_count;
2551     d->b_count= s->b_count;
2552     d->skip_count= s->skip_count;
2553     d->misc_bits= s->misc_bits;
/* quantizer state */
2557     d->qscale= s->qscale;
2558     d->dquant= s->dquant;
2560     d->esc3_level_length= s->esc3_level_length;
/* Copy back from *s into *d the state produced by encoding one macroblock:
 * the counterpart of copy_context_before_encode().  Used to commit the
 * winning candidate's state after rate-distortion trials.  Copies more
 * than the "before" variant (mb_intra/mb_skipped/mv_type/mv_dir,
 * PutBitContexts, block_last_index, interlaced_dct). */
2563 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* motion vectors and MV prediction state */
2566     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2567     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2570     d->mb_skip_run= s->mb_skip_run;
/* DC predictors (loop header elided in this excerpt) */
2572         d->last_dc[i] = s->last_dc[i];
/* per-category bit statistics */
2575     d->mv_bits= s->mv_bits;
2576     d->i_tex_bits= s->i_tex_bits;
2577     d->p_tex_bits= s->p_tex_bits;
2578     d->i_count= s->i_count;
2579     d->f_count= s->f_count;
2580     d->b_count= s->b_count;
2581     d->skip_count= s->skip_count;
2582     d->misc_bits= s->misc_bits;
/* macroblock mode decisions */
2584     d->mb_intra= s->mb_intra;
2585     d->mb_skipped= s->mb_skipped;
2586     d->mv_type= s->mv_type;
2587     d->mv_dir= s->mv_dir;
/* data-partitioned streams carry extra PutBitContexts */
2589     if(s->data_partitioning){
2591         d->tex_pb= s->tex_pb;
/* per-block state (loop header elided in this excerpt) */
2595         d->block_last_index[i]= s->block_last_index[i];
2596     d->interlaced_dct= s->interlaced_dct;
2597     d->qscale= s->qscale;
2599     d->esc3_level_length= s->esc3_level_length;
/* Trial-encode one macroblock with a given candidate type and keep it if it
 * beats the current best.  Restores context from 'backup', encodes into one
 * of two ping-pong bit buffers selected by *next_block, scores the result
 * (bit count, or lambda-weighted bits + SSE when mb_decision is RD), and on
 * improvement updates *dmin and copies state into 'best'.
 * The score comparison / *next_block toggle lines are elided in this
 * excerpt. */
2602 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2603                            PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2604                            int *dmin, int *next_block, int motion_x, int motion_y)
2607     uint8_t *dest_backup[3];
/* start from the pristine pre-MB state */
2609     copy_context_before_encode(s, backup, type);
/* route output into the scratch block/bitstream pair for this trial */
2611     s->block= s->blocks[*next_block];
2612     s->pb= pb[*next_block];
2613     if(s->data_partitioning){
2614         s->pb2   = pb2   [*next_block];
2615         s->tex_pb= tex_pb[*next_block];
/* redirect reconstruction into the RD scratchpad so the real picture is untouched */
2619         memcpy(dest_backup, s->dest, sizeof(s->dest));
2620         s->dest[0] = s->sc.rd_scratchpad;
2621         s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2622         s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2623         av_assert0(s->linesize >= 32); //FIXME
2626     encode_mb(s, motion_x, motion_y);
/* base score: bits spent (all partitions) */
2628     score= put_bits_count(&s->pb);
2629     if(s->data_partitioning){
2630         score+= put_bits_count(&s->pb2);
2631         score+= put_bits_count(&s->tex_pb);
/* full RD mode: reconstruct and add lambda-weighted distortion */
2634     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2635         ff_mpv_decode_mb(s, s->block);
2637         score *= s->lambda2;
2638         score += sse_mb(s) << FF_LAMBDA_SHIFT;
2642         memcpy(s->dest, dest_backup, sizeof(s->dest));
/* new best: remember its full context (comparison elided in this excerpt) */
2649         copy_context_after_encode(best, s, type);
/* Sum of squared errors between two w x h pixel regions with the given
 * stride.  Uses the optimized mecc.sse kernels for the common 16x16 and
 * 8x8 sizes; otherwise falls back to a scalar loop via the shifted
 * ff_square_tab lookup (sq[d] == d*d for d in [-256,255]).
 * The fallback loop headers and final return are elided in this excerpt. */
2653 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2654     uint32_t *sq = ff_square_tab + 256;
/* fast path: full-size luma block */
2659         return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2660     else if(w==8 && h==8)
2661         return s->mecc.sse[1](NULL, src1, src2, stride, 8);
/* generic fallback: accumulate squared differences */
2665             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/* Distortion of the current macroblock: compares the reconstructed MB in
 * s->dest against the source picture (s->new_picture).  Full 16x16 MBs use
 * the optimized SSE (or NSSE, if mb_cmp selects it) kernels; edge MBs that
 * are clipped by the picture border (w/h < 16) use the generic sse()
 * fallback with 4:2:0 chroma halving. */
2674 static int sse_mb(MpegEncContext *s){
/* clip MB size at the right/bottom picture border */
2678     if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2679     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
/* full-size MB: optimized kernels (NSSE = noise-preserving SSE variant) */
2682         if(s->avctx->mb_cmp == FF_CMP_NSSE){
2683             return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2684                    s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2685                    s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2687             return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2688                    s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2689                    s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
/* border MB: generic path with clipped dimensions */
2692         return  sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2693                +sse(s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2694                +sse(s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/* Slice-threading worker: coarse pre-pass motion estimation for P frames.
 * Iterates this slice's macroblocks bottom-up / right-to-left (so the
 * pre-pass sees "future" MBs relative to the main pass) using the
 * pre_dia_size search diameter. */
2697 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2698     MpegEncContext *s= *(void**)arg;
2702     s->me.dia_size= s->avctx->pre_dia_size;
2703     s->first_slice_line=1;
2704     for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2705         for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2706             ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
/* only the first processed row counts as the slice's first line */
2708         s->first_slice_line=0;
/* Slice-threading worker: full motion estimation for this slice.
 * Walks macroblocks in raster order, maintaining block_index[] so the
 * motion-vector prediction context is valid, and stores MVs/mb_type into
 * the context tables via the per-picture-type estimators. */
2716 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2717     MpegEncContext *s= *(void**)arg;
2719     ff_check_alignment();
2721     s->me.dia_size= s->avctx->dia_size;
2722     s->first_slice_line=1;
2723     for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2724         s->mb_x=0; //for block init below
2725         ff_init_block_index(s);
2726         for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
/* advance the per-block spatial indices by one MB (2 blocks in x) */
2727             s->block_index[0]+=2;
2728             s->block_index[1]+=2;
2729             s->block_index[2]+=2;
2730             s->block_index[3]+=2;
2732             /* compute motion vector & mb_type and store in context */
2733             if(s->pict_type==AV_PICTURE_TYPE_B)
2734                 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2736                 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2738         s->first_slice_line=0;
/* Slice-threading worker: spatial complexity statistics per macroblock.
 * For each 16x16 luma block of the source picture, computes the pixel sum
 * and an approximate variance (E[x^2] - E[x]^2, with rounding), storing
 * them in mb_var[] / mb_mean[] and accumulating mb_var_sum_temp for later
 * merging across threads. */
2743 static int mb_var_thread(AVCodecContext *c, void *arg){
2744     MpegEncContext *s= *(void**)arg;
2747     ff_check_alignment();
2749     for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2750         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
/* xx/yy pixel coordinates of this MB (declarations elided in this excerpt) */
2753             uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2755             int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
/* variance ~= (sum of squares - mean^2*256) / 256, biased by +500 */
2757             varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2758                     (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2760             s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2761             s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2762             s->me.mb_var_sum_temp    += varc;
/* Finish the bitstream for the current slice: merge MPEG-4 data
 * partitions and add stuffing, or MJPEG stuffing, then byte-align and
 * flush the PutBitContext.  In two-pass mode the alignment/stuffing bits
 * are accounted as misc_bits. */
2768 static void write_slice_end(MpegEncContext *s){
2769     if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2770         if(s->partitioned_frame){
2771             ff_mpeg4_merge_partitions(s);
2774         ff_mpeg4_stuffing(&s->pb);
2775     }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2776         ff_mjpeg_encode_stuffing(s);
/* byte-align so the next slice / trailer starts on a byte boundary */
2779     avpriv_align_put_bits(&s->pb);
2780     flush_put_bits(&s->pb);
2782     if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2783         s->misc_bits+= get_bits_diff(s);
/* Append one 12-byte record to the AV_PKT_DATA_H263_MB_INFO side-data
 * buffer: bit offset of the MB in the packet, current qscale, GOB number,
 * MB address within the GOB, and the H.263 motion vector predictors
 * (4MV predictors are left as 0). */
2786 static void write_mb_info(MpegEncContext *s)
/* the slot was reserved by update_mb_info(); write into its 12 bytes */
2788     uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2789     int offset = put_bits_count(&s->pb);
2790     int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2791     int gobn = s->mb_y / s->gob_index;
/* pred_x/pred_y declarations are elided in this excerpt */
2793     if (CONFIG_H263_ENCODER)
2794         ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2795     bytestream_put_le32(&ptr, offset);
2796     bytestream_put_byte(&ptr, s->qscale);
2797     bytestream_put_byte(&ptr, gobn);
2798     bytestream_put_le16(&ptr, mba);
2799     bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2800     bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2801     /* 4MV not implemented */
2802     bytestream_put_byte(&ptr, 0); /* hmv2 */
2803     bytestream_put_byte(&ptr, 0); /* vmv2 */
/* Bookkeeping for H.263 mb_info side data.  Called before each MB
 * (startcode=0) and after writing a resync marker (startcode=1): reserves
 * a new 12-byte info slot whenever mb_info bits have been written since
 * the previous slot, and records the position of the last startcode so
 * the slot can be filled by write_mb_info(). */
2806 static void update_mb_info(MpegEncContext *s, int startcode)
/* enough bits since the last recorded MB? reserve the next info slot */
2810     if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2811         s->mb_info_size += 12;
2812         s->prev_mb_info = s->last_mb_info;
2815         s->prev_mb_info = put_bits_count(&s->pb)/8;
2816         /* This might have incremented mb_info_size above, and we return without
2817          * actually writing any info into that slot yet. But in that case,
2818          * this will be called again at the start of the after writing the
2819          * start code, actually writing the mb info. */
/* startcode path: remember where this MB starts; ensure a first slot exists */
2823     s->last_mb_info = put_bits_count(&s->pb)/8;
2824     if (!s->mb_info_size)
2825         s->mb_info_size += 12;
/* Grow the shared output bitstream buffer when fewer than 'threshold'
 * bytes remain.  Only possible with a single slice context writing into
 * avctx's internal byte buffer: allocates a larger buffer, copies the
 * already-written data, rebases the PutBitContext and the ptr_lastgob /
 * vbv_delay_ptr pointers into it.
 * Returns 0 on success (implicit in elided lines), AVERROR(ENOMEM) if
 * allocation fails, AVERROR(EINVAL) if still below threshold afterwards. */
2829 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2831     if (   s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2832         && s->slice_context_count == 1
2833         && s->pb.buf == s->avctx->internal->byte_buffer) {
/* remember offsets of the pointers that must survive the rebase */
2834         int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2835         int vbv_pos     = s->vbv_delay_ptr - s->pb.buf;
2837         uint8_t *new_buffer = NULL;
2838         int new_buffer_size = 0;
2840         av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2841                               s->avctx->internal->byte_buffer_size + size_increase);
2843             return AVERROR(ENOMEM);
2845         memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2846         av_free(s->avctx->internal->byte_buffer);
2847         s->avctx->internal->byte_buffer      = new_buffer;
2848         s->avctx->internal->byte_buffer_size = new_buffer_size;
2849         rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2850         s->ptr_lastgob   = s->pb.buf + lastgob_pos;
2851         s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2853     if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2854         return AVERROR(EINVAL);
/* Slice-threading worker: encode all macroblocks of this slice.
 *
 * For each MB it either (a) trial-encodes every candidate MB type with
 * encode_mb_hq() and keeps the rate(-distortion) winner — used when more
 * than one candidate type is possible or QP_RD is enabled — or (b) encodes
 * the single possible type directly.  It also handles output-buffer growth,
 * GOB / video-packet / slice header insertion with resync logic, optional
 * error_rate packet corruption for testing, PSNR accumulation, and the
 * H.263 loop filter.  Statement order here is load-bearing; comments only. */
2858 static int encode_thread(AVCodecContext *c, void *arg){
2859     MpegEncContext *s= *(void**)arg;
2860     int mb_x, mb_y, pdif = 0;
2861     int chr_h= 16>>s->chroma_y_shift;
/* best_s/backup_s hold trial-encode context snapshots for the RD search */
2863     MpegEncContext best_s = { 0 }, backup_s;
/* ping-pong bit buffers: index next_block^1 holds the current best trial */
2864     uint8_t bit_buf[2][MAX_MB_BYTES];
2865     uint8_t bit_buf2[2][MAX_MB_BYTES];
2866     uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2867     PutBitContext pb[2], pb2[2], tex_pb[2];
2869     ff_check_alignment();
/* loop header elided: initializes both ping-pong PutBitContexts */
2872         init_put_bits(&pb    [i], bit_buf    [i], MAX_MB_BYTES);
2873         init_put_bits(&pb2   [i], bit_buf2   [i], MAX_MB_BYTES);
2874         init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2877     s->last_bits= put_bits_count(&s->pb);
2888     /* init last dc values */
2889     /* note: quant matrix value (8) is implied here */
2890     s->last_dc[i] = 128 << s->intra_dc_precision;
2892     s->current_picture.encoding_error[i] = 0;
/* AMV uses non-standard initial DC predictors */
2894     if(s->codec_id==AV_CODEC_ID_AMV){
2895         s->last_dc[0] = 128*8/13;
2896         s->last_dc[1] = 128*8/14;
2897         s->last_dc[2] = 128*8/14;
2900     memset(s->last_mv, 0, sizeof(s->last_mv));
/* per-codec slice setup */
2904     switch(s->codec_id){
2905     case AV_CODEC_ID_H263:
2906     case AV_CODEC_ID_H263P:
2907     case AV_CODEC_ID_FLV1:
2908         if (CONFIG_H263_ENCODER)
2909             s->gob_index = H263_GOB_HEIGHT(s->height);
2911     case AV_CODEC_ID_MPEG4:
2912         if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2913             ff_mpeg4_init_partitions(s);
2919     s->first_slice_line = 1;
2920     s->ptr_lastgob = s->pb.buf;
/* ---- main macroblock loop ---- */
2921     for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2925         ff_set_qscale(s, s->qscale);
2926         ff_init_block_index(s);
2928         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2929             int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2930             int mb_type= s->mb_type[xy];
/* make sure there is room for a worst-case MB in every partition buffer */
2934             int size_increase =  s->avctx->internal->byte_buffer_size/4
2935                                + s->mb_width*MAX_MB_BYTES;
2937             ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
2938             if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2939                 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2942             if(s->data_partitioning){
2943                 if(   s->pb2   .buf_end - s->pb2   .buf - (put_bits_count(&s->    pb2)>>3) < MAX_MB_BYTES
2944                    || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2945                     av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2951             s->mb_y = mb_y;  // moved into loop, can get changed by H.261
2952             ff_update_block_index(s);
/* H.261 encodes MBs in a codec-specific order */
2954             if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2955                 ff_h261_reorder_mb_index(s);
2956                 xy= s->mb_y*s->mb_stride + s->mb_x;
2957                 mb_type= s->mb_type[xy];
2960             /* write gob / video packet header  */
2962                 int current_packet_size, is_gob_start;
2964                 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
/* start a new packet when the RTP payload size is reached */
2966                 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2968                 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
/* per-codec constraints on where a GOB/slice may start */
2970                 switch(s->codec_id){
2971                 case AV_CODEC_ID_H263:
2972                 case AV_CODEC_ID_H263P:
2973                     if(!s->h263_slice_structured)
2974                         if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2976                 case AV_CODEC_ID_MPEG2VIDEO:
2977                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2978                 case AV_CODEC_ID_MPEG1VIDEO:
2979                     if(s->mb_skip_run) is_gob_start=0;
2981                 case AV_CODEC_ID_MJPEG:
2982                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2987                     if(s->start_mb_y != mb_y || mb_x!=0){
2990                         if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2991                             ff_mpeg4_init_partitions(s);
2995                     av_assert2((put_bits_count(&s->pb)&7) == 0);
2996                     current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
/* deliberate packet corruption/drop for error-resilience testing */
2998                     if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
2999                         int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3000                         int d = 100 / s->error_rate;
3002                             current_packet_size=0;
3003                             s->pb.buf_ptr= s->ptr_lastgob;
3004                             assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3008 #if FF_API_RTP_CALLBACK
3009 FF_DISABLE_DEPRECATION_WARNINGS
3010                     if (s->avctx->rtp_callback){
3011                         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3012                         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3014 FF_ENABLE_DEPRECATION_WARNINGS
3016                     update_mb_info(s, 1);
/* write the codec-specific resync header */
3018                     switch(s->codec_id){
3019                     case AV_CODEC_ID_MPEG4:
3020                         if (CONFIG_MPEG4_ENCODER) {
3021                             ff_mpeg4_encode_video_packet_header(s);
3022                             ff_mpeg4_clean_buffers(s);
3025                     case AV_CODEC_ID_MPEG1VIDEO:
3026                     case AV_CODEC_ID_MPEG2VIDEO:
3027                         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3028                             ff_mpeg1_encode_slice_header(s);
3029                             ff_mpeg1_clean_buffers(s);
3032                     case AV_CODEC_ID_H263:
3033                     case AV_CODEC_ID_H263P:
3034                         if (CONFIG_H263_ENCODER)
3035                             ff_h263_encode_gob_header(s, mb_y);
3039                     if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3040                         int bits= put_bits_count(&s->pb);
3041                         s->misc_bits+= bits - s->last_bits;
3045                     s->ptr_lastgob += current_packet_size;
3046                     s->first_slice_line=1;
3047                     s->resync_mb_x=mb_x;
3048                     s->resync_mb_y=mb_y;
3052             if(  (s->resync_mb_x   == s->mb_x)
3053                && s->resync_mb_y+1 == s->mb_y){
3054                 s->first_slice_line=0;
3058             s->dquant=0; //only for QP_RD
3060             update_mb_info(s, 0);
/* ---- multi-candidate (RD) path ---- */
3062             if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3064                 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3066                 copy_context_before_encode(&backup_s, s, -1);
3068                 best_s.data_partitioning= s->data_partitioning;
3069                 best_s.partitioned_frame= s->partitioned_frame;
3070                 if(s->data_partitioning){
3071                     backup_s.pb2= s->pb2;
3072                     backup_s.tex_pb= s->tex_pb;
/* try each candidate MB type in turn; encode_mb_hq keeps the best */
3075                 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3076                     s->mv_dir = MV_DIR_FORWARD;
3077                     s->mv_type = MV_TYPE_16X16;
3079                     s->mv[0][0][0] = s->p_mv_table[xy][0];
3080                     s->mv[0][0][1] = s->p_mv_table[xy][1];
3081                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3082                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3084                 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3085                     s->mv_dir = MV_DIR_FORWARD;
3086                     s->mv_type = MV_TYPE_FIELD;
3089                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3090                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3091                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3093                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3094                                  &dmin, &next_block, 0, 0);
3096                 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3097                     s->mv_dir = MV_DIR_FORWARD;
3098                     s->mv_type = MV_TYPE_16X16;
3102                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3103                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3105                 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3106                     s->mv_dir = MV_DIR_FORWARD;
3107                     s->mv_type = MV_TYPE_8X8;
3110                         s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3111                         s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3113                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3114                                  &dmin, &next_block, 0, 0);
3116                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3117                     s->mv_dir = MV_DIR_FORWARD;
3118                     s->mv_type = MV_TYPE_16X16;
3120                     s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3121                     s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3122                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3123                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3125                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3126                     s->mv_dir = MV_DIR_BACKWARD;
3127                     s->mv_type = MV_TYPE_16X16;
3129                     s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3130                     s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3131                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3132                                  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3134                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3135                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3136                     s->mv_type = MV_TYPE_16X16;
3138                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3139                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3140                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3141                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3142                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3143                                  &dmin, &next_block, 0, 0);
3145                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3146                     s->mv_dir = MV_DIR_FORWARD;
3147                     s->mv_type = MV_TYPE_FIELD;
3150                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3151                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3152                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3154                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3155                                  &dmin, &next_block, 0, 0);
3157                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3158                     s->mv_dir = MV_DIR_BACKWARD;
3159                     s->mv_type = MV_TYPE_FIELD;
3162                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3163                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3164                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3166                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3167                                  &dmin, &next_block, 0, 0);
3169                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3170                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3171                     s->mv_type = MV_TYPE_FIELD;
3173                     for(dir=0; dir<2; dir++){
3175                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3176                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3177                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3180                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3181                                  &dmin, &next_block, 0, 0);
3183                 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3185                     s->mv_type = MV_TYPE_16X16;
3189                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3190                                  &dmin, &next_block, 0, 0);
3191                     if(s->h263_pred || s->h263_aic){
3193                             s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3195                             ff_clean_intra_table_entries(s); //old mode?
/* QP_RD: additionally search nearby quantizers around the winner */
3199                 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3200                     if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3201                         const int last_qp= backup_s.qscale;
3204                         const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3205                         static const int dquant_tab[4]={-1,1,-2,2};
3206                         int storecoefs = s->mb_intra && s->dc_val[0];
3208                         av_assert2(backup_s.dquant == 0);
3211                         s->mv_dir= best_s.mv_dir;
3212                         s->mv_type = MV_TYPE_16X16;
3213                         s->mb_intra= best_s.mb_intra;
3214                         s->mv[0][0][0] = best_s.mv[0][0][0];
3215                         s->mv[0][0][1] = best_s.mv[0][0][1];
3216                         s->mv[1][0][0] = best_s.mv[1][0][0];
3217                         s->mv[1][0][1] = best_s.mv[1][0][1];
/* B frames only try +/-2; others also try +/-1 */
3219                         qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3220                         for(; qpi<4; qpi++){
3221                             int dquant= dquant_tab[qpi];
3222                             qp= last_qp + dquant;
3223                             if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3225                             backup_s.dquant= dquant;
/* save DC/AC predictors so a losing trial can restore them */
3228                                     dc[i]= s->dc_val[0][ s->block_index[i] ];
3229                                     memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3233                             encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3234                                          &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3235                             if(best_s.qscale != qp){
3238                                         s->dc_val[0][ s->block_index[i] ]= dc[i];
3239                                         memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3246                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3247                     int mx= s->b_direct_mv_table[xy][0];
3248                     int my= s->b_direct_mv_table[xy][1];
3250                     backup_s.dquant = 0;
3251                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3253                     ff_mpeg4_set_direct_mv(s, mx, my);
3254                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3255                                  &dmin, &next_block, mx, my);
3257                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3258                     backup_s.dquant = 0;
3259                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3261                     ff_mpeg4_set_direct_mv(s, 0, 0);
3262                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3263                                  &dmin, &next_block, 0, 0);
/* SKIP_RD: try coding the best inter mode as a skipped/not-coded MB */
3265                 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3268                         coded |= s->block_last_index[i];
3271                         memcpy(s->mv, best_s.mv, sizeof(s->mv));
3272                         if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3273                             mx=my=0; //FIXME find the one we actually used
3274                             ff_mpeg4_set_direct_mv(s, mx, my);
3275                         }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3283                         s->mv_dir= best_s.mv_dir;
3284                         s->mv_type = best_s.mv_type;
3286 /*                        s->mv[0][0][0] = best_s.mv[0][0][0];
3287                         s->mv[0][0][1] = best_s.mv[0][0][1];
3288                         s->mv[1][0][0] = best_s.mv[1][0][0];
3289                         s->mv[1][0][1] = best_s.mv[1][0][1];*/
3292                         encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3293                                      &dmin, &next_block, mx, my);
/* commit the winning candidate */
3298             s->current_picture.qscale_table[xy] = best_s.qscale;
3300             copy_context_after_encode(s, &best_s, -1);
/* splice the winning trial's bits (buffer next_block^1) into the real stream */
3302             pb_bits_count= put_bits_count(&s->pb);
3303             flush_put_bits(&s->pb);
3304             avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3307             if(s->data_partitioning){
3308                 pb2_bits_count= put_bits_count(&s->pb2);
3309                 flush_put_bits(&s->pb2);
3310                 avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3311                 s->pb2= backup_s.pb2;
3313                 tex_pb_bits_count= put_bits_count(&s->tex_pb);
3314                 flush_put_bits(&s->tex_pb);
3315                 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3316                 s->tex_pb= backup_s.tex_pb;
3318             s->last_bits= put_bits_count(&s->pb);
3320             if (CONFIG_H263_ENCODER &&
3321                 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3322                 ff_h263_update_motion_val(s);
/* the winner's reconstruction may be sitting in the scratchpad; copy it out */
3324             if(next_block==0){ //FIXME 16 vs linesize16
3325                 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad                     , s->linesize  ,16);
3326                 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize    , s->uvlinesize, 8);
3327                 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3330             if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3331                 ff_mpv_decode_mb(s, s->block);
/* ---- single-candidate path ---- */
3333                 int motion_x = 0, motion_y = 0;
3334                 s->mv_type=MV_TYPE_16X16;
3335                 // only one MB-Type possible
3338                 case CANDIDATE_MB_TYPE_INTRA:
3341                     motion_x= s->mv[0][0][0] = 0;
3342                     motion_y= s->mv[0][0][1] = 0;
3344                 case CANDIDATE_MB_TYPE_INTER:
3345                     s->mv_dir = MV_DIR_FORWARD;
3347                     motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3348                     motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3350                 case CANDIDATE_MB_TYPE_INTER_I:
3351                     s->mv_dir = MV_DIR_FORWARD;
3352                     s->mv_type = MV_TYPE_FIELD;
3355                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3356                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3357                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3360                 case CANDIDATE_MB_TYPE_INTER4V:
3361                     s->mv_dir = MV_DIR_FORWARD;
3362                     s->mv_type = MV_TYPE_8X8;
3365                         s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3366                         s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3369                 case CANDIDATE_MB_TYPE_DIRECT:
3370                     if (CONFIG_MPEG4_ENCODER) {
3371                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3373                         motion_x=s->b_direct_mv_table[xy][0];
3374                         motion_y=s->b_direct_mv_table[xy][1];
3375                         ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3378                 case CANDIDATE_MB_TYPE_DIRECT0:
3379                     if (CONFIG_MPEG4_ENCODER) {
3380                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3382                         ff_mpeg4_set_direct_mv(s, 0, 0);
3385                 case CANDIDATE_MB_TYPE_BIDIR:
3386                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3388                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3389                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3390                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3391                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3393                 case CANDIDATE_MB_TYPE_BACKWARD:
3394                     s->mv_dir = MV_DIR_BACKWARD;
3396                     motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3397                     motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3399                 case CANDIDATE_MB_TYPE_FORWARD:
3400                     s->mv_dir = MV_DIR_FORWARD;
3402                     motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3403                     motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3405                 case CANDIDATE_MB_TYPE_FORWARD_I:
3406                     s->mv_dir = MV_DIR_FORWARD;
3407                     s->mv_type = MV_TYPE_FIELD;
3410                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3411                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3412                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3415                 case CANDIDATE_MB_TYPE_BACKWARD_I:
3416                     s->mv_dir = MV_DIR_BACKWARD;
3417                     s->mv_type = MV_TYPE_FIELD;
3420                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3421                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3422                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3425                 case CANDIDATE_MB_TYPE_BIDIR_I:
3426                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3427                     s->mv_type = MV_TYPE_FIELD;
3429                     for(dir=0; dir<2; dir++){
3431                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3432                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3433                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3438                     av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3441                 encode_mb(s, motion_x, motion_y);
3443                 // RAL: Update last macroblock type
3444                 s->last_mv_dir = s->mv_dir;
3446                 if (CONFIG_H263_ENCODER &&
3447                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3448                     ff_h263_update_motion_val(s);
3450                 ff_mpv_decode_mb(s, s->block);
3453             /* clean the MV table in IPS frames for direct mode in B frames */
3454             if(s->mb_intra /* && I,P,S_TYPE */){
3455                 s->p_mv_table[xy][0]=0;
3456                 s->p_mv_table[xy][1]=0;
/* accumulate per-plane SSE for PSNR reporting */
3459             if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3463                 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3464                 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3466                 s->current_picture.encoding_error[0] += sse(
3467                     s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3468                     s->dest[0], w, h, s->linesize);
3469                 s->current_picture.encoding_error[1] += sse(
3470                     s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
3471                     s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3472                 s->current_picture.encoding_error[2] += sse(
3473                     s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
3474                     s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3477             if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3478                 ff_h263_loop_filter(s);
3480             ff_dlog(s->avctx, "MB %d %d bits\n",
3481                     s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3485     //not beautiful here but we must write it before flushing so it has to be here
3486     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3487         ff_msmpeg4_encode_ext_header(s);
3491 #if FF_API_RTP_CALLBACK
3492 FF_DISABLE_DEPRECATION_WARNINGS
3493     /* Send the last GOB if RTP */
3494     if (s->avctx->rtp_callback) {
3495         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3496         pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3497         /* Call the RTP callback to send the last GOB */
3499         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3501 FF_ENABLE_DEPRECATION_WARNINGS
/* MERGE(): accumulate src->field into dst->field, then zero src->field
 * so a second merge of the same slice context is a no-op. */
3507 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold per-slice-thread motion-estimation statistics back into the main
 * encoder context after the ME pass has run on all slice contexts. */
3508 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3509 MERGE(me.scene_change_score);
3510 MERGE(me.mc_mb_var_sum_temp);
3511 MERGE(me.mb_var_sum_temp);
/* Merge per-slice-thread encoding statistics and the written bitstream of
 * src into the main context dst after the encode pass. Counters are summed
 * via MERGE(); the slice's bits are appended to dst->pb, which requires the
 * slice bitstreams to be byte-aligned (asserted below). */
3514 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3517 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3518 MERGE(dct_count[1]);
3527 MERGE(er.error_count);
3528 MERGE(padding_bug_score);
3529 MERGE(current_picture.encoding_error[0]);
3530 MERGE(current_picture.encoding_error[1]);
3531 MERGE(current_picture.encoding_error[2]);
/* Noise-reduction error sums are only maintained when the user enabled
 * noise_reduction; merge the per-coefficient sums for intra and inter. */
3533 if(dst->avctx->noise_reduction){
3534 for(i=0; i<64; i++){
3535 MERGE(dct_error_sum[0][i]);
3536 MERGE(dct_error_sum[1][i]);
/* Append the slice's (byte-aligned) bitstream onto the main bitstream. */
3540 assert(put_bits_count(&src->pb) % 8 ==0);
3541 assert(put_bits_count(&dst->pb) % 8 ==0);
3542 avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3543 flush_put_bits(&dst->pb);
/* Pick the quantiser/lambda for the current picture.
 * dry_run != 0 means "estimate only, do not consume state" (used for the
 * two-pass pre-check); in that case next_lambda is left intact.
 * Returns < 0 on rate-control failure (return paths are in lines not
 * visible in this extract — TODO confirm against full file). */
3546 static int estimate_qp(MpegEncContext *s, int dry_run){
3547 if (s->next_lambda){
/* A lambda was pre-computed (e.g. by a previous pass); use it directly. */
3548 s->current_picture_ptr->f->quality =
3549 s->current_picture.f->quality = s->next_lambda;
3550 if(!dry_run) s->next_lambda= 0;
3551 } else if (!s->fixed_qscale) {
/* Ask the rate controller for a qscale estimate. */
3552 s->current_picture_ptr->f->quality =
3553 s->current_picture.f->quality = ff_rate_estimate_qscale(s, dry_run);
3554 if (s->current_picture.f->quality < 0)
/* With adaptive quantisation the per-MB qscale tables must be cleaned up
 * to satisfy each codec's constraints on qscale deltas between MBs. */
3558 if(s->adaptive_quant){
3559 switch(s->codec_id){
3560 case AV_CODEC_ID_MPEG4:
3561 if (CONFIG_MPEG4_ENCODER)
3562 ff_clean_mpeg4_qscales(s);
3564 case AV_CODEC_ID_H263:
3565 case AV_CODEC_ID_H263P:
3566 case AV_CODEC_ID_FLV1:
3567 if (CONFIG_H263_ENCODER)
3568 ff_clean_h263_qscales(s);
3571 ff_init_qscale_tab(s);
3574 s->lambda= s->lambda_table[0];
/* Non-adaptive path: lambda comes straight from the picture quality. */
3577 s->lambda = s->current_picture.f->quality;
/* must be called before writing the header */
/* Derive the temporal distances used for B-frame prediction scaling:
 * pp_time = distance between the two reference (non-B) frames,
 * pb_time = distance from the previous reference to this B frame.
 * Times are in time_base.num units taken from the frame PTS. */
3583 static void set_frame_distances(MpegEncContext * s){
3584 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3585 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3587 if(s->pict_type==AV_PICTURE_TYPE_B){
3588 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3589 assert(s->pb_time > 0 && s->pb_time < s->pp_time);
/* Non-B frame: advance the reference-frame clock. */
3591 s->pp_time= s->time - s->last_non_b_time;
3592 s->last_non_b_time= s->time;
3593 assert(s->picture_number==0 || s->pp_time > 0);
/* Encode one picture: run motion estimation across all slice threads,
 * choose picture type / f_code / qscale, build quantisation matrices for
 * MJPEG/AMV, write the picture header for the active output format, then
 * run the per-slice encode threads and merge their results.
 * Returns < 0 on error (error paths are in lines not visible in this
 * extract — TODO confirm against full file). */
3597 static int encode_picture(MpegEncContext *s, int picture_number)
3601 int context_count = s->slice_context_count;
3603 s->picture_number = picture_number;
3605 /* Reset the average MB variance */
3606 s->me.mb_var_sum_temp =
3607 s->me.mc_mb_var_sum_temp = 0;
3609 /* we need to initialize some time vars before we can encode b-frames */
3610 // RAL: Condition added for MPEG1VIDEO
3611 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3612 set_frame_distances(s);
3613 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3614 ff_set_mpeg4_time(s);
3616 s->me.scene_change_score=0;
3618 //    s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* Rounding control: MSMPEG4 v3+ forces no_rounding on I frames; codecs
 * with flip-flop rounding toggle it on every non-B reference frame. */
3620 if(s->pict_type==AV_PICTURE_TYPE_I){
3621 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3622 else s->no_rounding=0;
3623 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3624 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3625 s->no_rounding ^= 1;
/* Two-pass mode reads qscale/f_code from the pass-1 log; otherwise seed
 * lambda from the last frame of the same (or last reference) type. */
3628 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3629 if (estimate_qp(s,1) < 0)
3631 ff_get_2pass_fcode(s);
3632 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3633 if(s->pict_type==AV_PICTURE_TYPE_B)
3634 s->lambda= s->last_lambda_for[s->pict_type];
3636 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* For everything except AMV/MJPEG the chroma intra matrices alias the
 * luma ones; free any separately-allocated copies first. */
3640 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3641 if(s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
3642 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3643 s->q_chroma_intra_matrix   = s->q_intra_matrix;
3644 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3647 s->mb_intra=0; //for the rate distortion & bit compare functions
3648 for(i=1; i<context_count; i++){
3649 ret = ff_update_duplicate_context(s->thread_context[i], s);
3657 /* Estimate motion for every MB */
3658 if(s->pict_type != AV_PICTURE_TYPE_I){
/* Bias lambda by the user's ME penalty compensation (Q8 fixed point). */
3659 s->lambda  = (s->lambda  * s->avctx->me_penalty_compensation + 128)>>8;
3660 s->lambda2 = (s->lambda2 * (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3661 if (s->pict_type != AV_PICTURE_TYPE_B) {
3662 if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3663 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3667 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3668 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
/* I frame: every MB is intra by definition. */
3670 for(i=0; i<s->mb_stride*s->mb_height; i++)
3671 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3673 if(!s->fixed_qscale){
3674 /* finding spatial complexity for I-frame rate control */
3675 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3678 for(i=1; i<context_count; i++){
3679 merge_context_after_me(s, s->thread_context[i]);
3681 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3682 s->current_picture.   mb_var_sum= s->current_picture_ptr->   mb_var_sum= s->me.   mb_var_sum_temp;
/* Scene-change detection: promote a P frame to I when the ME score
 * exceeds the user threshold, and mark all MBs intra. */
3685 if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3686 s->pict_type= AV_PICTURE_TYPE_I;
3687 for(i=0; i<s->mb_stride*s->mb_height; i++)
3688 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3689 if(s->msmpeg4_version >= 3)
3691 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3692 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* Choose the forward f_code for P/S frames from the MV distribution and
 * clip any motion vectors that the chosen f_code cannot represent. */
3696 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3697 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3699 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3701 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3702 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3703 s->f_code= FFMAX3(s->f_code, a, b);
3706 ff_fix_long_p_mvs(s);
3707 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3708 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3712 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3713 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
/* B frames: pick forward (f_code) and backward (b_code) ranges from the
 * respective MV tables, then clip all four MV table variants. */
3718 if(s->pict_type==AV_PICTURE_TYPE_B){
3721 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3722 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3723 s->f_code = FFMAX(a, b);
3725 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3726 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3727 s->b_code = FFMAX(a, b);
3729 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3730 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3731 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3732 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3733 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3735 for(dir=0; dir<2; dir++){
3738 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3739 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3740 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3741 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* Final (non-dry-run) qscale decision for this picture. */
3749 if (estimate_qp(s, 0) < 0)
3752 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3753 s->pict_type == AV_PICTURE_TYPE_I &&
3754 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3755 s->qscale= 3; //reduce clipping problems
/* MJPEG bakes qscale into the quant matrices themselves (>>3 scaling),
 * honouring user-supplied luma/chroma intra matrices when present. */
3757 if (s->out_format == FMT_MJPEG) {
3758 const uint16_t *  luma_matrix = ff_mpeg1_default_intra_matrix;
3759 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3761 if (s->avctx->intra_matrix) {
3763 luma_matrix = s->avctx->intra_matrix;
3765 if (s->avctx->chroma_intra_matrix)
3766 chroma_matrix = s->avctx->chroma_intra_matrix;
3768 /* for mjpeg, we do include qscale in the matrix */
3770 int j = s->idsp.idct_permutation[i];
3772 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3773 s->       intra_matrix[j] = av_clip_uint8((  luma_matrix[i] * s->qscale) >> 3);
3775 s->y_dc_scale_table=
3776 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3777 s->chroma_intra_matrix[0] =
3778 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3779 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3780 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3781 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3782 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV uses fixed DC scales (13 luma / 14 chroma) and the SP5x tables. */
3785 if(s->codec_id == AV_CODEC_ID_AMV){
3786 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3787 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3789 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3791 s->intra_matrix[j]        = sp5x_quant_table[5*2+0][i];
3792 s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3794 s->y_dc_scale_table= y;
3795 s->c_dc_scale_table= c;
3796 s->intra_matrix[0] = 13;
3797 s->chroma_intra_matrix[0] = 14;
3798 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3799 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3800 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3801 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3805 //FIXME var duplication
3806 s->current_picture_ptr->f->key_frame =
3807 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3808 s->current_picture_ptr->f->pict_type =
3809 s->current_picture.f->pict_type = s->pict_type;
3811 if (s->current_picture.f->key_frame)
3812 s->picture_in_gop_number=0;
/* Write the picture header for the active output format. */
3814 s->mb_x = s->mb_y = 0;
3815 s->last_bits= put_bits_count(&s->pb);
3816 switch(s->out_format) {
3818 if (CONFIG_MJPEG_ENCODER)
3819 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3820 s->intra_matrix, s->chroma_intra_matrix);
3823 if (CONFIG_H261_ENCODER)
3824 ff_h261_encode_picture_header(s, picture_number);
3827 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3828 ff_wmv2_encode_picture_header(s, picture_number);
3829 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3830 ff_msmpeg4_encode_picture_header(s, picture_number);
3831 else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
3832 ff_mpeg4_encode_picture_header(s, picture_number);
3833 else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3834 ret = ff_rv10_encode_picture_header(s, picture_number);
3838 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3839 ff_rv20_encode_picture_header(s, picture_number);
3840 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3841 ff_flv_encode_picture_header(s, picture_number);
3842 else if (CONFIG_H263_ENCODER)
3843 ff_h263_encode_picture_header(s, picture_number);
3846 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3847 ff_mpeg1_encode_picture_header(s, picture_number);
3852 bits= put_bits_count(&s->pb);
3853 s->header_bits= bits - s->last_bits;
/* Run the per-slice encode threads, then merge bitstreams and stats.
 * When a slice buffer directly follows the main buffer, grow the main
 * PutBitContext instead of copying (buf_end aliasing check below). */
3855 for(i=1; i<context_count; i++){
3856 update_duplicate_context_after_me(s->thread_context[i], s);
3858 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3859 for(i=1; i<context_count; i++){
3860 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3861 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
3862 merge_context_after_encode(s, s->thread_context[i]);
/* DCT-domain noise reduction (C reference implementation).
 * Accumulates per-coefficient error statistics in dct_error_sum and pulls
 * each coefficient toward zero by the running dct_offset, clamping so a
 * coefficient never crosses zero. Separate stats are kept for intra and
 * inter blocks. */
3868 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3869 const int intra= s->mb_intra;
3872 s->dct_count[intra]++;
3874 for(i=0; i<64; i++){
3875 int level= block[i];
/* Positive branch: record magnitude, subtract offset, clamp at zero. */
3879 s->dct_error_sum[intra][i] += level;
3880 level -= s->dct_offset[intra][i];
3881 if(level<0) level=0;
/* Negative branch: mirror of the above (sums are kept as magnitudes). */
3883 s->dct_error_sum[intra][i] -= level;
3884 level += s->dct_offset[intra][i];
3885 if(level>0) level=0;
/* Rate-distortion optimal (trellis) quantisation of one 8x8 block.
 * Performs the forward DCT, builds up to two candidate quantised levels
 * per coefficient, then runs a Viterbi-style search over (run, level)
 * codes using the codec's VLC length tables to minimise
 * distortion + lambda * rate. Writes the chosen levels back into block[]
 * in permuted order and returns the index of the last nonzero
 * coefficient (or -1 when the block quantises to all zeros).
 * n selects the block (n<4 luma, else chroma); *overflow is set when a
 * level exceeded max_qcoeff. */
3892 static int dct_quantize_trellis_c(MpegEncContext *s,
3893 int16_t *block, int n,
3894 int qscale, int *overflow){
3896 const uint16_t *matrix;
3897 const uint8_t *scantable= s->intra_scantable.scantable;
3898 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3900 unsigned int threshold1, threshold2;
3912 int coeff_count[64];
3913 int qmul, qadd, start_i, last_non_zero, i, dc;
3914 const int esc_length= s->ac_esc_length;
3916 uint8_t * last_length;
3917 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3920 s->fdsp.fdct(block);
3922 if(s->dct_error_sum)
3923 s->denoise_dct(s, block);
3925 qadd= ((qscale-1)|1)*8;
/* MPEG-2 may use the non-linear qscale mapping. */
3927 if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3928 else                 mpeg2_qscale = qscale << 1;
3939 /* For AIC we skip quant/dequant of INTRADC */
3944 /* note: block[0] is assumed to be positive */
3945 block[0] = (block[0] + (q >> 1)) / q;
/* Intra path: select luma vs chroma matrices and VLC length tables. */
3948 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
3949 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
3950 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
3951 bias= 1<<(QMAT_SHIFT-1);
3953 if (n > 3 && s->intra_chroma_ac_vlc_length) {
3954 length     = s->intra_chroma_ac_vlc_length;
3955 last_length= s->intra_chroma_ac_vlc_last_length;
3957 length     = s->intra_ac_vlc_length;
3958 last_length= s->intra_ac_vlc_last_length;
/* Inter path. */
3963 qmat = s->q_inter_matrix[qscale];
3964 matrix = s->inter_matrix;
3965 length     = s->inter_ac_vlc_length;
3966 last_length= s->inter_ac_vlc_last_length;
3970 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3971 threshold2= (threshold1<<1);
/* First scan from the end to find the last coefficient that survives
 * the quantisation threshold. */
3973 for(i=63; i>=start_i; i--) {
3974 const int j = scantable[i];
3975 int level = block[j] * qmat[j];
3977 if(((unsigned)(level+threshold1))>threshold2){
/* Second scan: build per-coefficient candidate levels (level and
 * level-1, i.e. at most 2 candidates) for the trellis. */
3983 for(i=start_i; i<=last_non_zero; i++) {
3984 const int j = scantable[i];
3985 int level = block[j] * qmat[j];
3987 //        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
3988 //           || bias-level >= (1<<(QMAT_SHIFT - 3))){
3989 if(((unsigned)(level+threshold1))>threshold2){
3991 level= (bias + level)>>QMAT_SHIFT;
3993 coeff[1][i]= level-1;
3994 //                coeff[2][k]= level-2;
3996 level= (bias - level)>>QMAT_SHIFT;
3997 coeff[0][i]= -level;
3998 coeff[1][i]= -level+1;
3999 //                coeff[2][k]= -level+2;
4001 coeff_count[i]= FFMIN(level, 2);
4002 av_assert2(coeff_count[i]);
4005 coeff[0][i]= (level>>31)|1;
4010 *overflow= s->max_qcoeff < max; //overflow might have happened
/* All coefficients quantised to zero: clear and bail out. */
4012 if(last_non_zero < start_i){
4013 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4014 return last_non_zero;
/* Trellis search: survivor[] holds the positions that may still start a
 * cheaper path; score_tab[i] is the best cost ending before position i. */
4017 score_tab[start_i]= 0;
4018 survivor[0]= start_i;
4021 for(i=start_i; i<=last_non_zero; i++){
4022 int level_index, j, zero_distortion;
4023 int dct_coeff= FFABS(block[ scantable[i] ]);
4024 int best_score=256*256*256*120;
/* ifast FDCT output is scaled by the AAN factors; undo for distortion. */
4026 if (s->fdsp.fdct == ff_fdct_ifast)
4027 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4028 zero_distortion= dct_coeff*dct_coeff;
4030 for(level_index=0; level_index < coeff_count[i]; level_index++){
4032 int level= coeff[level_index][i];
4033 const int alevel= FFABS(level);
/* Reconstruct the dequantised value the decoder would see, per the
 * active output format's dequant rule. */
4038 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4039 unquant_coeff= alevel*qmul + qadd;
4040 } else if(s->out_format == FMT_MJPEG) {
4041 j = s->idsp.idct_permutation[scantable[i]];
4042 unquant_coeff = alevel * matrix[j] * 8;
4044 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4046 unquant_coeff = (int)(  alevel  * mpeg2_qscale * matrix[j]) >> 4;
4047 unquant_coeff =   (unquant_coeff - 1) | 1;
4049 unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4050 unquant_coeff =   (unquant_coeff - 1) | 1;
4055 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* Levels encodable with the regular VLC table (|level| < 128). */
4057 if((level&(~127)) == 0){
4058 for(j=survivor_count-1; j>=0; j--){
4059 int run= i - survivor[j];
4060 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4061 score += score_tab[i-run];
4063 if(score < best_score){
4066 level_tab[i+1]= level-64;
/* H.263/H.261 use a distinct "last" VLC table for the final coeff. */
4070 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4071 for(j=survivor_count-1; j>=0; j--){
4072 int run= i - survivor[j];
4073 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4074 score += score_tab[i-run];
4075 if(score < last_score){
4078 last_level= level-64;
/* Escape-coded levels: fixed esc_length rate cost. */
4084 distortion += esc_length*lambda;
4085 for(j=survivor_count-1; j>=0; j--){
4086 int run= i - survivor[j];
4087 int score= distortion + score_tab[i-run];
4089 if(score < best_score){
4092 level_tab[i+1]= level-64;
4096 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4097 for(j=survivor_count-1; j>=0; j--){
4098 int run= i - survivor[j];
4099 int score= distortion + score_tab[i-run];
4100 if(score < last_score){
4103 last_level= level-64;
4111 score_tab[i+1]= best_score;
4113 //Note: there is a vlc code in mpeg4 which is 1 bit shorter than another one with a shorter run and the same level
/* Prune survivors that can no longer lead to a cheaper path; the <=27
 * special case accounts for the MPEG-4 VLC anomaly noted above. */
4114 if(last_non_zero <= 27){
4115 for(; survivor_count; survivor_count--){
4116 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4120 for(; survivor_count; survivor_count--){
4121 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4126 survivor[ survivor_count++ ]= i+1;
/* Formats without a "last" VLC: choose the cheapest stopping point. */
4129 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4130 last_score= 256*256*256*120;
4131 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4132 int score= score_tab[i];
4133 if(i) score += lambda*2; //FIXME exacter?
4135 if(score < last_score){
4138 last_level= level_tab[i];
4139 last_run= run_tab[i];
4144 s->coded_score[n] = last_score;
4146 dc= FFABS(block[0]);
4147 last_non_zero= last_i - 1;
4148 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4150 if(last_non_zero < start_i)
4151 return last_non_zero;
/* DC-only block: pick the best DC level by direct RD comparison. */
4153 if(last_non_zero == 0 && start_i == 0){
4155 int best_score= dc * dc;
4157 for(i=0; i<coeff_count[0]; i++){
4158 int level= coeff[i][0];
4159 int alevel= FFABS(level);
4160 int unquant_coeff, score, distortion;
4162 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4163 unquant_coeff= (alevel*qmul + qadd)>>3;
4165 unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4166 unquant_coeff =   (unquant_coeff - 1) | 1;
4168 unquant_coeff = (unquant_coeff + 4) >> 3;
4169 unquant_coeff<<= 3 + 3;
4171 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4173 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4174 else                    score= distortion + esc_length*lambda;
4176 if(score < best_score){
4178 best_level= level - 64;
4181 block[0]= best_level;
4182 s->coded_score[n] = best_score - dc*dc;
4183 if(best_level == 0) return -1;
4184 else                return last_non_zero;
/* Backtrack through level_tab/run_tab and write the winning levels into
 * block[] at their (IDCT-)permuted positions. */
4188 av_assert2(last_level);
4190 block[ perm_scantable[last_non_zero] ]= last_level;
4193 for(; i>start_i; i -= run_tab[i] + 1){
4194 block[ perm_scantable[i-1] ]= level_tab[i];
4197 return last_non_zero;
4200 //#define REFINE_STATS 1
/* Per-coefficient spatial basis functions of the 8x8 DCT, in the IDCT
 * permutation order; filled lazily by build_basis() and used by
 * dct_quantize_refine() to evaluate spatial-domain error deltas. */
4201 static int16_t basis[64][64];
/* Precompute the 64 DCT basis images, scaled by 0.25*(1<<BASIS_SHIFT)
 * with the orthonormalisation factor sqrt(0.5) on the DC row/column. */
4203 static void build_basis(uint8_t *perm){
4210 double s= 0.25*(1<<BASIS_SHIFT);
4212 int perm_index= perm[index];
4213 if(i==0) s*= sqrt(0.5);
4214 if(j==0) s*= sqrt(0.5);
4215 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Iterative spatial-domain refinement of an already-quantised 8x8 block
 * (quantizer_noise_shaping). Repeatedly tries +/-1 changes to each
 * coefficient, evaluating the combined change in weighted spatial error
 * (via mpvencdsp try_8x8basis over basis[]) and VLC rate, and applies the
 * best change until none improves the score. weight[] is the per-pixel
 * noise-shaping weight, orig[] the source pixels. Returns the new last
 * nonzero index. */
4222 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4223 int16_t *block, int16_t *weight, int16_t *orig,
4226 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4227 const uint8_t *scantable= s->intra_scantable.scantable;
4228 const uint8_t *perm_scantable= s->intra_scantable.permutated;
4229 //    unsigned int threshold1, threshold2;
4234 int qmul, qadd, start_i, last_non_zero, i, dc;
4236 uint8_t * last_length;
4238 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* REFINE_STATS instrumentation counters (compiled out by default). */
4241 static int after_last=0;
4242 static int to_zero=0;
4243 static int from_zero=0;
4246 static int messed_sign=0;
4249 if(basis[0][0] == 0)
4250 build_basis(s->idsp.idct_permutation);
4261 /* For AIC we skip quant/dequant of INTRADC */
4265 q <<= RECON_SHIFT-3;
4266 /* note: block[0] is assumed to be positive */
4268 //            block[0] = (block[0] + (q >> 1)) / q;
4270 //    if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4271 //        bias= 1<<(QMAT_SHIFT-1);
/* Select VLC length tables (intra luma/chroma vs inter). */
4272 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4273 length     = s->intra_chroma_ac_vlc_length;
4274 last_length= s->intra_chroma_ac_vlc_last_length;
4276 length     = s->intra_ac_vlc_length;
4277 last_length= s->intra_ac_vlc_last_length;
4282 length     = s->inter_ac_vlc_length;
4283 last_length= s->inter_ac_vlc_last_length;
4285 last_non_zero = s->block_last_index[n];
/* Initialise rem[] = reconstruction error vs orig, in RECON_SHIFT fixed
 * point, starting from the DC value. */
4290 dc += (1<<(RECON_SHIFT-1));
4291 for(i=0; i<64; i++){
4292 rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME  use orig dirrectly instead of copying to rem[]
4295 STOP_TIMER("memset rem[]")}
/* Build the per-pixel weight table (range 16..63) from weight[] and the
 * quantizer_noise_shaping strength qns. */
4298 for(i=0; i<64; i++){
4303 w= FFABS(weight[i]) + qns*one;
4304 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4307 //            w=weight[i] = (63*qns + (w/2)) / w;
4310 av_assert2(w<(1<<6));
4313 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* Build the RLE run table of the current block and subtract each coded
 * coefficient's dequantised contribution from rem[]. */
4319 for(i=start_i; i<=last_non_zero; i++){
4320 int j= perm_scantable[i];
4321 const int level= block[j];
4325 if(level<0) coeff= qmul*level - qadd;
4326 else        coeff= qmul*level + qadd;
4327 run_tab[rle_index++]=run;
4330 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
4336 if(last_non_zero>0){
4337 STOP_TIMER("init rem[]")
/* Main refinement loop: best_score is the current weighted error;
 * each iteration searches for the single +/-1 coefficient change that
 * lowers distortion + lambda*rate the most. */
4344 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4347 int run2, best_unquant_change=0, analyze_gradient;
4351 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
/* Gradient analysis: project the weighted error onto the DCT basis to
 * cheaply reject sign-inconsistent candidate changes later. */
4353 if(analyze_gradient){
4357 for(i=0; i<64; i++){
4360 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
4363 STOP_TIMER("rem*w*w")}
/* Try +/-1 on the DC coefficient (intra only). */
4373 const int level= block[0];
4374 int change, old_coeff;
4376 av_assert2(s->mb_intra);
4380 for(change=-1; change<=1; change+=2){
4381 int new_level= level + change;
4382 int score, new_coeff;
4384 new_coeff= q*new_level;
4385 if(new_coeff >= 2048 || new_coeff < 0)
4388 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4389 new_coeff - old_coeff);
4390 if(score<best_score){
4393 best_change= change;
4394 best_unquant_change= new_coeff - old_coeff;
4401 run2= run_tab[rle_index++];
/* Try +/-1 on every AC coefficient, accounting for how the change
 * alters the (run, level) VLC coding of neighbouring coefficients. */
4405 for(i=start_i; i<64; i++){
4406 int j= perm_scantable[i];
4407 const int level= block[j];
4408 int change, old_coeff;
4410 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4414 if(level<0) old_coeff= qmul*level - qadd;
4415 else        old_coeff= qmul*level + qadd;
4416 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4420 av_assert2(run2>=0 || i >= last_non_zero );
4423 for(change=-1; change<=1; change+=2){
4424 int new_level= level + change;
4425 int score, new_coeff, unquant_change;
/* At qns<2 only allow changes that shrink the magnitude. */
4428 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4432 if(new_level<0) new_coeff= qmul*new_level - qadd;
4433 else            new_coeff= qmul*new_level + qadd;
4434 if(new_coeff >= 2048 || new_coeff <= -2048)
4436 //FIXME check for overflow
/* Case 1: coefficient stays nonzero -> rate delta of its own VLC. */
4439 if(level < 63 && level > -63){
4440 if(i < last_non_zero)
4441 score +=  length[UNI_AC_ENC_INDEX(run, new_level+64)]
4442 - length[UNI_AC_ENC_INDEX(run, level+64)];
4444 score +=  last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4445 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* Case 2: zero -> nonzero; splits the run containing position i. */
4448 av_assert2(FFABS(new_level)==1);
4450 if(analyze_gradient){
4451 int g= d1[ scantable[i] ];
4452 if(g && (g^new_level) >= 0)
4456 if(i < last_non_zero){
4457 int next_i= i + run2 + 1;
4458 int next_level= block[ perm_scantable[next_i] ] + 64;
4460 if(next_level&(~127))
4463 if(next_i < last_non_zero)
4464 score +=   length[UNI_AC_ENC_INDEX(run, 65)]
4465 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4466 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4468 score +=   length[UNI_AC_ENC_INDEX(run, 65)]
4469 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4470 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4472 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4474 score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4475 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* Case 3: nonzero -> zero; merges two runs into one. */
4481 av_assert2(FFABS(level)==1);
4483 if(i < last_non_zero){
4484 int next_i= i + run2 + 1;
4485 int next_level= block[ perm_scantable[next_i] ] + 64;
4487 if(next_level&(~127))
4490 if(next_i < last_non_zero)
4491 score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4492 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4493 - length[UNI_AC_ENC_INDEX(run, 65)];
4495 score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4496 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4497 - length[UNI_AC_ENC_INDEX(run, 65)];
4499 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4501 score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4502 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* Add the distortion delta; keep the change if it beats the best. */
4509 unquant_change= new_coeff - old_coeff;
4510 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4512 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4514 if(score<best_score){
4517 best_change= change;
4518 best_unquant_change= unquant_change;
4522 prev_level= level + 64;
4523 if(prev_level&(~127))
4532 STOP_TIMER("iterative step")}
/* Apply the winning change and update last_non_zero accordingly. */
4536 int j= perm_scantable[ best_coeff ];
4538 block[j] += best_change;
4540 if(best_coeff > last_non_zero){
4541 last_non_zero= best_coeff;
4542 av_assert2(block[j]);
4549 if(block[j] - best_change){
4550 if(FFABS(block[j]) > FFABS(block[j] - best_change)){
/* The changed coefficient became zero: rescan for the new last index. */
4562 for(; last_non_zero>=start_i; last_non_zero--){
4563 if(block[perm_scantable[last_non_zero]])
4569 if(256*256*256*64 % count == 0){
4570 av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
/* Rebuild the run table and fold the applied change into rem[]. */
4575 for(i=start_i; i<=last_non_zero; i++){
4576 int j= perm_scantable[i];
4577 const int level= block[j];
4580 run_tab[rle_index++]=run;
4587 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4593 if(last_non_zero>0){
4594 STOP_TIMER("iterative search")
4599 return last_non_zero;
4603  * Permute an 8x8 block according to permutation.
4604 * @param block the block which will be permuted according to
4605 * the given permutation vector
4606 * @param permutation the permutation vector
4607 * @param last the last non zero coefficient in scantable order, used to
4608 * speed the permutation up
4609 * @param scantable the used scantable, this is only used to speed the
4610 * permutation up, the block is not (inverse) permutated
4611 * to scantable order!
/* Apply the IDCT permutation in place to the nonzero coefficients of
 * block[]; only positions up to `last` in scantable order are touched,
 * using a temporary copy to avoid overwriting values not yet moved. */
4613 void ff_block_permute(int16_t *block, uint8_t *permutation,
4614 const uint8_t *scantable, int last)
4621 //FIXME  it is ok but not clean and might fail for some permutations
4622 //     if (permutation[1] == 1)
/* Copy the coefficients out, then write each back at its permuted slot. */
4625 for (i = 0; i <= last; i++) {
4626 const int j = scantable[i];
4631 for (i = 0; i <= last; i++) {
4632 const int j = scantable[i];
4633 const int perm_j = permutation[j];
4634 block[perm_j] = temp[j];
/* Plain (non-trellis) quantisation of one 8x8 block, C reference.
 * Forward DCT, optional DCT-domain denoising, then threshold-based
 * quantisation with the codec's rounding bias; finally permutes the
 * nonzero coefficients for the IDCT. Returns the last nonzero index in
 * scan order; *overflow is set when a level exceeded max_qcoeff. */
4638 int ff_dct_quantize_c(MpegEncContext *s,
4639 int16_t *block, int n,
4640 int qscale, int *overflow)
4642 int i, j, level, last_non_zero, q, start_i;
4644 const uint8_t *scantable= s->intra_scantable.scantable;
4647 unsigned int threshold1, threshold2;
4649 s->fdsp.fdct(block);
4651 if(s->dct_error_sum)
4652 s->denoise_dct(s, block);
4662 /* For AIC we skip quant/dequant of INTRADC */
4665 /* note: block[0] is assumed to be positive */
4666 block[0] = (block[0] + (q >> 1)) / q;
/* Select quant matrix and rounding bias for intra (luma/chroma) vs inter. */
4669 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4670 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4674 qmat = s->q_inter_matrix[qscale];
4675 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4677 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4678 threshold2= (threshold1<<1);
/* Scan from the end to find the last coefficient above threshold. */
4679 for(i=63;i>=start_i;i--) {
4681 level = block[j] * qmat[j];
4683 if(((unsigned)(level+threshold1))>threshold2){
/* Quantise the surviving range; below-threshold coeffs become zero. */
4690 for(i=start_i; i<=last_non_zero; i++) {
4692 level = block[j] * qmat[j];
4694 //        if(   bias+level >= (1<<QMAT_SHIFT)
4695 //           || bias-level >= (1<<QMAT_SHIFT)){
4696 if(((unsigned)(level+threshold1))>threshold2){
4698 level= (bias + level)>>QMAT_SHIFT;
4701 level= (bias - level)>>QMAT_SHIFT;
4709 *overflow= s->max_qcoeff < max; //overflow might have happened
4711 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4712 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4713 ff_block_permute(block, s->idsp.idct_permutation,
4714 scantable, last_non_zero);
4716 return last_non_zero;
/* AVOption plumbing: OFFSET() maps an option to a MpegEncContext field;
 * VE marks options as video+encoding parameters. */
4719 #define OFFSET(x) offsetof(MpegEncContext, x)
4720 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* H.263 encoder private options. */
4721 static const AVOption h263_options[] = {
4722 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4723 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4728 static const AVClass h263_class = {
4729 .class_name = "H.263 encoder",
4730 .item_name  = av_default_item_name,
4731 .option     = h263_options,
4732 .version    = LIBAVUTIL_VERSION_INT,
/* H.263 encoder registration; all codecs in this file share the generic
 * mpegvideo init/encode/close entry points. */
4735 AVCodec ff_h263_encoder = {
4737 .long_name      = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4738 .type           = AVMEDIA_TYPE_VIDEO,
4739 .id             = AV_CODEC_ID_H263,
4740 .priv_data_size = sizeof(MpegEncContext),
4741 .init           = ff_mpv_encode_init,
4742 .encode2        = ff_mpv_encode_picture,
4743 .close          = ff_mpv_encode_end,
4744 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4745 .priv_class     = &h263_class,
/* H.263+ (H.263 version 2) private options. */
4748 static const AVOption h263p_options[] = {
4749 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4750 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4751 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4752 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
4756 static const AVClass h263p_class = {
4757 .class_name = "H.263p encoder",
4758 .item_name  = av_default_item_name,
4759 .option     = h263p_options,
4760 .version    = LIBAVUTIL_VERSION_INT,
/* H.263+ encoder registration; supports slice threading. */
4763 AVCodec ff_h263p_encoder = {
4765 .long_name      = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4766 .type           = AVMEDIA_TYPE_VIDEO,
4767 .id             = AV_CODEC_ID_H263P,
4768 .priv_data_size = sizeof(MpegEncContext),
4769 .init           = ff_mpv_encode_init,
4770 .encode2        = ff_mpv_encode_picture,
4771 .close          = ff_mpv_encode_end,
4772 .capabilities   = AV_CODEC_CAP_SLICE_THREADS,
4773 .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4774 .priv_class     = &h263p_class,
/* MS-MPEG4 v2 encoder registration (generic mpegvideo options only). */
4777 static const AVClass msmpeg4v2_class = {
4778 .class_name = "msmpeg4v2 encoder",
4779 .item_name  = av_default_item_name,
4780 .option     = ff_mpv_generic_options,
4781 .version    = LIBAVUTIL_VERSION_INT,
4784 AVCodec ff_msmpeg4v2_encoder = {
4785 .name           = "msmpeg4v2",
4786 .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4787 .type           = AVMEDIA_TYPE_VIDEO,
4788 .id             = AV_CODEC_ID_MSMPEG4V2,
4789 .priv_data_size = sizeof(MpegEncContext),
4790 .init           = ff_mpv_encode_init,
4791 .encode2        = ff_mpv_encode_picture,
4792 .close          = ff_mpv_encode_end,
4793 .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4794 .priv_class     = &msmpeg4v2_class,
/* MS-MPEG4 v3 ("DivX 3") encoder registration. */
4797 static const AVClass msmpeg4v3_class = {
4798 .class_name = "msmpeg4v3 encoder",
4799 .item_name  = av_default_item_name,
4800 .option     = ff_mpv_generic_options,
4801 .version    = LIBAVUTIL_VERSION_INT,
4804 AVCodec ff_msmpeg4v3_encoder = {
4806 .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4807 .type           = AVMEDIA_TYPE_VIDEO,
4808 .id             = AV_CODEC_ID_MSMPEG4V3,
4809 .priv_data_size = sizeof(MpegEncContext),
4810 .init           = ff_mpv_encode_init,
4811 .encode2        = ff_mpv_encode_picture,
4812 .close          = ff_mpv_encode_end,
4813 .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4814 .priv_class     = &msmpeg4v3_class,
/* AVClass for the WMV1 encoder below (generic mpegvideo options only). */
4817 static const AVClass wmv1_class = {
4818 .class_name = "wmv1 encoder",
4819 .item_name  = av_default_item_name,
4820 .option     = ff_mpv_generic_options,
4821 .version    = LIBAVUTIL_VERSION_INT,
4824 AVCodec ff_wmv1_encoder = {
4826 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4827 .type = AVMEDIA_TYPE_VIDEO,
4828 .id = AV_CODEC_ID_WMV1,
4829 .priv_data_size = sizeof(MpegEncContext),
4830 .init = ff_mpv_encode_init,
4831 .encode2 = ff_mpv_encode_picture,
4832 .close = ff_mpv_encode_end,
4833 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4834 .priv_class = &wmv1_class,