2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/pixdesc.h"
40 #include "libavutil/opt.h"
41 #include "libavutil/timer.h"
46 #include "mpegvideo.h"
47 #include "mpegvideodata.h"
51 #include "mjpegenc_common.h"
53 #include "mpegutils.h"
56 #include "pixblockdsp.h"
60 #include "aandcttab.h"
62 #include "mpeg4video.h"
64 #include "bytestream.h"
70 #define QUANT_BIAS_SHIFT 8
72 #define QMAT_SHIFT_MMX 16
75 static int encode_picture(MpegEncContext *s, int picture_number);
76 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
77 static int sse_mb(MpegEncContext *s);
78 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
79 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
81 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
82 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
/* Table of generic AVOptions shared by the mpegvideo-based encoders
 * (body elsewhere in the full file). */
84 const AVOption ff_mpv_generic_options[] = {
/* Build per-qscale quantization multiplier tables (qmat, and the 16-bit
 * pair qmat16 used by the SIMD path) from a quant matrix and bias, for
 * every qscale in [qmin, qmax].  Which table form is produced depends on
 * the forward-DCT implementation currently selected in s->fdsp. */
89 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
90 uint16_t (*qmat16)[2][64],
91 const uint16_t *quant_matrix,
92 int bias, int qmin, int qmax, int intra)
94 FDCTDSPContext *fdsp = &s->fdsp;
98 for (qscale = qmin; qscale <= qmax; qscale++) {
/* MPEG-2 non-linear mode maps the qscale code through a table;
 * otherwise the effective scale is simply 2 * qscale. */
102 if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
103 else qscale2 = qscale << 1;
/* Accurate-DCT path (jpeg islow 8/10-bit, faandct): denominator is
 * qscale2 * matrix entry with no extra per-coefficient prescale. */
105 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
107 fdsp->fdct == ff_faandct ||
108 #endif /* CONFIG_FAANDCT */
109 fdsp->fdct == ff_jpeg_fdct_islow_10) {
110 for (i = 0; i < 64; i++) {
/* idct_permutation maps natural coefficient order to the
 * IDCT's internal scan order. */
111 const int j = s->idsp.idct_permutation[i];
112 int64_t den = (int64_t) qscale2 * quant_matrix[j];
113 /* 16 <= qscale * quant_matrix[i] <= 7905
114 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
115 * 19952 <= x <= 249205026
116 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
117 * 3444240 >= (1 << 36) / (x) >= 275 */
119 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
/* AAN fast-DCT path: the DCT output still carries the AAN scale
 * factors, so ff_aanscales is folded into the denominator. */
121 } else if (fdsp->fdct == ff_fdct_ifast) {
122 for (i = 0; i < 64; i++) {
123 const int j = s->idsp.idct_permutation[i];
124 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
125 /* 16 <= qscale * quant_matrix[i] <= 7905
126 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
127 * 19952 <= x <= 249205026
128 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
129 * 3444240 >= (1 << 36) / (x) >= 275 */
131 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
/* Generic path: also fill the 16-bit table pair — [0] holds the
 * multiplier, [1] the rounding bias derived from it. */
134 for (i = 0; i < 64; i++) {
135 const int j = s->idsp.idct_permutation[i];
136 int64_t den = (int64_t) qscale2 * quant_matrix[j];
137 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
138 * Assume x = qscale * quant_matrix[i]
140 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
141 * so 32768 >= (1 << 19) / (x) >= 67 */
142 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
143 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
144 // (qscale * quant_matrix[i]);
145 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
/* Clamp: 0 and 128*256 are unusable as 16-bit multipliers, pin to
 * the largest representable value below 128*256. */
147 if (qmat16[qscale][0][i] == 0 ||
148 qmat16[qscale][0][i] == 128 * 256)
149 qmat16[qscale][0][i] = 128 * 256 - 1;
150 qmat16[qscale][1][i] =
151 ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
152 qmat16[qscale][0][i]);
/* Overflow guard: reduce the working shift while the worst-case
 * product max * qmat would exceed INT_MAX. */
156 for (i = intra; i < 64; i++) {
158 if (fdsp->fdct == ff_fdct_ifast) {
159 max = (8191LL * ff_aanscales[i]) >> 14;
161 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
167 av_log(NULL, AV_LOG_INFO,
168 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/* Derive s->qscale (and lambda2) from the current rate-control lambda. */
173 static inline void update_qscale(MpegEncContext *s)
/* Intentionally disabled branch ("&& 0"): would select the non-linear
 * MPEG-2 qscale whose scaled value is closest to lambda. */
175 if (s->q_scale_type == 1 && 0) {
177 int bestdiff=INT_MAX;
180 for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
181 int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
/* skip candidates outside [qmin, qmax] (qmax ignored in VBV-panic mode) */
182 if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
183 (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
185 if (diff < bestdiff) {
/* Linear mapping lambda -> qscale (factor 139 / 2^(FF_LAMBDA_SHIFT+7)
 * with rounding), then clamp; when vbv_ignore_qmax is set the upper
 * bound is the codec maximum 31 instead of the user qmax. */
192 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
193 (FF_LAMBDA_SHIFT + 7);
194 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
/* keep lambda2 consistent with lambda (rounded square) */
197 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/* Write a 64-entry quantization matrix to the bitstream in zigzag scan
 * order, 8 bits per coefficient. */
201 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
207 for (i = 0; i < 64; i++) {
208 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
215 * init s->current_picture.qscale_table from s->lambda_table
217 void ff_init_qscale_tab(MpegEncContext *s)
219 int8_t * const qscale_table = s->current_picture.qscale_table;
222 for (i = 0; i < s->mb_num; i++) {
/* per-macroblock lambda, indexed through the mb raster mapping */
223 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
/* same lambda -> qscale mapping as update_qscale() */
224 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
225 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/* Copy fields that motion estimation may have modified from the main
 * context src into the duplicate (slice-threading) context dst. */
230 static void update_duplicate_context_after_me(MpegEncContext *dst,
233 #define COPY(a) dst->a= src->a
235 COPY(current_picture);
241 COPY(picture_in_gop_number);
242 COPY(gop_picture_number);
243 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
244 COPY(progressive_frame); // FIXME don't set in encode_header
245 COPY(partitioned_frame); // FIXME don't set in encode_header
250 * Set the given MpegEncContext to defaults for encoding.
251 * the changed fields will not depend upon the prior state of the MpegEncContext.
253 static void mpv_encode_defaults(MpegEncContext *s)
256 ff_mpv_common_defaults(s);
/* motion vectors with |mv| < 16 use fcode 1 by default */
258 for (i = -16; i < 16; i++) {
259 default_fcode_tab[i + MAX_MV] = 1;
261 s->me.mv_penalty = default_mv_penalty;
262 s->fcode_tab = default_fcode_tab;
/* reset picture counters for a fresh encoding session */
264 s->input_picture_number = 0;
265 s->picture_in_gop_number = 0;
/* Select the DCT/quantization function pointers for encoding.  x86 init
 * may install optimized versions first; the C fallbacks are used where
 * nothing was set, and trellis quantization replaces dct_quantize when
 * requested via avctx->trellis. */
268 av_cold int ff_dct_encode_init(MpegEncContext *s) {
270 ff_dct_encode_init_x86(s);
272 if (CONFIG_H263_ENCODER)
273 ff_h263dsp_init(&s->h263dsp);
274 if (!s->dct_quantize)
275 s->dct_quantize = ff_dct_quantize_c;
277 s->denoise_dct = denoise_dct_c;
/* keep the non-trellis quantizer reachable as fast_dct_quantize */
278 s->fast_dct_quantize = s->dct_quantize;
279 if (s->avctx->trellis)
280 s->dct_quantize = dct_quantize_trellis_c;
285 /* init video encoder */
/* Validate user parameters, configure the per-codec output format and
 * feature flags, allocate encoder tables, and initialize DSP contexts,
 * quant matrices and rate control.  Returns 0 on success, a negative
 * AVERROR on invalid parameters or allocation failure. */
286 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
288 MpegEncContext *s = avctx->priv_data;
289 AVCPBProperties *cpb_props;
290 int i, ret, format_supported;
292 mpv_encode_defaults(s);
/* ---- pixel-format validation per codec ---- */
294 switch (avctx->codec_id) {
295 case AV_CODEC_ID_MPEG2VIDEO:
296 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
297 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
298 av_log(avctx, AV_LOG_ERROR,
299 "only YUV420 and YUV422 are supported\n");
303 case AV_CODEC_ID_MJPEG:
304 case AV_CODEC_ID_AMV:
305 format_supported = 0;
306 /* JPEG color space */
307 if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
308 avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
309 avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
310 (avctx->color_range == AVCOL_RANGE_JPEG &&
311 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
312 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
313 avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
314 format_supported = 1;
315 /* MPEG color space */
316 else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
317 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
318 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
319 avctx->pix_fmt == AV_PIX_FMT_YUV444P))
320 format_supported = 1;
322 if (!format_supported) {
323 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
328 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
329 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
/* derive internal chroma format from the pixel format */
334 switch (avctx->pix_fmt) {
335 case AV_PIX_FMT_YUVJ444P:
336 case AV_PIX_FMT_YUV444P:
337 s->chroma_format = CHROMA_444;
339 case AV_PIX_FMT_YUVJ422P:
340 case AV_PIX_FMT_YUV422P:
341 s->chroma_format = CHROMA_422;
343 case AV_PIX_FMT_YUVJ420P:
344 case AV_PIX_FMT_YUV420P:
346 s->chroma_format = CHROMA_420;
/* ---- capture and clamp general encoding parameters ---- */
350 avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
351 s->bit_rate = avctx->bit_rate;
352 s->width = avctx->width;
353 s->height = avctx->height;
354 if (avctx->gop_size > 600 &&
355 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
356 av_log(avctx, AV_LOG_WARNING,
357 "keyframe interval too large!, reducing it from %d to %d\n",
358 avctx->gop_size, 600);
359 avctx->gop_size = 600;
361 s->gop_size = avctx->gop_size;
363 if (avctx->max_b_frames > MAX_B_FRAMES) {
364 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
365 "is %d.\n", MAX_B_FRAMES);
366 avctx->max_b_frames = MAX_B_FRAMES;
368 s->max_b_frames = avctx->max_b_frames;
369 s->codec_id = avctx->codec->id;
370 s->strict_std_compliance = avctx->strict_std_compliance;
371 s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
372 s->mpeg_quant = avctx->mpeg_quant;
373 s->rtp_mode = !!avctx->rtp_payload_size;
374 s->intra_dc_precision = avctx->intra_dc_precision;
376 // workaround some differences between how applications specify dc precision
377 if (s->intra_dc_precision < 0) {
378 s->intra_dc_precision += 8;
379 } else if (s->intra_dc_precision >= 8)
380 s->intra_dc_precision -= 8;
382 if (s->intra_dc_precision < 0) {
383 av_log(avctx, AV_LOG_ERROR,
384 "intra dc precision must be positive, note some applications use"
385 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
386 return AVERROR(EINVAL);
/* only MPEG-2 supports dc precision codes above 0 (up to 3) */
389 if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
390 av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
391 return AVERROR(EINVAL);
393 s->user_specified_pts = AV_NOPTS_VALUE;
395 if (s->gop_size <= 1) {
402 #if FF_API_MOTION_EST
403 FF_DISABLE_DEPRECATION_WARNINGS
404 s->me_method = avctx->me_method;
405 FF_ENABLE_DEPRECATION_WARNINGS
409 s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
412 FF_DISABLE_DEPRECATION_WARNINGS
413 if (avctx->border_masking != 0.0)
414 s->border_masking = avctx->border_masking;
415 FF_ENABLE_DEPRECATION_WARNINGS
/* adaptive quant is enabled when any masking option or QP-RD is active */
418 s->adaptive_quant = (s->avctx->lumi_masking ||
419 s->avctx->dark_masking ||
420 s->avctx->temporal_cplx_masking ||
421 s->avctx->spatial_cplx_masking ||
422 s->avctx->p_masking ||
424 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
427 s->loop_filter = !!(s->avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
/* ---- derive a VBV buffer size when only rc_max_rate was given ---- */
429 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
430 switch(avctx->codec_id) {
431 case AV_CODEC_ID_MPEG1VIDEO:
432 case AV_CODEC_ID_MPEG2VIDEO:
433 avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
435 case AV_CODEC_ID_MPEG4:
436 case AV_CODEC_ID_MSMPEG4V1:
437 case AV_CODEC_ID_MSMPEG4V2:
438 case AV_CODEC_ID_MSMPEG4V3:
/* piecewise-linear interpolation between VBV sizes at rate breakpoints */
439 if (avctx->rc_max_rate >= 15000000) {
440 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
441 } else if(avctx->rc_max_rate >= 2000000) {
442 avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
443 } else if(avctx->rc_max_rate >= 384000) {
444 avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
446 avctx->rc_buffer_size = 40;
447 avctx->rc_buffer_size *= 16384;
450 if (avctx->rc_buffer_size) {
451 av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
/* ---- rate-control parameter consistency checks ---- */
455 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
456 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
460 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
461 av_log(avctx, AV_LOG_INFO,
462 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
465 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
466 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
470 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
471 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
475 if (avctx->rc_max_rate &&
476 avctx->rc_max_rate == avctx->bit_rate &&
477 avctx->rc_max_rate != avctx->rc_min_rate) {
478 av_log(avctx, AV_LOG_INFO,
479 "impossible bitrate constraints, this will fail\n");
/* VBV buffer must hold at least one frame's worth of bits */
482 if (avctx->rc_buffer_size &&
483 avctx->bit_rate * (int64_t)avctx->time_base.num >
484 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
485 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
489 if (!s->fixed_qscale &&
490 avctx->bit_rate * av_q2d(avctx->time_base) >
491 avctx->bit_rate_tolerance) {
492 av_log(avctx, AV_LOG_WARNING,
493 "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, (int64_t)avctx->bit_rate);
494 avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
/* CBR MPEG-1/2: warn when vbv_delay cannot represent the buffer */
497 if (s->avctx->rc_max_rate &&
498 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
499 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
500 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
501 90000LL * (avctx->rc_buffer_size - 1) >
502 s->avctx->rc_max_rate * 0xFFFFLL) {
503 av_log(avctx, AV_LOG_INFO,
504 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
505 "specified vbv buffer is too large for the given bitrate!\n");
/* ---- codec-specific feature restrictions ---- */
508 if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
509 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
510 s->codec_id != AV_CODEC_ID_FLV1) {
511 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
515 if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
516 av_log(avctx, AV_LOG_ERROR,
517 "OBMC is only supported with simple mb decision\n");
521 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
522 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
526 if (s->max_b_frames &&
527 s->codec_id != AV_CODEC_ID_MPEG4 &&
528 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
529 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
530 av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
533 if (s->max_b_frames < 0) {
534 av_log(avctx, AV_LOG_ERROR,
535 "max b frames must be 0 or positive for mpegvideo based encoders\n");
/* sample aspect ratio fields are 8 bits in these codecs */
539 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
540 s->codec_id == AV_CODEC_ID_H263 ||
541 s->codec_id == AV_CODEC_ID_H263P) &&
542 (avctx->sample_aspect_ratio.num > 255 ||
543 avctx->sample_aspect_ratio.den > 255)) {
544 av_log(avctx, AV_LOG_WARNING,
545 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
546 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
547 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
548 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
/* per-codec resolution limits */
551 if ((s->codec_id == AV_CODEC_ID_H263 ||
552 s->codec_id == AV_CODEC_ID_H263P) &&
553 (avctx->width > 2048 ||
554 avctx->height > 1152 )) {
555 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
558 if ((s->codec_id == AV_CODEC_ID_H263 ||
559 s->codec_id == AV_CODEC_ID_H263P) &&
560 ((avctx->width &3) ||
561 (avctx->height&3) )) {
562 av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
566 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
567 (avctx->width > 4095 ||
568 avctx->height > 4095 )) {
569 av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
573 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
574 (avctx->width > 16383 ||
575 avctx->height > 16383 )) {
576 av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
580 if (s->codec_id == AV_CODEC_ID_RV10 &&
582 avctx->height&15 )) {
583 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
584 return AVERROR(EINVAL);
587 if (s->codec_id == AV_CODEC_ID_RV20 &&
590 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
591 return AVERROR(EINVAL);
594 if ((s->codec_id == AV_CODEC_ID_WMV1 ||
595 s->codec_id == AV_CODEC_ID_WMV2) &&
597 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
601 if ((s->avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
602 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
603 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
607 // FIXME mpeg2 uses that too
608 if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
609 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
610 av_log(avctx, AV_LOG_ERROR,
611 "mpeg2 style quantization not supported by codec\n");
615 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
616 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
620 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
621 s->avctx->mb_decision != FF_MB_DECISION_RD) {
622 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
626 if (s->avctx->scenechange_threshold < 1000000000 &&
627 (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
628 av_log(avctx, AV_LOG_ERROR,
629 "closed gop with scene change detection are not supported yet, "
630 "set threshold to 1000000000\n");
634 if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
635 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
636 av_log(avctx, AV_LOG_ERROR,
637 "low delay forcing is only available for mpeg2\n");
640 if (s->max_b_frames != 0) {
641 av_log(avctx, AV_LOG_ERROR,
642 "b frames cannot be used with low delay\n");
647 if (s->q_scale_type == 1) {
648 if (avctx->qmax > 28) {
649 av_log(avctx, AV_LOG_ERROR,
650 "non linear quant only supports qmax <= 28 currently\n");
655 if (avctx->slices > 1 &&
656 (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
657 av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
658 return AVERROR(EINVAL);
661 if (s->avctx->thread_count > 1 &&
662 s->codec_id != AV_CODEC_ID_MPEG4 &&
663 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
664 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
665 s->codec_id != AV_CODEC_ID_MJPEG &&
666 (s->codec_id != AV_CODEC_ID_H263P)) {
667 av_log(avctx, AV_LOG_ERROR,
668 "multi threaded encoding not supported by codec\n");
672 if (s->avctx->thread_count < 1) {
673 av_log(avctx, AV_LOG_ERROR,
674 "automatic thread number detection not supported by codec, "
679 if (!avctx->time_base.den || !avctx->time_base.num) {
680 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
/* ---- transfer deprecated AVCodecContext options into the context ---- */
684 #if FF_API_PRIVATE_OPT
685 FF_DISABLE_DEPRECATION_WARNINGS
686 if (avctx->b_frame_strategy)
687 s->b_frame_strategy = avctx->b_frame_strategy;
688 if (avctx->b_sensitivity != 40)
689 s->b_sensitivity = avctx->b_sensitivity;
690 FF_ENABLE_DEPRECATION_WARNINGS
693 if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
694 av_log(avctx, AV_LOG_INFO,
695 "notice: b_frame_strategy only affects the first pass\n");
696 s->b_frame_strategy = 0;
/* reduce the time base to lowest terms */
699 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
701 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
702 avctx->time_base.den /= i;
703 avctx->time_base.num /= i;
/* default quantization biases depend on codec family */
707 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) {
708 // (a + x * 3 / 8) / x
709 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
710 s->inter_quant_bias = 0;
712 s->intra_quant_bias = 0;
714 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
717 if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
718 av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
719 return AVERROR(EINVAL);
722 #if FF_API_QUANT_BIAS
723 FF_DISABLE_DEPRECATION_WARNINGS
724 if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
725 s->intra_quant_bias = avctx->intra_quant_bias;
726 if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
727 s->inter_quant_bias = avctx->inter_quant_bias;
728 FF_ENABLE_DEPRECATION_WARNINGS
731 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
/* MPEG-4 stores the timebase denominator in 16 bits */
733 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
734 s->avctx->time_base.den > (1 << 16) - 1) {
735 av_log(avctx, AV_LOG_ERROR,
736 "timebase %d/%d not supported by MPEG 4 standard, "
737 "the maximum admitted value for the timebase denominator "
738 "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
742 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
/* ---- per-codec output format / feature setup ---- */
744 switch (avctx->codec->id) {
745 case AV_CODEC_ID_MPEG1VIDEO:
746 s->out_format = FMT_MPEG1;
747 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
748 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
750 case AV_CODEC_ID_MPEG2VIDEO:
751 s->out_format = FMT_MPEG1;
752 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
753 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
756 case AV_CODEC_ID_MJPEG:
757 case AV_CODEC_ID_AMV:
758 s->out_format = FMT_MJPEG;
759 s->intra_only = 1; /* force intra only for jpeg */
760 if (!CONFIG_MJPEG_ENCODER ||
761 ff_mjpeg_encode_init(s) < 0)
766 case AV_CODEC_ID_H261:
767 if (!CONFIG_H261_ENCODER)
769 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
770 av_log(avctx, AV_LOG_ERROR,
771 "The specified picture size of %dx%d is not valid for the "
772 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
773 s->width, s->height);
776 s->out_format = FMT_H261;
779 s->rtp_mode = 0; /* Sliced encoding not supported */
781 case AV_CODEC_ID_H263:
782 if (!CONFIG_H263_ENCODER)
784 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
785 s->width, s->height) == 8) {
786 av_log(avctx, AV_LOG_ERROR,
787 "The specified picture size of %dx%d is not valid for "
788 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
789 "352x288, 704x576, and 1408x1152. "
790 "Try H.263+.\n", s->width, s->height);
793 s->out_format = FMT_H263;
797 case AV_CODEC_ID_H263P:
798 s->out_format = FMT_H263;
801 s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
802 s->modified_quant = s->h263_aic;
803 s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
804 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
807 /* These are just to be sure */
811 case AV_CODEC_ID_FLV1:
812 s->out_format = FMT_H263;
813 s->h263_flv = 2; /* format = 1; 11-bit codes */
814 s->unrestricted_mv = 1;
815 s->rtp_mode = 0; /* don't allow GOB */
819 case AV_CODEC_ID_RV10:
820 s->out_format = FMT_H263;
824 case AV_CODEC_ID_RV20:
825 s->out_format = FMT_H263;
828 s->modified_quant = 1;
832 s->unrestricted_mv = 0;
834 case AV_CODEC_ID_MPEG4:
835 s->out_format = FMT_H263;
837 s->unrestricted_mv = 1;
838 s->low_delay = s->max_b_frames ? 0 : 1;
839 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
841 case AV_CODEC_ID_MSMPEG4V2:
842 s->out_format = FMT_H263;
844 s->unrestricted_mv = 1;
845 s->msmpeg4_version = 2;
849 case AV_CODEC_ID_MSMPEG4V3:
850 s->out_format = FMT_H263;
852 s->unrestricted_mv = 1;
853 s->msmpeg4_version = 3;
854 s->flipflop_rounding = 1;
858 case AV_CODEC_ID_WMV1:
859 s->out_format = FMT_H263;
861 s->unrestricted_mv = 1;
862 s->msmpeg4_version = 4;
863 s->flipflop_rounding = 1;
867 case AV_CODEC_ID_WMV2:
868 s->out_format = FMT_H263;
870 s->unrestricted_mv = 1;
871 s->msmpeg4_version = 5;
872 s->flipflop_rounding = 1;
880 avctx->has_b_frames = !s->low_delay;
884 s->progressive_frame =
885 s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
886 AV_CODEC_FLAG_INTERLACED_ME) ||
/* ---- allocate common state and init DSP contexts ---- */
891 if (ff_mpv_common_init(s) < 0)
894 ff_fdctdsp_init(&s->fdsp, avctx);
895 ff_me_cmp_init(&s->mecc, avctx);
896 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
897 ff_pixblockdsp_init(&s->pdsp, avctx);
898 ff_qpeldsp_init(&s->qdsp);
/* ---- allocate encoder tables (freed in ff_mpv_encode_end) ---- */
900 if (s->msmpeg4_version) {
901 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
902 2 * 2 * (MAX_LEVEL + 1) *
903 (MAX_RUN + 1) * 2 * sizeof(int), fail);
905 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
907 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
908 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail);
909 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
910 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
911 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
912 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
913 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
914 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
915 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
916 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
918 if (s->avctx->noise_reduction) {
919 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
920 2 * 64 * sizeof(uint16_t), fail);
923 ff_dct_encode_init(s);
925 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
926 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
928 if (s->slice_context_count > 1) {
931 if (avctx->codec_id == AV_CODEC_ID_H263P)
932 s->h263_slice_structured = 1;
935 s->quant_precision = 5;
937 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, s->avctx->ildct_cmp);
938 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->avctx->frame_skip_cmp);
/* codec-specific encoder init */
940 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
941 ff_h261_encode_init(s);
942 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
943 ff_h263_encode_init(s);
944 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
945 if ((ret = ff_msmpeg4_encode_init(s)) < 0)
947 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
948 && s->out_format == FMT_MPEG1)
949 ff_mpeg1_encode_init(s);
/* ---- fill default quant matrices (permuted for the chosen IDCT) ---- */
952 for (i = 0; i < 64; i++) {
953 int j = s->idsp.idct_permutation[i];
954 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
956 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
957 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
958 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
960 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
963 s->chroma_intra_matrix[j] =
964 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
965 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
/* user-supplied matrices override the defaults */
967 if (s->avctx->intra_matrix)
968 s->intra_matrix[j] = s->avctx->intra_matrix[i];
969 if (s->avctx->inter_matrix)
970 s->inter_matrix[j] = s->avctx->inter_matrix[i];
973 /* precompute matrix */
974 /* for mjpeg, we do include qscale in the matrix */
975 if (s->out_format != FMT_MJPEG) {
976 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
977 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
979 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
980 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
984 if (ff_rate_control_init(s) < 0)
/* ---- transfer remaining deprecated rate-control options ---- */
987 #if FF_API_ERROR_RATE
988 FF_DISABLE_DEPRECATION_WARNINGS
989 if (avctx->error_rate)
990 s->error_rate = avctx->error_rate;
991 FF_ENABLE_DEPRECATION_WARNINGS;
994 #if FF_API_NORMALIZE_AQP
995 FF_DISABLE_DEPRECATION_WARNINGS
996 if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP)
997 s->mpv_flags |= FF_MPV_FLAG_NAQ;
998 FF_ENABLE_DEPRECATION_WARNINGS;
1002 FF_DISABLE_DEPRECATION_WARNINGS
1003 if (avctx->flags & CODEC_FLAG_MV0)
1004 s->mpv_flags |= FF_MPV_FLAG_MV0;
1005 FF_ENABLE_DEPRECATION_WARNINGS
1009 FF_DISABLE_DEPRECATION_WARNINGS
1010 if (avctx->rc_qsquish != 0.0)
1011 s->rc_qsquish = avctx->rc_qsquish;
1012 if (avctx->rc_qmod_amp != 0.0)
1013 s->rc_qmod_amp = avctx->rc_qmod_amp;
1014 if (avctx->rc_qmod_freq)
1015 s->rc_qmod_freq = avctx->rc_qmod_freq;
1016 if (avctx->rc_buffer_aggressivity != 1.0)
1017 s->rc_buffer_aggressivity = avctx->rc_buffer_aggressivity;
1018 if (avctx->rc_initial_cplx != 0.0)
1019 s->rc_initial_cplx = avctx->rc_initial_cplx;
1021 s->lmin = avctx->lmin;
1023 s->lmax = avctx->lmax;
1026 av_freep(&s->rc_eq);
1027 s->rc_eq = av_strdup(avctx->rc_eq);
1029 return AVERROR(ENOMEM);
1031 FF_ENABLE_DEPRECATION_WARNINGS
1034 #if FF_API_PRIVATE_OPT
1035 FF_DISABLE_DEPRECATION_WARNINGS
1036 if (avctx->brd_scale)
1037 s->brd_scale = avctx->brd_scale;
1038 FF_ENABLE_DEPRECATION_WARNINGS
/* b_frame_strategy 2 needs downscaled temp frames for lookahead */
1041 if (s->b_frame_strategy == 2) {
1042 for (i = 0; i < s->max_b_frames + 2; i++) {
1043 s->tmp_frames[i] = av_frame_alloc();
1044 if (!s->tmp_frames[i])
1045 return AVERROR(ENOMEM);
1047 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1048 s->tmp_frames[i]->width = s->width >> s->brd_scale;
1049 s->tmp_frames[i]->height = s->height >> s->brd_scale;
1051 ret = av_frame_get_buffer(s->tmp_frames[i], 32);
/* export CPB (coded picture buffer) properties as side data */
1057 cpb_props = ff_add_cpb_side_data(avctx);
1059 return AVERROR(ENOMEM);
1060 cpb_props->max_bitrate = avctx->rc_max_rate;
1061 cpb_props->min_bitrate = avctx->rc_min_rate;
1062 cpb_props->avg_bitrate = avctx->bit_rate;
1063 cpb_props->buffer_size = avctx->rc_buffer_size;
/* error path: release everything allocated so far */
1067 ff_mpv_encode_end(avctx);
1068 return AVERROR_UNKNOWN;
/* Free all encoder state allocated by ff_mpv_encode_init(); also used as
 * the error-path cleanup of init, so every free must tolerate partially
 * initialized state. */
1071 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1073 MpegEncContext *s = avctx->priv_data;
1076 ff_rate_control_uninit(s);
1078 ff_mpv_common_end(s);
1079 if (CONFIG_MJPEG_ENCODER &&
1080 s->out_format == FMT_MJPEG)
1081 ff_mjpeg_encode_close(s);
1083 av_freep(&avctx->extradata);
1085 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1086 av_frame_free(&s->tmp_frames[i]);
1088 ff_free_picture_tables(&s->new_picture);
1089 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1091 av_freep(&s->avctx->stats_out);
1092 av_freep(&s->ac_stats);
/* the chroma matrices may alias the luma ones; only free when distinct,
 * then clear the pointers to avoid a double free below */
1094 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1095 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1096 s->q_chroma_intra_matrix= NULL;
1097 s->q_chroma_intra_matrix16= NULL;
1098 av_freep(&s->q_intra_matrix);
1099 av_freep(&s->q_inter_matrix);
1100 av_freep(&s->q_intra_matrix16);
1101 av_freep(&s->q_inter_matrix16);
1102 av_freep(&s->input_picture);
1103 av_freep(&s->reordered_input_picture);
1104 av_freep(&s->dct_offset);
/* Sum of absolute errors of a 16x16 pixel block against a constant
 * reference value (typically the block mean). */
1109 static int get_sae(uint8_t *src, int ref, int stride)
1114 for (y = 0; y < 16; y++) {
1115 for (x = 0; x < 16; x++) {
1116 acc += FFABS(src[x + y * stride] - ref);
/* Count the 16x16 blocks for which intra coding looks cheaper than
 * inter: compare SAE against the block mean (intra cost proxy) with SAD
 * against the reference frame (inter cost proxy). */
1123 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1124 uint8_t *ref, int stride)
/* round height down to a multiple of 16 (whole macroblocks only) */
1130 h = s->height & ~15;
1132 for (y = 0; y < h; y += 16) {
1133 for (x = 0; x < w; x += 16) {
1134 int offset = x + y * stride;
1135 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
/* block mean: pixel sum / 256, rounded */
1137 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1138 int sae = get_sae(src + offset, mean, stride);
/* heuristic margin: intra wins when sae + 500 < sad */
1140 acc += sae + 500 < sad;
/* Thin wrapper around ff_alloc_picture, passing the encoder context's
 * geometry and stride parameters; updates s->linesize/uvlinesize. */
1146 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1148 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1149 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1150 s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1151 &s->linesize, &s->uvlinesize);
/*
 * Accept one user-supplied frame ('pic_arg') into the encoder's input FIFO
 * (s->input_picture[]).  Validates/derives the pts, then either references
 * the user frame directly ("direct" path, when strides/alignment match) or
 * copies it into an internal Picture; finally shifts the FIFO and stores
 * the new picture at index 'encoding_delay'.
 * Returns 0 on success, a negative AVERROR on failure.
 * NOTE(review): many lines are elided in this listing ('pts'/'direct'
 * declarations, several closing braces, error paths) — hedged comments
 * below mark where behavior is inferred from visible code only.
 */
1154 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1156 Picture *pic = NULL;
1158 int i, display_picture_number = 0, ret;
/* frames of reordering delay: max_b_frames, else 1 unless low_delay */
1159 int encoding_delay = s->max_b_frames ? s->max_b_frames
1160 : (s->low_delay ? 0 : 1);
1161 int flush_offset = 1;
1166 display_picture_number = s->input_picture_number++;
/* --- pts handling: user pts must be strictly increasing ---------------- */
1168 if (pts != AV_NOPTS_VALUE) {
1169 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1170 int64_t last = s->user_specified_pts;
1173 av_log(s->avctx, AV_LOG_ERROR,
1174 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1176 return AVERROR(EINVAL);
/* remember pts gap of the 2nd picture to derive dts of the first ones */
1179 if (!s->low_delay && display_picture_number == 1)
1180 s->dts_delta = pts - last;
1182 s->user_specified_pts = pts;
/* no pts given: extrapolate from the previous one (or use frame number) */
1184 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1185 s->user_specified_pts =
1186 pts = s->user_specified_pts + 1;
1187 av_log(s->avctx, AV_LOG_INFO,
1188 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1191 pts = display_picture_number;
/* --- decide whether the user buffer can be used in place ("direct") ---- */
/* NOTE(review): the assignments to 'direct' sit on elided lines; these
 * checks visibly gate the in-place path on matching strides/alignment. */
1195 if (!pic_arg->buf[0] ||
1196 pic_arg->linesize[0] != s->linesize ||
1197 pic_arg->linesize[1] != s->uvlinesize ||
1198 pic_arg->linesize[2] != s->uvlinesize)
1200 if ((s->width & 15) || (s->height & 15))
1202 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1204 if (s->linesize & (STRIDE_ALIGN-1))
1207 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1208 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1210 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1214 pic = &s->picture[i];
/* direct path: just take a reference on the caller's frame */
1218 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1221 ret = alloc_picture(s, pic, direct);
/* caller handed us our own INPLACE_OFFSET-shifted buffers back: no copy */
1226 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1227 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1228 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1231 int h_chroma_shift, v_chroma_shift;
1232 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
/* copy path: per-plane copy into the internal buffer, padding edges */
1236 for (i = 0; i < 3; i++) {
1237 int src_stride = pic_arg->linesize[i];
1238 int dst_stride = i ? s->uvlinesize : s->linesize;
1239 int h_shift = i ? h_chroma_shift : 0;
1240 int v_shift = i ? v_chroma_shift : 0;
1241 int w = s->width >> h_shift;
1242 int h = s->height >> v_shift;
1243 uint8_t *src = pic_arg->data[i];
1244 uint8_t *dst = pic->f->data[i];
/* interlaced MPEG-2 with tall 32-aligned padding needs special vpad
 * handling (the assignment itself is on an elided line) */
1247 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1248 && !s->progressive_sequence
1249 && FFALIGN(s->height, 32) - s->height > 16)
1252 if (!s->avctx->rc_buffer_size)
1253 dst += INPLACE_OFFSET;
1255 if (src_stride == dst_stride)
1256 memcpy(dst, src, src_stride * h);
1259 uint8_t *dst2 = dst;
1261 memcpy(dst2, src, w);
/* pad to MB size so motion estimation never reads uninitialized data */
1266 if ((s->width & 15) || (s->height & (vpad-1))) {
1267 s->mpvencdsp.draw_edges(dst, dst_stride,
1276 ret = av_frame_copy_props(pic->f, pic_arg);
1280 pic->f->display_picture_number = display_picture_number;
1281 pic->f->pts = pts; // we set this here to avoid modifiying pic_arg
1283 /* Flushing: When we have not received enough input frames,
1284 * ensure s->input_picture[0] contains the first picture */
1285 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1286 if (s->input_picture[flush_offset])
1289 if (flush_offset <= 1)
1292 encoding_delay = encoding_delay - flush_offset + 1;
1295 /* shift buffer entries */
1296 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1297 s->input_picture[i - flush_offset] = s->input_picture[i];
1299 s->input_picture[encoding_delay] = (Picture*) pic;
/*
 * Decide whether candidate frame 'p' is similar enough to the last coded
 * reference 'ref' to be skipped entirely.  Compares all three planes in
 * 8x8 blocks with mecc.frame_skip_cmp[1] and aggregates per
 * avctx->frame_skip_exp: 0=max, 1=sum(|v|), 2=sum(v^2), 3=sum(|v^3|),
 * 4=sum(v^4); a negative exp applies the matching root afterwards.
 * NOTE(review): the declarations of score/x/y/plane, the merge of 'score'
 * into 'score64' and the actual return statements are on elided lines —
 * the visible comparisons against frame_skip_threshold / frame_skip_factor
 * gate the skip decision.
 */
1304 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1308 int64_t score64 = 0;
1310 for (plane = 0; plane < 3; plane++) {
1311 const int stride = p->f->linesize[plane];
/* luma is scanned at 2x the MB grid (16x16 -> four 8x8 blocks per MB) */
1312 const int bw = plane ? 1 : 2;
1313 for (y = 0; y < s->mb_height * bw; y++) {
1314 for (x = 0; x < s->mb_width * bw; x++) {
/* non-shared input pictures carry the INPLACE_OFFSET (16) shift */
1315 int off = p->shared ? 0 : 16;
1316 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1317 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1318 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1320 switch (FFABS(s->avctx->frame_skip_exp)) {
1321 case 0: score = FFMAX(score, v); break;
1322 case 1: score += FFABS(v); break;
1323 case 2: score64 += v * (int64_t)v; break;
1324 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1325 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
/* negative exponent: normalize by MB count and take the 1/|exp| root */
1334 if (s->avctx->frame_skip_exp < 0)
1335 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1336 -1.0/s->avctx->frame_skip_exp);
1338 if (score64 < s->avctx->frame_skip_threshold)
1340 if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
/*
 * Helper for estimate_best_b_count(): encode one frame (or NULL to flush)
 * with the scratch encoder context 'c' via the legacy
 * avcodec_encode_video2() API and release the produced packet.
 * NOTE(review): the lines that derive the return value (presumably the
 * packet size / error code) are elided from this listing.
 */
1345 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1347 AVPacket pkt = { 0 };
1348 int ret, got_output;
1350 av_init_packet(&pkt);
1351 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
/* the packet payload itself is not needed, only its size */
1356 av_packet_unref(&pkt);
1360 static int estimate_best_b_count(MpegEncContext *s)
1362 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1363 AVCodecContext *c = avcodec_alloc_context3(NULL);
1364 const int scale = s->brd_scale;
1365 int i, j, out_size, p_lambda, b_lambda, lambda2;
1366 int64_t best_rd = INT64_MAX;
1367 int best_b_count = -1;
1370 return AVERROR(ENOMEM);
1371 av_assert0(scale >= 0 && scale <= 3);
1374 //s->next_picture_ptr->quality;
1375 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1376 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1377 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1378 if (!b_lambda) // FIXME we should do this somewhere else
1379 b_lambda = p_lambda;
1380 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1383 c->width = s->width >> scale;
1384 c->height = s->height >> scale;
1385 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1386 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1387 c->mb_decision = s->avctx->mb_decision;
1388 c->me_cmp = s->avctx->me_cmp;
1389 c->mb_cmp = s->avctx->mb_cmp;
1390 c->me_sub_cmp = s->avctx->me_sub_cmp;
1391 c->pix_fmt = AV_PIX_FMT_YUV420P;
1392 c->time_base = s->avctx->time_base;
1393 c->max_b_frames = s->max_b_frames;
1395 if (avcodec_open2(c, codec, NULL) < 0)
1398 for (i = 0; i < s->max_b_frames + 2; i++) {
1399 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1400 s->next_picture_ptr;
1403 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1404 pre_input = *pre_input_ptr;
1405 memcpy(data, pre_input_ptr->f->data, sizeof(data));
1407 if (!pre_input.shared && i) {
1408 data[0] += INPLACE_OFFSET;
1409 data[1] += INPLACE_OFFSET;
1410 data[2] += INPLACE_OFFSET;
1413 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1414 s->tmp_frames[i]->linesize[0],
1416 pre_input.f->linesize[0],
1417 c->width, c->height);
1418 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1419 s->tmp_frames[i]->linesize[1],
1421 pre_input.f->linesize[1],
1422 c->width >> 1, c->height >> 1);
1423 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1424 s->tmp_frames[i]->linesize[2],
1426 pre_input.f->linesize[2],
1427 c->width >> 1, c->height >> 1);
1431 for (j = 0; j < s->max_b_frames + 1; j++) {
1434 if (!s->input_picture[j])
1437 c->error[0] = c->error[1] = c->error[2] = 0;
1439 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1440 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1442 out_size = encode_frame(c, s->tmp_frames[0]);
1444 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1446 for (i = 0; i < s->max_b_frames + 1; i++) {
1447 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1449 s->tmp_frames[i + 1]->pict_type = is_p ?
1450 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1451 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1453 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1455 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1458 /* get the delayed frames */
1460 out_size = encode_frame(c, NULL);
1461 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1464 rd += c->error[0] + c->error[1] + c->error[2];
1475 return best_b_count;
/*
 * Pick the next picture to encode and decide its coded type/order.
 * Shifts the reordered FIFO, optionally drops the frame entirely
 * (frame_skip), chooses the number of B-frames per b_frame_strategy,
 * assigns coded_picture_number, then publishes the chosen picture as
 * s->new_picture / s->current_picture_ptr (copying it out of a shared
 * user buffer when necessary).  Returns 0 on success, negative on error.
 * NOTE(review): several declarations ('i', 'b_frames', 'ret', 'pic'),
 * braces and early 'return's are on elided lines of this listing.
 */
1478 static int select_input_picture(MpegEncContext *s)
1482 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1483 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1484 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1486 /* set next picture type & ordering */
1487 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
/* frame skipping: drop the input if it is close enough to the last ref */
1488 if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1489 if (s->picture_in_gop_number < s->gop_size &&
1490 s->next_picture_ptr &&
1491 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1492 // FIXME check that te gop check above is +-1 correct
1493 av_frame_unref(s->input_picture[0]->f);
1495 ff_vbv_update(s, 0);
/* no reference yet (or intra-only codec): force an I picture */
1501 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1502 !s->next_picture_ptr || s->intra_only) {
1503 s->reordered_input_picture[0] = s->input_picture[0];
1504 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1505 s->reordered_input_picture[0]->f->coded_picture_number =
1506 s->coded_picture_number++;
/* two-pass: take the picture types recorded in the pass-1 stats */
1510 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1511 for (i = 0; i < s->max_b_frames + 1; i++) {
1512 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1514 if (pict_num >= s->rc_context.num_entries)
1516 if (!s->input_picture[i]) {
1517 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1521 s->input_picture[i]->f->pict_type =
1522 s->rc_context.entry[pict_num].new_pict_type;
/* strategy 0: always use the maximum available run of B-frames */
1526 if (s->b_frame_strategy == 0) {
1527 b_frames = s->max_b_frames;
1528 while (b_frames && !s->input_picture[b_frames])
/* strategy 1: cheap heuristic based on intra-MB counts between frames */
1530 } else if (s->b_frame_strategy == 1) {
1531 for (i = 1; i < s->max_b_frames + 1; i++) {
1532 if (s->input_picture[i] &&
1533 s->input_picture[i]->b_frame_score == 0) {
1534 s->input_picture[i]->b_frame_score =
1536 s->input_picture[i ]->f->data[0],
1537 s->input_picture[i - 1]->f->data[0],
1541 for (i = 0; i < s->max_b_frames + 1; i++) {
1542 if (!s->input_picture[i] ||
1543 s->input_picture[i]->b_frame_score - 1 >
1544 s->mb_num / s->b_sensitivity)
1548 b_frames = FFMAX(0, i - 1);
/* reset the cached scores of the pictures we are about to consume */
1551 for (i = 0; i < b_frames + 1; i++) {
1552 s->input_picture[i]->b_frame_score = 0;
/* strategy 2: exhaustive rate-distortion search on downscaled frames */
1554 } else if (s->b_frame_strategy == 2) {
1555 b_frames = estimate_best_b_count(s);
/* a user-forced non-B type inside the run truncates it */
1560 for (i = b_frames - 1; i >= 0; i--) {
1561 int type = s->input_picture[i]->f->pict_type;
1562 if (type && type != AV_PICTURE_TYPE_B)
1565 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1566 b_frames == s->max_b_frames) {
1567 av_log(s->avctx, AV_LOG_ERROR,
1568 "warning, too many b frames in a row\n");
/* GOP boundary handling: shorten the run / force an I frame */
1571 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1572 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1573 s->gop_size > s->picture_in_gop_number) {
1574 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1576 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1578 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1582 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1583 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
/* reorder: the anchor (I/P) goes out first, the B-frames after it */
1586 s->reordered_input_picture[0] = s->input_picture[b_frames];
1587 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1588 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1589 s->reordered_input_picture[0]->f->coded_picture_number =
1590 s->coded_picture_number++;
1591 for (i = 0; i < b_frames; i++) {
1592 s->reordered_input_picture[i + 1] = s->input_picture[i];
1593 s->reordered_input_picture[i + 1]->f->pict_type =
1595 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1596 s->coded_picture_number++;
1601 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1603 if (s->reordered_input_picture[0]) {
/* B pictures are never used as references (reference = 0) */
1604 s->reordered_input_picture[0]->reference =
1605 s->reordered_input_picture[0]->f->pict_type !=
1606 AV_PICTURE_TYPE_B ? 3 : 0;
1608 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1611 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1612 // input is a shared pix, so we can't modifiy it -> alloc a new
1613 // one & ensure that the shared one is reuseable
1616 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1619 pic = &s->picture[i];
1621 pic->reference = s->reordered_input_picture[0]->reference;
1622 if (alloc_picture(s, pic, 0) < 0) {
1626 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1630 /* mark us unused / free shared pic */
1631 av_frame_unref(s->reordered_input_picture[0]->f);
1632 s->reordered_input_picture[0]->shared = 0;
1634 s->current_picture_ptr = pic;
1636 // input is not a shared pix -> reuse buffer for current_pix
1637 s->current_picture_ptr = s->reordered_input_picture[0];
1638 for (i = 0; i < 4; i++) {
1639 s->new_picture.f->data[i] += INPLACE_OFFSET;
1642 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1643 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1644 s->current_picture_ptr)) < 0)
1647 s->picture_number = s->new_picture.f->display_picture_number;
/*
 * Post-encode housekeeping for the just-coded frame: pad the edges of the
 * reconstructed reference picture (needed by unrestricted MV prediction),
 * remember the last picture type / lambda, and mirror per-frame data into
 * the deprecated coded_frame / error[] API fields.
 */
1652 static void frame_end(MpegEncContext *s)
/* only reference frames need edge padding; B frames are never predicted from */
1654 if (s->unrestricted_mv &&
1655 s->current_picture.reference &&
1657 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1658 int hshift = desc->log2_chroma_w;
1659 int vshift = desc->log2_chroma_h;
1660 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1661 s->current_picture.f->linesize[0],
1662 s->h_edge_pos, s->v_edge_pos,
1663 EDGE_WIDTH, EDGE_WIDTH,
1664 EDGE_TOP | EDGE_BOTTOM,
/* chroma planes: dimensions and edge width scaled by the subsampling shifts */
1665 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1666 s->current_picture.f->linesize[1],
1667 s->h_edge_pos >> hshift,
1668 s->v_edge_pos >> vshift,
1669 EDGE_WIDTH >> hshift,
1670 EDGE_WIDTH >> vshift,
1671 EDGE_TOP | EDGE_BOTTOM);
1672 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1673 s->current_picture.f->linesize[2],
1674 s->h_edge_pos >> hshift,
1675 s->v_edge_pos >> vshift,
1676 EDGE_WIDTH >> hshift,
1677 EDGE_WIDTH >> vshift,
1678 EDGE_TOP | EDGE_BOTTOM);
/* remember type/lambda of this frame for future rate-control decisions */
1683 s->last_pict_type = s->pict_type;
1684 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1685 if (s->pict_type!= AV_PICTURE_TYPE_B)
1686 s->last_non_b_pict_type = s->pict_type;
1688 #if FF_API_CODED_FRAME
1689 FF_DISABLE_DEPRECATION_WARNINGS
1690 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1691 FF_ENABLE_DEPRECATION_WARNINGS
1693 #if FF_API_ERROR_FRAME
1694 FF_DISABLE_DEPRECATION_WARNINGS
1695 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1696 sizeof(s->current_picture.encoding_error));
1697 FF_ENABLE_DEPRECATION_WARNINGS
/*
 * Refresh the per-coefficient DCT offsets used for noise reduction, for
 * both the intra and inter statistics sets.  The running error sums are
 * halved once the sample count exceeds 2^16 (exponential forgetting), and
 * each offset is noise_reduction * count / error_sum with rounding
 * (+error_sum/2, +1 to avoid division by zero).
 */
1701 static void update_noise_reduction(MpegEncContext *s)
1705 for (intra = 0; intra < 2; intra++) {
/* decay old statistics so the estimate can track content changes */
1706 if (s->dct_count[intra] > (1 << 16)) {
1707 for (i = 0; i < 64; i++) {
1708 s->dct_error_sum[intra][i] >>= 1;
1710 s->dct_count[intra] >>= 1;
1713 for (i = 0; i < 64; i++) {
1714 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1715 s->dct_count[intra] +
1716 s->dct_error_sum[intra][i] / 2) /
1717 (s->dct_error_sum[intra][i] + 1);
/*
 * Per-frame setup before encoding: rotate the last/next/current reference
 * picture pointers, take fresh references on them, adjust data pointers
 * and linesizes for field pictures, select the dequantizer functions for
 * the output format, and update the noise-reduction tables.
 * Returns 0 on success or a negative error from ff_mpeg_ref_picture().
 */
1722 static int frame_start(MpegEncContext *s)
1726 /* mark & release old frames */
1727 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1728 s->last_picture_ptr != s->next_picture_ptr &&
1729 s->last_picture_ptr->f->buf[0]) {
1730 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1733 s->current_picture_ptr->f->pict_type = s->pict_type;
1734 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1736 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1737 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1738 s->current_picture_ptr)) < 0)
/* B frames do not advance the reference chain */
1741 if (s->pict_type != AV_PICTURE_TYPE_B) {
1742 s->last_picture_ptr = s->next_picture_ptr;
1744 s->next_picture_ptr = s->current_picture_ptr;
1747 if (s->last_picture_ptr) {
1748 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1749 if (s->last_picture_ptr->f->buf[0] &&
1750 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1751 s->last_picture_ptr)) < 0)
1754 if (s->next_picture_ptr) {
1755 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1756 if (s->next_picture_ptr->f->buf[0] &&
1757 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1758 s->next_picture_ptr)) < 0)
/* field coding: double the strides and, for bottom field, skip one line */
1762 if (s->picture_structure!= PICT_FRAME) {
1764 for (i = 0; i < 4; i++) {
1765 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1766 s->current_picture.f->data[i] +=
1767 s->current_picture.f->linesize[i];
1769 s->current_picture.f->linesize[i] *= 2;
1770 s->last_picture.f->linesize[i] *= 2;
1771 s->next_picture.f->linesize[i] *= 2;
/* choose dequantizers matching the target bitstream syntax */
1775 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1776 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1777 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1778 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1779 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1780 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1782 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1783 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1786 if (s->dct_error_sum) {
1787 av_assert2(s->avctx->noise_reduction && s->encoding);
1788 update_noise_reduction(s);
/*
 * Public entry point: encode one video frame into 'pkt'.
 * Pipeline: load_input_picture() -> select_input_picture() -> allocate the
 * output packet and per-slice bit writers -> frame_start()/encode_picture()
 * -> VBV re-encode loop (raise lambda and retry when the frame overshoots
 * the rate-control buffer) -> stuffing/vbv_delay patching for MPEG-1/2 CBR
 * -> pts/dts and side-data bookkeeping.  *got_packet is set when a packet
 * was produced (encoders with delay emit nothing for the first frames).
 * NOTE(review): numerous lines are elided in this listing (error returns,
 * several closing braces, the re-encode 'goto' target) — comments below
 * describe only the visible code.
 */
1794 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1795 const AVFrame *pic_arg, int *got_packet)
1797 MpegEncContext *s = avctx->priv_data;
1798 int i, stuffing_count, ret;
1799 int context_count = s->slice_context_count;
1801 s->vbv_ignore_qmax = 0;
1803 s->picture_in_gop_number++;
1805 if (load_input_picture(s, pic_arg) < 0)
1808 if (select_input_picture(s) < 0) {
/* output? (new_picture is set when a frame is ready to be coded) */
1813 if (s->new_picture.f->data[0]) {
1814 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1815 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1817 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1818 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
/* optional H.263 macroblock info side data (12 bytes per MB) */
1821 s->mb_info_ptr = av_packet_new_side_data(pkt,
1822 AV_PKT_DATA_H263_MB_INFO,
1823 s->mb_width*s->mb_height*12);
1824 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* split the output buffer proportionally among the slice threads */
1827 for (i = 0; i < context_count; i++) {
1828 int start_y = s->thread_context[i]->start_mb_y;
1829 int end_y = s->thread_context[i]-> end_mb_y;
1830 int h = s->mb_height;
1831 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1832 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1834 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1837 s->pict_type = s->new_picture.f->pict_type;
1839 ret = frame_start(s);
1843 ret = encode_picture(s, s->picture_number);
1844 if (growing_buffer) {
1845 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1846 pkt->data = s->pb.buf;
1847 pkt->size = avctx->internal->byte_buffer_size;
/* mirror statistics into the deprecated public AVCodecContext fields */
1852 #if FF_API_STAT_BITS
1853 FF_DISABLE_DEPRECATION_WARNINGS
1854 avctx->header_bits = s->header_bits;
1855 avctx->mv_bits = s->mv_bits;
1856 avctx->misc_bits = s->misc_bits;
1857 avctx->i_tex_bits = s->i_tex_bits;
1858 avctx->p_tex_bits = s->p_tex_bits;
1859 avctx->i_count = s->i_count;
1860 // FIXME f/b_count in avctx
1861 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1862 avctx->skip_count = s->skip_count;
1863 FF_ENABLE_DEPRECATION_WARNINGS
1868 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1869 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
/* --- VBV compliance: re-encode at higher lambda if the frame is too big */
1871 if (avctx->rc_buffer_size) {
1872 RateControlContext *rcc = &s->rc_context;
1873 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1874 int hq = (s->avctx->mb_decision == FF_MB_DECISION_RD || s->avctx->trellis);
1875 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1877 if (put_bits_count(&s->pb) > max_size &&
1878 s->lambda < s->lmax) {
1879 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1880 (s->qscale + 1) / s->qscale);
1881 if (s->adaptive_quant) {
1883 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1884 s->lambda_table[i] =
1885 FFMAX(s->lambda_table[i] + min_step,
1886 s->lambda_table[i] * (s->qscale + 1) /
1889 s->mb_skipped = 0; // done in frame_start()
1890 // done in encode_picture() so we must undo it
1891 if (s->pict_type == AV_PICTURE_TYPE_P) {
1892 if (s->flipflop_rounding ||
1893 s->codec_id == AV_CODEC_ID_H263P ||
1894 s->codec_id == AV_CODEC_ID_MPEG4)
1895 s->no_rounding ^= 1;
1897 if (s->pict_type != AV_PICTURE_TYPE_B) {
1898 s->time_base = s->last_time_base;
1899 s->last_non_b_time = s->time - s->pp_time;
/* rewind every slice writer and retry the whole frame */
1901 for (i = 0; i < context_count; i++) {
1902 PutBitContext *pb = &s->thread_context[i]->pb;
1903 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1905 s->vbv_ignore_qmax = 1;
1906 av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1910 av_assert0(s->avctx->rc_max_rate);
1913 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1914 ff_write_pass1_stats(s);
1916 for (i = 0; i < 4; i++) {
1917 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1918 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1920 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1921 s->current_picture_ptr->encoding_error,
1922 (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1925 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1926 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1927 s->misc_bits + s->i_tex_bits +
1929 flush_put_bits(&s->pb);
1930 s->frame_bits = put_bits_count(&s->pb);
/* --- stuffing: pad the frame up to what rate control requires --------- */
1932 stuffing_count = ff_vbv_update(s, s->frame_bits);
1933 s->stuffing_bits = 8*stuffing_count;
1934 if (stuffing_count) {
1935 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1936 stuffing_count + 50) {
1937 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1941 switch (s->codec_id) {
1942 case AV_CODEC_ID_MPEG1VIDEO:
1943 case AV_CODEC_ID_MPEG2VIDEO:
1944 while (stuffing_count--) {
1945 put_bits(&s->pb, 8, 0);
/* MPEG-4 uses a stuffing start code (0x000001C3) then 0xFF bytes */
1948 case AV_CODEC_ID_MPEG4:
1949 put_bits(&s->pb, 16, 0);
1950 put_bits(&s->pb, 16, 0x1C3);
1951 stuffing_count -= 4;
1952 while (stuffing_count--) {
1953 put_bits(&s->pb, 8, 0xFF);
1957 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1959 flush_put_bits(&s->pb);
1960 s->frame_bits = put_bits_count(&s->pb);
1963 /* update mpeg1/2 vbv_delay for CBR */
1964 if (s->avctx->rc_max_rate &&
1965 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1966 s->out_format == FMT_MPEG1 &&
1967 90000LL * (avctx->rc_buffer_size - 1) <=
1968 s->avctx->rc_max_rate * 0xFFFFLL) {
1969 AVCPBProperties *props;
1972 int vbv_delay, min_delay;
1973 double inbits = s->avctx->rc_max_rate *
1974 av_q2d(s->avctx->time_base);
1975 int minbits = s->frame_bits - 8 *
1976 (s->vbv_delay_ptr - s->pb.buf - 1);
1977 double bits = s->rc_context.buffer_index + minbits - inbits;
1980 av_log(s->avctx, AV_LOG_ERROR,
1981 "Internal error, negative bits\n");
1983 assert(s->repeat_first_field == 0);
/* vbv_delay is measured in 90 kHz clock ticks */
1985 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1986 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1987 s->avctx->rc_max_rate;
1989 vbv_delay = FFMAX(vbv_delay, min_delay);
1991 av_assert0(vbv_delay < 0xFFFF);
/* patch the 16-bit vbv_delay field straddling three header bytes */
1993 s->vbv_delay_ptr[0] &= 0xF8;
1994 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1995 s->vbv_delay_ptr[1] = vbv_delay >> 5;
1996 s->vbv_delay_ptr[2] &= 0x07;
1997 s->vbv_delay_ptr[2] |= vbv_delay << 3;
1999 props = av_cpb_properties_alloc(&props_size);
2001 return AVERROR(ENOMEM);
/* 27 MHz units for the CPB side data (90 kHz * 300) */
2002 props->vbv_delay = vbv_delay * 300;
2004 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
2005 (uint8_t*)props, props_size);
2011 #if FF_API_VBV_DELAY
2012 FF_DISABLE_DEPRECATION_WARNINGS
2013 avctx->vbv_delay = vbv_delay * 300;
2014 FF_ENABLE_DEPRECATION_WARNINGS
2017 s->total_bits += s->frame_bits;
2018 #if FF_API_STAT_BITS
2019 FF_DISABLE_DEPRECATION_WARNINGS
2020 avctx->frame_bits = s->frame_bits;
2021 FF_ENABLE_DEPRECATION_WARNINGS
/* pts/dts: dts of the first frames is synthesized from dts_delta */
2025 pkt->pts = s->current_picture.f->pts;
2026 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2027 if (!s->current_picture.f->coded_picture_number)
2028 pkt->dts = pkt->pts - s->dts_delta;
2030 pkt->dts = s->reordered_pts;
2031 s->reordered_pts = pkt->pts;
2033 pkt->dts = pkt->pts;
2034 if (s->current_picture.f->key_frame)
2035 pkt->flags |= AV_PKT_FLAG_KEY;
2037 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2042 /* release non-reference frames */
2043 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2044 if (!s->picture[i].reference)
2045 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2048 av_assert1((s->frame_bits & 7) == 0);
2050 pkt->size = s->frame_bits / 8;
2051 *got_packet = !!pkt->size;
/*
 * Zero out block 'n' when its quantized coefficients are so sparse/small
 * that coding them is not worth the bits.  Each nonzero level-1
 * coefficient contributes a frequency-dependent weight from tab[] (low
 * frequencies cost more); any level > 1 aborts (handled on an elided
 * line).  If the accumulated score stays below 'threshold' the block is
 * cleared; a negative threshold means "keep the DC coefficient" (skip_dc,
 * whose declaration is elided from this listing).
 */
2055 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2056 int n, int threshold)
/* weight of a nonzero coeff at each zigzag position; 0 past position 23 */
2058 static const char tab[64] = {
2059 3, 2, 2, 1, 1, 1, 1, 1,
2060 1, 1, 1, 1, 1, 1, 1, 1,
2061 1, 1, 1, 1, 1, 1, 1, 1,
2062 0, 0, 0, 0, 0, 0, 0, 0,
2063 0, 0, 0, 0, 0, 0, 0, 0,
2064 0, 0, 0, 0, 0, 0, 0, 0,
2065 0, 0, 0, 0, 0, 0, 0, 0,
2066 0, 0, 0, 0, 0, 0, 0, 0
2071 int16_t *block = s->block[n];
2072 const int last_index = s->block_last_index[n];
2075 if (threshold < 0) {
2077 threshold = -threshold;
2081 /* Are all we could set to zero already zero? */
2082 if (last_index <= skip_dc - 1)
2085 for (i = 0; i <= last_index; i++) {
2086 const int j = s->intra_scantable.permutated[i];
2087 const int level = FFABS(block[j]);
2089 if (skip_dc && i == 0)
2093 } else if (level > 1) {
2099 if (score >= threshold)
/* below threshold: clear every (non-DC) coefficient */
2101 for (i = skip_dc; i <= last_index; i++) {
2102 const int j = s->intra_scantable.permutated[i];
2106 s->block_last_index[n] = 0;
2108 s->block_last_index[n] = -1;
/*
 * Clamp every quantized coefficient of 'block' into the codable range
 * [s->min_qcoeff, s->max_qcoeff], skipping the intra DC coefficient.
 * Emits a one-time-style warning (only in simple MB decision mode, where
 * the RD search would otherwise have avoided the overflow) stating how
 * many coefficients were clipped.
 * NOTE(review): the 'overflow' counter declaration/increments and the
 * clamped stores sit on elided lines of this listing.
 */
2111 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2115 const int maxlevel = s->max_qcoeff;
2116 const int minlevel = s->min_qcoeff;
2120 i = 1; // skip clipping of intra dc
2124 for (; i <= last_index; i++) {
2125 const int j = s->intra_scantable.permutated[i];
2126 int level = block[j];
2128 if (level > maxlevel) {
2131 } else if (level < minlevel) {
2139 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2140 av_log(s->avctx, AV_LOG_INFO,
2141 "warning, clipping %d dct coefficients to %d..%d\n",
2142 overflow, minlevel, maxlevel);
/*
 * Build a perceptual weight for each pixel of an 8x8 block: for every
 * pixel, look at its (up to) 3x3 neighbourhood, accumulate sum and
 * sum-of-squares, and derive a weight proportional to the local standard
 * deviation (36 * sqrt(count*sqr - sum^2) / count).  Flat areas get small
 * weights (errors there are more visible), textured areas larger ones.
 * Used by the noise-shaping quantizer (dct_quantize_refine).
 * NOTE(review): the count/sum/sqr declarations and resets are on elided
 * lines of this listing.
 */
2145 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2149 for (y = 0; y < 8; y++) {
2150 for (x = 0; x < 8; x++) {
/* clamp the 3x3 window to the block border */
2156 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2157 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2158 int v = ptr[x2 + y2 * stride];
2164 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2169 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2170 int motion_x, int motion_y,
2171 int mb_block_height,
2175 int16_t weight[12][64];
2176 int16_t orig[12][64];
2177 const int mb_x = s->mb_x;
2178 const int mb_y = s->mb_y;
2181 int dct_offset = s->linesize * 8; // default for progressive frames
2182 int uv_dct_offset = s->uvlinesize * 8;
2183 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2184 ptrdiff_t wrap_y, wrap_c;
2186 for (i = 0; i < mb_block_count; i++)
2187 skip_dct[i] = s->skipdct;
2189 if (s->adaptive_quant) {
2190 const int last_qp = s->qscale;
2191 const int mb_xy = mb_x + mb_y * s->mb_stride;
2193 s->lambda = s->lambda_table[mb_xy];
2196 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2197 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2198 s->dquant = s->qscale - last_qp;
2200 if (s->out_format == FMT_H263) {
2201 s->dquant = av_clip(s->dquant, -2, 2);
2203 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2205 if (s->pict_type == AV_PICTURE_TYPE_B) {
2206 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2209 if (s->mv_type == MV_TYPE_8X8)
2215 ff_set_qscale(s, last_qp + s->dquant);
2216 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2217 ff_set_qscale(s, s->qscale + s->dquant);
2219 wrap_y = s->linesize;
2220 wrap_c = s->uvlinesize;
2221 ptr_y = s->new_picture.f->data[0] +
2222 (mb_y * 16 * wrap_y) + mb_x * 16;
2223 ptr_cb = s->new_picture.f->data[1] +
2224 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2225 ptr_cr = s->new_picture.f->data[2] +
2226 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2228 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2229 uint8_t *ebuf = s->sc.edge_emu_buffer + 36 * wrap_y;
2230 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2231 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2232 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2234 16, 16, mb_x * 16, mb_y * 16,
2235 s->width, s->height);
2237 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2239 mb_block_width, mb_block_height,
2240 mb_x * mb_block_width, mb_y * mb_block_height,
2242 ptr_cb = ebuf + 16 * wrap_y;
2243 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2245 mb_block_width, mb_block_height,
2246 mb_x * mb_block_width, mb_y * mb_block_height,
2248 ptr_cr = ebuf + 16 * wrap_y + 16;
2252 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2253 int progressive_score, interlaced_score;
2255 s->interlaced_dct = 0;
2256 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2257 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2258 NULL, wrap_y, 8) - 400;
2260 if (progressive_score > 0) {
2261 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2262 NULL, wrap_y * 2, 8) +
2263 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2264 NULL, wrap_y * 2, 8);
2265 if (progressive_score > interlaced_score) {
2266 s->interlaced_dct = 1;
2268 dct_offset = wrap_y;
2269 uv_dct_offset = wrap_c;
2271 if (s->chroma_format == CHROMA_422 ||
2272 s->chroma_format == CHROMA_444)
2278 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2279 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2280 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2281 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2283 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2287 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2288 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2289 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2290 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2291 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2292 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2293 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2294 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2295 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2296 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2297 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2298 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2302 op_pixels_func (*op_pix)[4];
2303 qpel_mc_func (*op_qpix)[16];
2304 uint8_t *dest_y, *dest_cb, *dest_cr;
2306 dest_y = s->dest[0];
2307 dest_cb = s->dest[1];
2308 dest_cr = s->dest[2];
2310 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2311 op_pix = s->hdsp.put_pixels_tab;
2312 op_qpix = s->qdsp.put_qpel_pixels_tab;
2314 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2315 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2318 if (s->mv_dir & MV_DIR_FORWARD) {
2319 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2320 s->last_picture.f->data,
2322 op_pix = s->hdsp.avg_pixels_tab;
2323 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2325 if (s->mv_dir & MV_DIR_BACKWARD) {
2326 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2327 s->next_picture.f->data,
2331 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2332 int progressive_score, interlaced_score;
2334 s->interlaced_dct = 0;
2335 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2336 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2340 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2341 progressive_score -= 400;
2343 if (progressive_score > 0) {
2344 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2346 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2350 if (progressive_score > interlaced_score) {
2351 s->interlaced_dct = 1;
2353 dct_offset = wrap_y;
2354 uv_dct_offset = wrap_c;
2356 if (s->chroma_format == CHROMA_422)
2362 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2363 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2364 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2365 dest_y + dct_offset, wrap_y);
2366 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2367 dest_y + dct_offset + 8, wrap_y);
2369 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2373 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2374 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2375 if (!s->chroma_y_shift) { /* 422 */
2376 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2377 dest_cb + uv_dct_offset, wrap_c);
2378 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2379 dest_cr + uv_dct_offset, wrap_c);
2382 /* pre quantization */
2383 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2384 2 * s->qscale * s->qscale) {
2386 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2388 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2390 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2391 wrap_y, 8) < 20 * s->qscale)
2393 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2394 wrap_y, 8) < 20 * s->qscale)
2396 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2398 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2400 if (!s->chroma_y_shift) { /* 422 */
2401 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2402 dest_cb + uv_dct_offset,
2403 wrap_c, 8) < 20 * s->qscale)
2405 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2406 dest_cr + uv_dct_offset,
2407 wrap_c, 8) < 20 * s->qscale)
2413 if (s->quantizer_noise_shaping) {
2415 get_visual_weight(weight[0], ptr_y , wrap_y);
2417 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2419 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2421 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2423 get_visual_weight(weight[4], ptr_cb , wrap_c);
2425 get_visual_weight(weight[5], ptr_cr , wrap_c);
2426 if (!s->chroma_y_shift) { /* 422 */
2428 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2431 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2434 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2437 /* DCT & quantize */
2438 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2440 for (i = 0; i < mb_block_count; i++) {
2443 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2444 // FIXME we could decide to change to quantizer instead of
2446 // JS: I don't think that would be a good idea it could lower
2447 // quality instead of improve it. Just INTRADC clipping
2448 // deserves changes in quantizer
2450 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2452 s->block_last_index[i] = -1;
2454 if (s->quantizer_noise_shaping) {
2455 for (i = 0; i < mb_block_count; i++) {
2457 s->block_last_index[i] =
2458 dct_quantize_refine(s, s->block[i], weight[i],
2459 orig[i], i, s->qscale);
2464 if (s->luma_elim_threshold && !s->mb_intra)
2465 for (i = 0; i < 4; i++)
2466 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2467 if (s->chroma_elim_threshold && !s->mb_intra)
2468 for (i = 4; i < mb_block_count; i++)
2469 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2471 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2472 for (i = 0; i < mb_block_count; i++) {
2473 if (s->block_last_index[i] == -1)
2474 s->coded_score[i] = INT_MAX / 256;
2479 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2480 s->block_last_index[4] =
2481 s->block_last_index[5] = 0;
2483 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2484 if (!s->chroma_y_shift) { /* 422 / 444 */
2485 for (i=6; i<12; i++) {
2486 s->block_last_index[i] = 0;
2487 s->block[i][0] = s->block[4][0];
2492 // non c quantize code returns incorrect block_last_index FIXME
2493 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2494 for (i = 0; i < mb_block_count; i++) {
2496 if (s->block_last_index[i] > 0) {
2497 for (j = 63; j > 0; j--) {
2498 if (s->block[i][s->intra_scantable.permutated[j]])
2501 s->block_last_index[i] = j;
2506 /* huffman encode */
2507 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2508 case AV_CODEC_ID_MPEG1VIDEO:
2509 case AV_CODEC_ID_MPEG2VIDEO:
2510 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2511 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2513 case AV_CODEC_ID_MPEG4:
2514 if (CONFIG_MPEG4_ENCODER)
2515 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2517 case AV_CODEC_ID_MSMPEG4V2:
2518 case AV_CODEC_ID_MSMPEG4V3:
2519 case AV_CODEC_ID_WMV1:
2520 if (CONFIG_MSMPEG4_ENCODER)
2521 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2523 case AV_CODEC_ID_WMV2:
2524 if (CONFIG_WMV2_ENCODER)
2525 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2527 case AV_CODEC_ID_H261:
2528 if (CONFIG_H261_ENCODER)
2529 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2531 case AV_CODEC_ID_H263:
2532 case AV_CODEC_ID_H263P:
2533 case AV_CODEC_ID_FLV1:
2534 case AV_CODEC_ID_RV10:
2535 case AV_CODEC_ID_RV20:
2536 if (CONFIG_H263_ENCODER)
2537 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2539 case AV_CODEC_ID_MJPEG:
2540 case AV_CODEC_ID_AMV:
2541 if (CONFIG_MJPEG_ENCODER)
2542 ff_mjpeg_encode_mb(s, s->block);
/* Encode one macroblock: dispatch to encode_mb_internal() with the chroma
 * block height/width and total block count for the active chroma format:
 * 4:2:0 -> 6 blocks, 4:2:2 -> 8 blocks, otherwise (4:4:4) -> 12 blocks. */
2549 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2551 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2552 else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2553 else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/* Snapshot the encoder state that a trial macroblock encode may modify,
 * from context s into context d. Used by the RD mode decision in
 * encode_thread(): backup_s is filled with this "before" state so each
 * candidate MB type starts from identical predictors and statistics.
 * NOTE(review): 'type' is unused in the lines visible here; lines elided
 * from this extract may use it -- confirm against the full file. */
2556 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* motion-vector prediction state */
2559 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
/* skip-run and DC predictors */
2562 d->mb_skip_run= s->mb_skip_run;
2564 d->last_dc[i] = s->last_dc[i];
/* bit-usage statistics, so a rejected trial can be rolled back exactly */
2567 d->mv_bits= s->mv_bits;
2568 d->i_tex_bits= s->i_tex_bits;
2569 d->p_tex_bits= s->p_tex_bits;
2570 d->i_count= s->i_count;
2571 d->f_count= s->f_count;
2572 d->b_count= s->b_count;
2573 d->skip_count= s->skip_count;
2574 d->misc_bits= s->misc_bits;
/* quantizer state */
2578 d->qscale= s->qscale;
2579 d->dquant= s->dquant;
/* MPEG-1/2 escape-code length state */
2581 d->esc3_level_length= s->esc3_level_length;
/* Copy the encoder state resulting from a trial macroblock encode from s
 * into d. Counterpart of copy_context_before_encode(): in addition to the
 * predictor and bit-statistics fields it also records the decisions made
 * during the encode (mb_intra, mb_skipped, mv_type, mv_dir), the partition
 * PutBitContexts when data partitioning is active, the per-block last
 * nonzero coefficient indices and the interlaced-DCT flag. */
2584 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* motion vectors and MV prediction state */
2587 memcpy(d->mv, s->mv, 2*4*2*sizeof(int))
2588 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2591 d->mb_skip_run= s->mb_skip_run;
2593 d->last_dc[i] = s->last_dc[i];
/* bit-usage statistics */
2596 d->mv_bits= s->mv_bits;
2597 d->i_tex_bits= s->i_tex_bits;
2598 d->p_tex_bits= s->p_tex_bits;
2599 d->i_count= s->i_count;
2600 d->f_count= s->f_count;
2601 d->b_count= s->b_count;
2602 d->skip_count= s->skip_count;
2603 d->misc_bits= s->misc_bits;
/* decisions taken by the trial encode */
2605 d->mb_intra= s->mb_intra;
2606 d->mb_skipped= s->mb_skipped;
2607 d->mv_type= s->mv_type;
2608 d->mv_dir= s->mv_dir;
/* partition bitstream writers (MPEG-4 data partitioning only) */
2610 if(s->data_partitioning){
2612 d->tex_pb= s->tex_pb;
2616 d->block_last_index[i]= s->block_last_index[i];
2617 d->interlaced_dct= s->interlaced_dct;
2618 d->qscale= s->qscale;
2620 d->esc3_level_length= s->esc3_level_length;
/* Trial-encode one macroblock with candidate mode 'type' and keep it if it
 * beats the best score so far.
 *
 * The trial writes into the double-buffered bit/blocks slots selected by
 * *next_block (so the current best candidate's output is preserved in the
 * other slot). With mb_decision == FF_MB_DECISION_RD the MB is additionally
 * reconstructed into the rd_scratchpad and scored as
 * bits*lambda2 + SSE<<FF_LAMBDA_SHIFT; otherwise the score is the bit count
 * alone. On improvement (elided comparison against *dmin) the winning
 * context is saved into *best via copy_context_after_encode(). */
2623 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2624 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2625 int *dmin, int *next_block, int motion_x, int motion_y)
2628 uint8_t *dest_backup[3];
/* start the trial from the pristine "before" snapshot */
2630 copy_context_before_encode(s, backup, type);
2632 s->block= s->blocks[*next_block];
2633 s->pb= pb[*next_block];
2634 if(s->data_partitioning){
2635 s->pb2 = pb2 [*next_block];
2636 s->tex_pb= tex_pb[*next_block];
/* for RD scoring, reconstruct into scratch memory instead of the frame */
2640 memcpy(dest_backup, s->dest, sizeof(s->dest));
2641 s->dest[0] = s->sc.rd_scratchpad;
2642 s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2643 s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2644 av_assert0(s->linesize >= 32); //FIXME
2647 encode_mb(s, motion_x, motion_y);
2649 score= put_bits_count(&s->pb);
2650 if(s->data_partitioning){
2651 score+= put_bits_count(&s->pb2);
2652 score+= put_bits_count(&s->tex_pb);
2655 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2656 ff_mpv_decode_mb(s, s->block);
/* rate-distortion cost: rate term scaled by lambda2 plus distortion */
2658 score *= s->lambda2;
2659 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2663 memcpy(s->dest, dest_backup, sizeof(s->dest));
/* (elided: score comparison against *dmin and *next_block flip) */
2670 copy_context_after_encode(best, s, type);
/* Sum of squared errors between two w x h pixel regions with the given
 * stride. Uses the optimized mecc.sse fast paths for full 16x16 and 8x8
 * blocks (the 16x16 guard condition is on a line elided from this extract);
 * the generic fallback accumulates squared differences via the
 * ff_square_tab lookup, biased by +256 so negative differences index
 * correctly. */
2674 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2675 uint32_t *sq = ff_square_tab + 256;
2680 return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2681 else if(w==8 && h==8)
2682 return s->mecc.sse[1](NULL, src1, src2, stride, 8);
/* generic path for edge macroblocks of non-multiple-of-16 dimensions */
2686 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/* Distortion of the current macroblock: reconstructed (s->dest) vs source
 * (s->new_picture), summed over luma and both chroma planes. w/h are
 * clipped at the right/bottom picture border; for a full interior 16x16 MB
 * the optimized NSSE (noise-preserving SSE, if mb_cmp selects it) or SSE
 * functions are used, otherwise the generic sse() handles partial blocks.
 * NOTE(review): chroma is sampled at half resolution here (mb_x*8, w>>1) --
 * the fast paths visible assume 4:2:0-style subsampling. */
2695 static int sse_mb(MpegEncContext *s){
/* clip MB extent at the picture border */
2699 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2700 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
/* full-size MB: use the DSP fast paths (guard elided from this extract) */
2703 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2704 return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2705 s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2706 s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2708 return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2709 s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2710 s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
/* border MB: generic clipped SSE */
2713 return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2714 +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2715 +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/* Slice-thread worker: motion-estimation pre-pass over this context's MB
 * rows. Uses the pre_dia_size diamond and walks macroblocks in reverse
 * raster order (bottom row first, right to left), calling
 * ff_pre_estimate_p_frame_motion() per MB. */
2718 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2719 MpegEncContext *s= *(void**)arg;
2723 s->me.dia_size= s->avctx->pre_dia_size;
2724 s->first_slice_line=1;
/* reverse scan: rows end_mb_y-1 .. start_mb_y, columns right to left */
2725 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2726 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2727 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2729 s->first_slice_line=0;
/* Slice-thread worker: full motion estimation for this context's MB rows.
 * Maintains the block_index[] pointers incrementally across the row
 * (advanced by 2 per MB) and dispatches to the B-frame or P-frame motion
 * estimator depending on the picture type. */
2737 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2738 MpegEncContext *s= *(void**)arg;
2740 ff_check_alignment();
2742 s->me.dia_size= s->avctx->dia_size;
2743 s->first_slice_line=1;
2744 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2745 s->mb_x=0; //for block init below
2746 ff_init_block_index(s);
2747 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
/* advance the four luma block indices to the next MB column */
2748 s->block_index[0]+=2;
2749 s->block_index[1]+=2;
2750 s->block_index[2]+=2;
2751 s->block_index[3]+=2;
2753 /* compute motion vector & mb_type and store in context */
2754 if(s->pict_type==AV_PICTURE_TYPE_B)
2755 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2757 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2759 s->first_slice_line=0;
/* Slice-thread worker: per-macroblock luma statistics of the source frame.
 * For each 16x16 luma block it stores
 *   mb_var  = (pix_norm1 - sum^2>>8 + 500 + 128) >> 8   (rounded variance)
 *   mb_mean = (sum + 128) >> 8                          (rounded mean)
 * and accumulates the variance into me.mb_var_sum_temp (merged across
 * slice contexts later by merge_context_after_me()). */
2764 static int mb_var_thread(AVCodecContext *c, void *arg){
2765 MpegEncContext *s= *(void**)arg;
2768 ff_check_alignment();
2770 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2771 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
/* top-left pixel of this MB in the source luma plane */
2774 uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2776 int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2778 varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2779 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2781 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2782 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2783 s->me.mb_var_sum_temp += varc;
/* Finish the current slice's bitstream: merge MPEG-4 data partitions (when
 * partitioned_frame) and append MPEG-4 stuffing, or MJPEG stuffing for
 * FMT_MJPEG; then byte-align and flush the main PutBitContext. In two-pass
 * mode (PASS1, non-partitioned) the alignment/flush bits are accounted as
 * misc_bits. */
2789 static void write_slice_end(MpegEncContext *s){
2790 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2791 if(s->partitioned_frame){
2792 ff_mpeg4_merge_partitions(s);
2795 ff_mpeg4_stuffing(&s->pb);
2796 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2797 ff_mjpeg_encode_stuffing(s);
/* byte-align so the next slice/startcode begins on a byte boundary */
2800 avpriv_align_put_bits(&s->pb);
2801 flush_put_bits(&s->pb);
2803 if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2804 s->misc_bits+= get_bits_diff(s);
/* Append one 12-byte H.263 macroblock-info record into the mb_info side
 * data buffer (written at mb_info_ptr + mb_info_size - 12, i.e. the slot
 * most recently reserved by update_mb_info()). Layout:
 *   le32 bit offset of the MB in the bitstream,
 *   u8 qscale, u8 GOB number, le16 MB address within the GOB,
 *   u8 hmv1, u8 vmv1 (predicted MV), u8 hmv2, u8 vmv2 (4MV: unused, 0). */
2807 static void write_mb_info(MpegEncContext *s)
2809 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2810 int offset = put_bits_count(&s->pb);
2811 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2812 int gobn = s->mb_y / s->gob_index;
2814 if (CONFIG_H263_ENCODER)
2815 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2816 bytestream_put_le32(&ptr, offset);
2817 bytestream_put_byte(&ptr, s->qscale);
2818 bytestream_put_byte(&ptr, gobn);
2819 bytestream_put_le16(&ptr, mba);
2820 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2821 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2822 /* 4MV not implemented */
2823 bytestream_put_byte(&ptr, 0); /* hmv2 */
2824 bytestream_put_byte(&ptr, 0); /* vmv2 */
/* Maintain mb_info bookkeeping around the current bitstream position.
 * Called with startcode=0 before each MB: if at least mb_info bytes were
 * written since the last record, reserve a new 12-byte slot. Called with
 * startcode=1 right after a resync/start code: remember the position so the
 * record reserved (possibly just above) is filled at the start-code
 * boundary. (Calls into write_mb_info() are on lines elided from this
 * extract.) */
2827 static void update_mb_info(MpegEncContext *s, int startcode)
2831 if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2832 s->mb_info_size += 12;
2833 s->prev_mb_info = s->last_mb_info;
2836 s->prev_mb_info = put_bits_count(&s->pb)/8;
2837 /* This might have incremented mb_info_size above, and we return without
2838 * actually writing any info into that slot yet. But in that case,
2839 * this will be called again at the start of the after writing the
2840 * start code, actually writing the mb info. */
2844 s->last_mb_info = put_bits_count(&s->pb)/8;
2845 if (!s->mb_info_size)
2846 s->mb_info_size += 12;
/* Grow the shared output bitstream buffer when fewer than 'threshold' bytes
 * remain in s->pb. Only acts when there is a single slice context and s->pb
 * writes directly into avctx->internal->byte_buffer (otherwise growing
 * would invalidate sibling contexts). On success the PutBitContext is
 * rebased onto the new buffer and the cached ptr_lastgob / vbv_delay_ptr
 * pointers are re-derived from their saved offsets.
 * Returns 0 on success (elided), AVERROR(ENOMEM) if the new size would
 * overflow or allocation fails, AVERROR(EINVAL) if still below threshold. */
2850 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2852 if ( s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2853 && s->slice_context_count == 1
2854 && s->pb.buf == s->avctx->internal->byte_buffer) {
/* save pointer positions as offsets; the buffer may move */
2855 int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2856 int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2858 uint8_t *new_buffer = NULL;
2859 int new_buffer_size = 0;
2861 if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2862 av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2863 return AVERROR(ENOMEM);
2866 av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2867 s->avctx->internal->byte_buffer_size + size_increase);
2869 return AVERROR(ENOMEM);
2871 memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2872 av_free(s->avctx->internal->byte_buffer);
2873 s->avctx->internal->byte_buffer = new_buffer;
2874 s->avctx->internal->byte_buffer_size = new_buffer_size;
2875 rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2876 s->ptr_lastgob = s->pb.buf + lastgob_pos;
2877 s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2879 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2880 return AVERROR(EINVAL);
/* Slice-thread worker: encode all macroblocks of this context's rows
 * [start_mb_y, end_mb_y) into the bitstream.
 *
 * Outline of the visible structure:
 *   1. set up double-buffered trial PutBitContexts (pb/pb2/tex_pb),
 *   2. initialize DC predictors, MV predictors and per-codec state,
 *   3. per MB: ensure bitstream space, emit GOB / slice / video-packet
 *      headers at resync points, then either
 *        a. RD mode decision: trial-encode every candidate MB type via
 *           encode_mb_hq() (optionally with QP_RD quantizer search and
 *           MPEG-4 direct modes) and commit the best, or
 *        b. single-mode path: set mv_dir/mv_type/mv from the chosen
 *           candidate type and encode_mb() directly,
 *   4. optional PSNR accumulation, H.263 loop filter, and trailing
 *      RTP/ext-header handling. */
2884 static int encode_thread(AVCodecContext *c, void *arg){
2885 MpegEncContext *s= *(void**)arg;
2886 int mb_x, mb_y, pdif = 0;
2887 int chr_h= 16>>s->chroma_y_shift;
2889 MpegEncContext best_s = { 0 }, backup_s;
/* double-buffered trial bitstreams: slot next_block holds the trial,
 * the other slot holds the best candidate so far */
2890 uint8_t bit_buf[2][MAX_MB_BYTES];
2891 uint8_t bit_buf2[2][MAX_MB_BYTES];
2892 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2893 PutBitContext pb[2], pb2[2], tex_pb[2];
2895 ff_check_alignment();
2898 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2899 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2900 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2903 s->last_bits= put_bits_count(&s->pb);
2914 /* init last dc values */
2915 /* note: quant matrix value (8) is implied here */
2916 s->last_dc[i] = 128 << s->intra_dc_precision;
2918 s->current_picture.encoding_error[i] = 0;
2920 if(s->codec_id==AV_CODEC_ID_AMV){
/* AMV uses non-standard DC predictor resets */
2921 s->last_dc[0] = 128*8/13;
2922 s->last_dc[1] = 128*8/14;
2923 s->last_dc[2] = 128*8/14;
2926 memset(s->last_mv, 0, sizeof(s->last_mv));
/* per-codec slice setup */
2930 switch(s->codec_id){
2931 case AV_CODEC_ID_H263:
2932 case AV_CODEC_ID_H263P:
2933 case AV_CODEC_ID_FLV1:
2934 if (CONFIG_H263_ENCODER)
2935 s->gob_index = H263_GOB_HEIGHT(s->height);
2937 case AV_CODEC_ID_MPEG4:
2938 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2939 ff_mpeg4_init_partitions(s);
2945 s->first_slice_line = 1;
2946 s->ptr_lastgob = s->pb.buf;
2947 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2951 ff_set_qscale(s, s->qscale);
2952 ff_init_block_index(s);
2954 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2955 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2956 int mb_type= s->mb_type[xy];
/* make sure the worst-case MB still fits; grow the buffer if needed */
2960 int size_increase = s->avctx->internal->byte_buffer_size/4
2961 + s->mb_width*MAX_MB_BYTES;
2963 ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
2964 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2965 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2968 if(s->data_partitioning){
2969 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
2970 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2971 av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2977 s->mb_y = mb_y; // moved into loop, can get changed by H.261
2978 ff_update_block_index(s);
2980 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
/* H.261 encodes MBs in a codec-specific order; recompute xy */
2981 ff_h261_reorder_mb_index(s);
2982 xy= s->mb_y*s->mb_stride + s->mb_x;
2983 mb_type= s->mb_type[xy];
2986 /* write gob / video packet header */
2988 int current_packet_size, is_gob_start;
2990 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
/* start a new GOB/packet when the RTP payload budget is reached */
2992 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2994 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
/* per-codec restrictions on where a resync point may begin */
2996 switch(s->codec_id){
2997 case AV_CODEC_ID_H263:
2998 case AV_CODEC_ID_H263P:
2999 if(!s->h263_slice_structured)
3000 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3002 case AV_CODEC_ID_MPEG2VIDEO:
3003 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3004 case AV_CODEC_ID_MPEG1VIDEO:
3005 if(s->mb_skip_run) is_gob_start=0;
3007 case AV_CODEC_ID_MJPEG:
3008 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3013 if(s->start_mb_y != mb_y || mb_x!=0){
3016 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3017 ff_mpeg4_init_partitions(s);
3021 av_assert2((put_bits_count(&s->pb)&7) == 0);
3022 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
/* error_rate testing: pseudo-randomly drop a packet to simulate loss */
3024 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3025 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3026 int d = 100 / s->error_rate;
3028 current_packet_size=0;
3029 s->pb.buf_ptr= s->ptr_lastgob;
3030 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3034 #if FF_API_RTP_CALLBACK
3035 FF_DISABLE_DEPRECATION_WARNINGS
3036 if (s->avctx->rtp_callback){
3037 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3038 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3040 FF_ENABLE_DEPRECATION_WARNINGS
3042 update_mb_info(s, 1);
/* emit the codec-specific resync/slice header */
3044 switch(s->codec_id){
3045 case AV_CODEC_ID_MPEG4:
3046 if (CONFIG_MPEG4_ENCODER) {
3047 ff_mpeg4_encode_video_packet_header(s);
3048 ff_mpeg4_clean_buffers(s);
3051 case AV_CODEC_ID_MPEG1VIDEO:
3052 case AV_CODEC_ID_MPEG2VIDEO:
3053 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3054 ff_mpeg1_encode_slice_header(s);
3055 ff_mpeg1_clean_buffers(s);
3058 case AV_CODEC_ID_H263:
3059 case AV_CODEC_ID_H263P:
3060 if (CONFIG_H263_ENCODER)
3061 ff_h263_encode_gob_header(s, mb_y);
3065 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3066 int bits= put_bits_count(&s->pb);
3067 s->misc_bits+= bits - s->last_bits;
3071 s->ptr_lastgob += current_packet_size;
3072 s->first_slice_line=1;
3073 s->resync_mb_x=mb_x;
3074 s->resync_mb_y=mb_y;
3078 if( (s->resync_mb_x == s->mb_x)
3079 && s->resync_mb_y+1 == s->mb_y){
3080 s->first_slice_line=0;
3084 s->dquant=0; //only for QP_RD
3086 update_mb_info(s, 0);
/* ---- RD mode decision: several candidate MB types (or QP_RD) ---- */
3088 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3090 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3092 copy_context_before_encode(&backup_s, s, -1);
3094 best_s.data_partitioning= s->data_partitioning;
3095 best_s.partitioned_frame= s->partitioned_frame;
3096 if(s->data_partitioning){
3097 backup_s.pb2= s->pb2;
3098 backup_s.tex_pb= s->tex_pb;
/* try each candidate type flagged in mb_type, keeping the cheapest */
3101 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3102 s->mv_dir = MV_DIR_FORWARD;
3103 s->mv_type = MV_TYPE_16X16;
3105 s->mv[0][0][0] = s->p_mv_table[xy][0];
3106 s->mv[0][0][1] = s->p_mv_table[xy][1];
3107 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3108 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3110 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3111 s->mv_dir = MV_DIR_FORWARD;
3112 s->mv_type = MV_TYPE_FIELD;
3115 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3116 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3117 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3119 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3120 &dmin, &next_block, 0, 0);
3122 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3123 s->mv_dir = MV_DIR_FORWARD;
3124 s->mv_type = MV_TYPE_16X16;
3128 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3129 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3131 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3132 s->mv_dir = MV_DIR_FORWARD;
3133 s->mv_type = MV_TYPE_8X8;
3136 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3137 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3139 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3140 &dmin, &next_block, 0, 0);
3142 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3143 s->mv_dir = MV_DIR_FORWARD;
3144 s->mv_type = MV_TYPE_16X16;
3146 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3147 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3148 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3149 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3151 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3152 s->mv_dir = MV_DIR_BACKWARD;
3153 s->mv_type = MV_TYPE_16X16;
3155 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3156 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3157 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3158 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3160 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3161 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3162 s->mv_type = MV_TYPE_16X16;
3164 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3165 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3166 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3167 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3168 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3169 &dmin, &next_block, 0, 0);
3171 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3172 s->mv_dir = MV_DIR_FORWARD;
3173 s->mv_type = MV_TYPE_FIELD;
3176 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3177 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3178 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3180 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3181 &dmin, &next_block, 0, 0);
3183 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3184 s->mv_dir = MV_DIR_BACKWARD;
3185 s->mv_type = MV_TYPE_FIELD;
3188 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3189 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3190 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3192 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3193 &dmin, &next_block, 0, 0);
3195 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3196 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3197 s->mv_type = MV_TYPE_FIELD;
3199 for(dir=0; dir<2; dir++){
3201 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3202 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3203 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3206 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3207 &dmin, &next_block, 0, 0);
3209 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3211 s->mv_type = MV_TYPE_16X16;
3215 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3216 &dmin, &next_block, 0, 0);
3217 if(s->h263_pred || s->h263_aic){
3219 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3221 ff_clean_intra_table_entries(s); //old mode?
/* ---- optional QP_RD: retry the best mode at neighbouring qscales ---- */
3225 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3226 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3227 const int last_qp= backup_s.qscale;
3230 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3231 static const int dquant_tab[4]={-1,1,-2,2};
3232 int storecoefs = s->mb_intra && s->dc_val[0];
3234 av_assert2(backup_s.dquant == 0);
3237 s->mv_dir= best_s.mv_dir;
3238 s->mv_type = MV_TYPE_16X16;
3239 s->mb_intra= best_s.mb_intra;
3240 s->mv[0][0][0] = best_s.mv[0][0][0];
3241 s->mv[0][0][1] = best_s.mv[0][0][1];
3242 s->mv[1][0][0] = best_s.mv[1][0][0];
3243 s->mv[1][0][1] = best_s.mv[1][0][1];
/* B-frames only try +-1; others also try +-2 (table order -1,1,-2,2) */
3245 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3246 for(; qpi<4; qpi++){
3247 int dquant= dquant_tab[qpi];
3248 qp= last_qp + dquant;
3249 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3251 backup_s.dquant= dquant;
/* save intra DC/AC predictors; the trial overwrites them */
3254 dc[i]= s->dc_val[0][ s->block_index[i] ];
3255 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3259 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3260 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3261 if(best_s.qscale != qp){
/* trial rejected: restore the saved predictors */
3264 s->dc_val[0][ s->block_index[i] ]= dc[i];
3265 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
/* ---- MPEG-4 direct-mode candidates ---- */
3272 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3273 int mx= s->b_direct_mv_table[xy][0];
3274 int my= s->b_direct_mv_table[xy][1];
3276 backup_s.dquant = 0;
3277 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3279 ff_mpeg4_set_direct_mv(s, mx, my);
3280 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3281 &dmin, &next_block, mx, my);
3283 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3284 backup_s.dquant = 0;
3285 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3287 ff_mpeg4_set_direct_mv(s, 0, 0);
3288 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3289 &dmin, &next_block, 0, 0);
/* ---- SKIP_RD: try forcing the best non-intra mode as coded-skip ---- */
3291 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3294 coded |= s->block_last_index[i];
3297 memcpy(s->mv, best_s.mv, sizeof(s->mv));
3298 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3299 mx=my=0; //FIXME find the one we actually used
3300 ff_mpeg4_set_direct_mv(s, mx, my);
3301 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3309 s->mv_dir= best_s.mv_dir;
3310 s->mv_type = best_s.mv_type;
3312 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3313 s->mv[0][0][1] = best_s.mv[0][0][1];
3314 s->mv[1][0][0] = best_s.mv[1][0][0];
3315 s->mv[1][0][1] = best_s.mv[1][0][1];*/
3318 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3319 &dmin, &next_block, mx, my);
/* commit the winning candidate: copy its context and bitstream out */
3324 s->current_picture.qscale_table[xy] = best_s.qscale;
3326 copy_context_after_encode(s, &best_s, -1);
3328 pb_bits_count= put_bits_count(&s->pb);
3329 flush_put_bits(&s->pb);
3330 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3333 if(s->data_partitioning){
3334 pb2_bits_count= put_bits_count(&s->pb2);
3335 flush_put_bits(&s->pb2);
3336 avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3337 s->pb2= backup_s.pb2;
3339 tex_pb_bits_count= put_bits_count(&s->tex_pb);
3340 flush_put_bits(&s->tex_pb);
3341 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3342 s->tex_pb= backup_s.tex_pb;
3344 s->last_bits= put_bits_count(&s->pb);
3346 if (CONFIG_H263_ENCODER &&
3347 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3348 ff_h263_update_motion_val(s);
/* if the winner lives in the scratchpad, copy it to the real frame */
3350 if(next_block==0){ //FIXME 16 vs linesize16
3351 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3352 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3353 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3356 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3357 ff_mpv_decode_mb(s, s->block);
/* ---- single-mode path: only one candidate MB type ---- */
3359 int motion_x = 0, motion_y = 0;
3360 s->mv_type=MV_TYPE_16X16;
3361 // only one MB-Type possible
3364 case CANDIDATE_MB_TYPE_INTRA:
3367 motion_x= s->mv[0][0][0] = 0;
3368 motion_y= s->mv[0][0][1] = 0;
3370 case CANDIDATE_MB_TYPE_INTER:
3371 s->mv_dir = MV_DIR_FORWARD;
3373 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3374 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3376 case CANDIDATE_MB_TYPE_INTER_I:
3377 s->mv_dir = MV_DIR_FORWARD;
3378 s->mv_type = MV_TYPE_FIELD;
3381 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3382 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3383 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3386 case CANDIDATE_MB_TYPE_INTER4V:
3387 s->mv_dir = MV_DIR_FORWARD;
3388 s->mv_type = MV_TYPE_8X8;
3391 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3392 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3395 case CANDIDATE_MB_TYPE_DIRECT:
3396 if (CONFIG_MPEG4_ENCODER) {
3397 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3399 motion_x=s->b_direct_mv_table[xy][0];
3400 motion_y=s->b_direct_mv_table[xy][1];
3401 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3404 case CANDIDATE_MB_TYPE_DIRECT0:
3405 if (CONFIG_MPEG4_ENCODER) {
3406 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3408 ff_mpeg4_set_direct_mv(s, 0, 0);
3411 case CANDIDATE_MB_TYPE_BIDIR:
3412 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3414 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3415 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3416 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3417 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3419 case CANDIDATE_MB_TYPE_BACKWARD:
3420 s->mv_dir = MV_DIR_BACKWARD;
3422 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3423 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3425 case CANDIDATE_MB_TYPE_FORWARD:
3426 s->mv_dir = MV_DIR_FORWARD;
3428 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3429 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3431 case CANDIDATE_MB_TYPE_FORWARD_I:
3432 s->mv_dir = MV_DIR_FORWARD;
3433 s->mv_type = MV_TYPE_FIELD;
3436 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3437 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3438 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3441 case CANDIDATE_MB_TYPE_BACKWARD_I:
3442 s->mv_dir = MV_DIR_BACKWARD;
3443 s->mv_type = MV_TYPE_FIELD;
3446 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3447 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3448 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3451 case CANDIDATE_MB_TYPE_BIDIR_I:
3452 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3453 s->mv_type = MV_TYPE_FIELD;
3455 for(dir=0; dir<2; dir++){
3457 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3458 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3459 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3464 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3467 encode_mb(s, motion_x, motion_y);
3469 // RAL: Update last macroblock type
3470 s->last_mv_dir = s->mv_dir;
3472 if (CONFIG_H263_ENCODER &&
3473 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3474 ff_h263_update_motion_val(s);
3476 ff_mpv_decode_mb(s, s->block);
3479 /* clean the MV table in IPS frames for direct mode in B frames */
3480 if(s->mb_intra /* && I,P,S_TYPE */){
3481 s->p_mv_table[xy][0]=0;
3482 s->p_mv_table[xy][1]=0;
/* accumulate per-plane SSE for PSNR reporting */
3485 if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3489 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3490 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3492 s->current_picture.encoding_error[0] += sse(
3493 s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3494 s->dest[0], w, h, s->linesize);
3495 s->current_picture.encoding_error[1] += sse(
3496 s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3497 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3498 s->current_picture.encoding_error[2] += sse(
3499 s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3500 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3503 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3504 ff_h263_loop_filter(s);
3506 ff_dlog(s->avctx, "MB %d %d bits\n",
3507 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3511 //not beautiful here but we must write it before flushing so it has to be here
3512 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3513 ff_msmpeg4_encode_ext_header(s);
3517 #if FF_API_RTP_CALLBACK
3518 FF_DISABLE_DEPRECATION_WARNINGS
3519 /* Send the last GOB if RTP */
3520 if (s->avctx->rtp_callback) {
3521 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3522 pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3523 /* Call the RTP callback to send the last GOB */
3525 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3527 FF_ENABLE_DEPRECATION_WARNINGS
/* Merge a per-slice-thread counter into the main context and reset the
 * source field so the value is not double-counted on a later merge.
 * Wrapped in do { } while (0) so that `MERGE(x);` expands to exactly one
 * statement: the original two-statement expansion would execute the
 * `src->field = 0` part unconditionally when used in an un-braced
 * if/else body. */
#define MERGE(field) do { dst->field += src->field; src->field = 0; } while (0)
/* Fold the motion-estimation statistics gathered by a slice thread
 * context (src) into the main context (dst). MERGE() adds src's field
 * into dst's and zeroes src's, so repeated merges do not double count. */
3534 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3535 MERGE(me.scene_change_score);
3536 MERGE(me.mc_mb_var_sum_temp);
3537 MERGE(me.mb_var_sum_temp);
/* Fold the per-slice encoding statistics of src into dst after the
 * encode pass, then append src's bitstream onto dst's.
 * Called once per extra slice context, in order, so the slices end up
 * concatenated in the output in the correct sequence. */
3540 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3543 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3544 MERGE(dct_count[1]);
3553 MERGE(er.error_count);
3554 MERGE(padding_bug_score);
3555 MERGE(current_picture.encoding_error[0]);
3556 MERGE(current_picture.encoding_error[1]);
3557 MERGE(current_picture.encoding_error[2]);
/* Noise-reduction error accumulators are per-coefficient (8x8 = 64),
 * merged only when the feature is enabled. */
3559 if(dst->avctx->noise_reduction){
3560 for(i=0; i<64; i++){
3561 MERGE(dct_error_sum[0][i]);
3562 MERGE(dct_error_sum[1][i]);
/* Both bitstreams must be byte-aligned before concatenation; slices are
 * flushed to byte boundaries by the encode threads. */
3566 assert(put_bits_count(&src->pb) % 8 ==0);
3567 assert(put_bits_count(&dst->pb) % 8 ==0);
3568 avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3569 flush_put_bits(&dst->pb);
/* Pick the quantiser/lambda for the current picture.
 * Priority: an explicitly queued next_lambda, else the rate-control
 * estimate (unless qscale is fixed). With dry_run set, state that would
 * commit the choice (next_lambda) is left untouched so the estimate can
 * be redone later. Returns a negative value on rate-control failure
 * (error path elided from this view). */
3572 static int estimate_qp(MpegEncContext *s, int dry_run){
3573 if (s->next_lambda){
3574 s->current_picture_ptr->f->quality =
3575 s->current_picture.f->quality = s->next_lambda;
3576 if(!dry_run) s->next_lambda= 0;
3577 } else if (!s->fixed_qscale) {
3578 s->current_picture_ptr->f->quality =
3579 s->current_picture.f->quality = ff_rate_estimate_qscale(s, dry_run);
3580 if (s->current_picture.f->quality < 0)
/* With adaptive quantisation the per-MB qscale table needs codec-specific
 * cleanup (qscale deltas are constrained by the bitstream syntax). */
3584 if(s->adaptive_quant){
3585 switch(s->codec_id){
3586 case AV_CODEC_ID_MPEG4:
3587 if (CONFIG_MPEG4_ENCODER)
3588 ff_clean_mpeg4_qscales(s);
3590 case AV_CODEC_ID_H263:
3591 case AV_CODEC_ID_H263P:
3592 case AV_CODEC_ID_FLV1:
3593 if (CONFIG_H263_ENCODER)
3594 ff_clean_h263_qscales(s);
3597 ff_init_qscale_tab(s);
3600 s->lambda= s->lambda_table[0];
/* non-adaptive: single lambda straight from the chosen picture quality */
3603 s->lambda = s->current_picture.f->quality;
3608 /* must be called before writing the header */
/* Derive the temporal distances used by B-frame prediction:
 * pp_time = distance between the two reference (non-B) pictures,
 * pb_time = distance from the previous reference to this B picture.
 * Uses the picture PTS scaled by the time base numerator. */
3609 static void set_frame_distances(MpegEncContext * s){
3610 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3611 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3613 if(s->pict_type==AV_PICTURE_TYPE_B){
3614 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3615 assert(s->pb_time > 0 && s->pb_time < s->pp_time);
/* reference picture: advance the last-reference timestamp */
3617 s->pp_time= s->time - s->last_non_b_time;
3618 s->last_non_b_time= s->time;
3619 assert(s->picture_number==0 || s->pp_time > 0);
/* Encode one picture: set up timing and rounding state, run motion
 * estimation across all slice thread contexts, possibly promote the
 * picture to an I frame on scene change, select f_code/b_code, clip
 * long motion vectors, estimate the quantiser, build quant matrices
 * (MJPEG/AMV), write the per-format picture header, then run the
 * per-slice encode threads and merge their results.
 * Returns 0 on success, negative on error (error paths elided here). */
3623 static int encode_picture(MpegEncContext *s, int picture_number)
3627 int context_count = s->slice_context_count;
3629 s->picture_number = picture_number;
3631 /* Reset the average MB variance */
3632 s->me.mb_var_sum_temp =
3633 s->me.mc_mb_var_sum_temp = 0;
3635 /* we need to initialize some time vars before we can encode B-frames */
3636 // RAL: Condition added for MPEG1VIDEO
3637 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3638 set_frame_distances(s);
3639 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3640 ff_set_mpeg4_time(s);
3642 s->me.scene_change_score=0;
3644 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* Rounding control: MSMPEG4v3+ needs no_rounding on I frames; codecs with
 * flip-flop rounding toggle it on each reference (non-B) picture. */
3646 if(s->pict_type==AV_PICTURE_TYPE_I){
3647 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3648 else s->no_rounding=0;
3649 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3650 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3651 s->no_rounding ^= 1;
/* Two-pass: take the qscale decided by pass 1; otherwise reuse the last
 * lambda of the same picture class as the ME starting point. */
3654 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3655 if (estimate_qp(s,1) < 0)
3657 ff_get_2pass_fcode(s);
3658 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3659 if(s->pict_type==AV_PICTURE_TYPE_B)
3660 s->lambda= s->last_lambda_for[s->pict_type];
3662 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* Most codecs share one intra matrix for luma and chroma; MJPEG/AMV keep
 * separate ones, so only alias the pointers for the other codecs. */
3666 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3667 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3668 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3669 s->q_chroma_intra_matrix = s->q_intra_matrix;
3670 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3673 s->mb_intra=0; //for the rate distortion & bit compare functions
3674 for(i=1; i<context_count; i++){
3675 ret = ff_update_duplicate_context(s->thread_context[i], s);
3683 /* Estimate motion for every MB */
3684 if(s->pict_type != AV_PICTURE_TYPE_I){
/* scale lambda by me_penalty_compensation (fixed-point, >>8) */
3685 s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3686 s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3687 if (s->pict_type != AV_PICTURE_TYPE_B) {
3688 if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3689 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3693 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3694 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
/* I-frame: no ME; mark every MB intra */
3696 for(i=0; i<s->mb_stride*s->mb_height; i++)
3697 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3699 if(!s->fixed_qscale){
3700 /* finding spatial complexity for I-frame rate control */
3701 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3704 for(i=1; i<context_count; i++){
3705 merge_context_after_me(s, s->thread_context[i]);
3707 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3708 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* Scene change: re-type the P frame as I and mark all MBs intra */
3711 if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3712 s->pict_type= AV_PICTURE_TYPE_I;
3713 for(i=0; i<s->mb_stride*s->mb_height; i++)
3714 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3715 if(s->msmpeg4_version >= 3)
3717 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3718 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* P/S frames: pick the smallest f_code covering all MVs, then clip or
 * re-type MBs whose vectors exceed the representable range. */
3722 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3723 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3725 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3727 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3728 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3729 s->f_code= FFMAX3(s->f_code, a, b);
3732 ff_fix_long_p_mvs(s);
3733 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3734 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3738 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3739 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
/* B frames: forward (f_code) and backward (b_code) ranges are chosen
 * independently, each covering both the one-way and bidir MV tables. */
3744 if(s->pict_type==AV_PICTURE_TYPE_B){
3747 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3748 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3749 s->f_code = FFMAX(a, b);
3751 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3752 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3753 s->b_code = FFMAX(a, b);
3755 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3756 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3757 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3758 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3759 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3761 for(dir=0; dir<2; dir++){
3764 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3765 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3766 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3767 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* final (non-dry-run) quantiser decision */
3775 if (estimate_qp(s, 0) < 0)
3778 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3779 s->pict_type == AV_PICTURE_TYPE_I &&
3780 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3781 s->qscale= 3; //reduce clipping problems
/* MJPEG bakes qscale into the quant matrices themselves */
3783 if (s->out_format == FMT_MJPEG) {
3784 const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3785 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3787 if (s->avctx->intra_matrix) {
3789 luma_matrix = s->avctx->intra_matrix;
3791 if (s->avctx->chroma_intra_matrix)
3792 chroma_matrix = s->avctx->chroma_intra_matrix;
3794 /* for mjpeg, we do include qscale in the matrix */
3796 int j = s->idsp.idct_permutation[i];
3798 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3799 s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3801 s->y_dc_scale_table=
3802 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3803 s->chroma_intra_matrix[0] =
3804 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3805 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3806 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3807 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3808 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV uses fixed DC scales (13/14) and the sp5x quant tables */
3811 if(s->codec_id == AV_CODEC_ID_AMV){
3812 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3813 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3815 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3817 s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3818 s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3820 s->y_dc_scale_table= y;
3821 s->c_dc_scale_table= c;
3822 s->intra_matrix[0] = 13;
3823 s->chroma_intra_matrix[0] = 14;
3824 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3825 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3826 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3827 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3831 //FIXME var duplication
3832 s->current_picture_ptr->f->key_frame =
3833 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3834 s->current_picture_ptr->f->pict_type =
3835 s->current_picture.f->pict_type = s->pict_type;
3837 if (s->current_picture.f->key_frame)
3838 s->picture_in_gop_number=0;
/* write the per-format picture header */
3840 s->mb_x = s->mb_y = 0;
3841 s->last_bits= put_bits_count(&s->pb);
3842 switch(s->out_format) {
3844 if (CONFIG_MJPEG_ENCODER)
3845 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3846 s->intra_matrix, s->chroma_intra_matrix);
3849 if (CONFIG_H261_ENCODER)
3850 ff_h261_encode_picture_header(s, picture_number);
3853 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3854 ff_wmv2_encode_picture_header(s, picture_number);
3855 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3856 ff_msmpeg4_encode_picture_header(s, picture_number);
3857 else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3858 ret = ff_mpeg4_encode_picture_header(s, picture_number);
3861 } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3862 ret = ff_rv10_encode_picture_header(s, picture_number);
3866 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3867 ff_rv20_encode_picture_header(s, picture_number);
3868 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3869 ff_flv_encode_picture_header(s, picture_number);
3870 else if (CONFIG_H263_ENCODER)
3871 ff_h263_encode_picture_header(s, picture_number);
3874 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3875 ff_mpeg1_encode_picture_header(s, picture_number);
3880 bits= put_bits_count(&s->pb);
3881 s->header_bits= bits - s->last_bits;
/* run the per-slice encode threads, then concatenate their output */
3883 for(i=1; i<context_count; i++){
3884 update_duplicate_context_after_me(s->thread_context[i], s);
3886 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3887 for(i=1; i<context_count; i++){
3888 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3889 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
3890 merge_context_after_encode(s, s->thread_context[i]);
/* DCT-domain noise reduction (C reference): accumulate the signed
 * coefficient magnitudes into dct_error_sum[intra][] and shrink each
 * coefficient toward zero by the learned per-coefficient dct_offset,
 * clamping at zero so the sign never flips. */
3896 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3897 const int intra= s->mb_intra;
3900 s->dct_count[intra]++;
3902 for(i=0; i<64; i++){
3903 int level= block[i];
/* positive branch: record magnitude, subtract offset, clamp at 0 */
3907 s->dct_error_sum[intra][i] += level;
3908 level -= s->dct_offset[intra][i];
3909 if(level<0) level=0;
/* negative branch: mirror of the above (error sum decreases) */
3911 s->dct_error_sum[intra][i] -= level;
3912 level += s->dct_offset[intra][i];
3913 if(level>0) level=0;
/* Trellis (rate-distortion optimal) quantisation of one 8x8 block.
 * For each scan position it considers up to two candidate levels plus
 * zero, and runs a Viterbi-style search over "survivor" paths that
 * minimises distortion + lambda * bits.
 * Returns the index of the last non-zero coefficient (or a negative
 * value when the block quantises to nothing); *overflow is set when a
 * level exceeded max_qcoeff. */
3920 static int dct_quantize_trellis_c(MpegEncContext *s,
3921 int16_t *block, int n,
3922 int qscale, int *overflow){
3924 const uint16_t *matrix;
3925 const uint8_t *scantable= s->intra_scantable.scantable;
3926 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3928 unsigned int threshold1, threshold2;
3940 int coeff_count[64];
3941 int qmul, qadd, start_i, last_non_zero, i, dc;
3942 const int esc_length= s->ac_esc_length;
3944 uint8_t * last_length;
3945 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3948 s->fdsp.fdct(block);
3950 if(s->dct_error_sum)
3951 s->denoise_dct(s, block);
3953 qadd= ((qscale-1)|1)*8;
3955 if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3956 else mpeg2_qscale = qscale << 1;
3967 /* For AIC we skip quant/dequant of INTRADC */
3972 /* note: block[0] is assumed to be positive */
3973 block[0] = (block[0] + (q >> 1)) / q;
/* intra path: pick luma vs chroma matrices and VLC length tables */
3976 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
3977 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
3978 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
3979 bias= 1<<(QMAT_SHIFT-1);
3981 if (n > 3 && s->intra_chroma_ac_vlc_length) {
3982 length = s->intra_chroma_ac_vlc_length;
3983 last_length= s->intra_chroma_ac_vlc_last_length;
3985 length = s->intra_ac_vlc_length;
3986 last_length= s->intra_ac_vlc_last_length;
/* inter path */
3991 qmat = s->q_inter_matrix[qscale];
3992 matrix = s->inter_matrix;
3993 length = s->inter_ac_vlc_length;
3994 last_length= s->inter_ac_vlc_last_length;
3998 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3999 threshold2= (threshold1<<1);
/* scan backwards to find the last coefficient that survives quantisation */
4001 for(i=63; i>=start_i; i--) {
4002 const int j = scantable[i];
4003 int level = block[j] * qmat[j];
4005 if(((unsigned)(level+threshold1))>threshold2){
/* forward pass: compute the candidate levels (level and level-1) for
 * every position up to last_non_zero */
4011 for(i=start_i; i<=last_non_zero; i++) {
4012 const int j = scantable[i];
4013 int level = block[j] * qmat[j];
4015 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4016 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4017 if(((unsigned)(level+threshold1))>threshold2){
4019 level= (bias + level)>>QMAT_SHIFT;
4021 coeff[1][i]= level-1;
4022 // coeff[2][k]= level-2;
4024 level= (bias - level)>>QMAT_SHIFT;
4025 coeff[0][i]= -level;
4026 coeff[1][i]= -level+1;
4027 // coeff[2][k]= -level+2;
4029 coeff_count[i]= FFMIN(level, 2);
4030 av_assert2(coeff_count[i]);
4033 coeff[0][i]= (level>>31)|1;
4038 *overflow= s->max_qcoeff < max; //overflow might have happened
4040 if(last_non_zero < start_i){
4041 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4042 return last_non_zero;
4045 score_tab[start_i]= 0;
4046 survivor[0]= start_i;
/* trellis search: for each position, try each candidate level against
 * every surviving run start and keep the cheapest path */
4049 for(i=start_i; i<=last_non_zero; i++){
4050 int level_index, j, zero_distortion;
4051 int dct_coeff= FFABS(block[ scantable[i] ]);
4052 int best_score=256*256*256*120;
/* ifast FDCT output is scaled by the AAN factors; undo for distortion */
4054 if (s->fdsp.fdct == ff_fdct_ifast)
4055 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4056 zero_distortion= dct_coeff*dct_coeff;
4058 for(level_index=0; level_index < coeff_count[i]; level_index++){
4060 int level= coeff[level_index][i];
4061 const int alevel= FFABS(level);
/* reconstruct the dequantised value the decoder would produce */
4066 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4067 unquant_coeff= alevel*qmul + qadd;
4068 } else if(s->out_format == FMT_MJPEG) {
4069 j = s->idsp.idct_permutation[scantable[i]];
4070 unquant_coeff = alevel * matrix[j] * 8;
4072 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4074 unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4075 unquant_coeff = (unquant_coeff - 1) | 1;
4077 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4078 unquant_coeff = (unquant_coeff - 1) | 1;
4083 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* levels representable by the normal VLC (|level| <= 127 after bias) */
4085 if((level&(~127)) == 0){
4086 for(j=survivor_count-1; j>=0; j--){
4087 int run= i - survivor[j];
4088 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4089 score += score_tab[i-run];
4091 if(score < best_score){
4094 level_tab[i+1]= level-64;
4098 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4099 for(j=survivor_count-1; j>=0; j--){
4100 int run= i - survivor[j];
4101 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4102 score += score_tab[i-run];
4103 if(score < last_score){
4106 last_level= level-64;
/* otherwise the coefficient must use the escape code */
4112 distortion += esc_length*lambda;
4113 for(j=survivor_count-1; j>=0; j--){
4114 int run= i - survivor[j];
4115 int score= distortion + score_tab[i-run];
4117 if(score < best_score){
4120 level_tab[i+1]= level-64;
4124 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4125 for(j=survivor_count-1; j>=0; j--){
4126 int run= i - survivor[j];
4127 int score= distortion + score_tab[i-run];
4128 if(score < last_score){
4131 last_level= level-64;
4139 score_tab[i+1]= best_score;
4141 //Note: there is a vlc code in mpeg4 which is 1 bit shorter than another one with a shorter run and the same level
4142 if(last_non_zero <= 27){
4143 for(; survivor_count; survivor_count--){
4144 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4148 for(; survivor_count; survivor_count--){
4149 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4154 survivor[ survivor_count++ ]= i+1;
/* formats without a per-coefficient "last" flag: choose the best cut
 * position over all path endings */
4157 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4158 last_score= 256*256*256*120;
4159 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4160 int score= score_tab[i];
4161 if(i) score += lambda*2; //FIXME more exact?
4163 if(score < last_score){
4166 last_level= level_tab[i];
4167 last_run= run_tab[i];
4172 s->coded_score[n] = last_score;
4174 dc= FFABS(block[0]);
4175 last_non_zero= last_i - 1;
4176 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4178 if(last_non_zero < start_i)
4179 return last_non_zero;
/* special case: only the first coefficient survives; decide whether the
 * single level is worth its bits at all */
4181 if(last_non_zero == 0 && start_i == 0){
4183 int best_score= dc * dc;
4185 for(i=0; i<coeff_count[0]; i++){
4186 int level= coeff[i][0];
4187 int alevel= FFABS(level);
4188 int unquant_coeff, score, distortion;
4190 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4191 unquant_coeff= (alevel*qmul + qadd)>>3;
4193 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4194 unquant_coeff = (unquant_coeff - 1) | 1;
4196 unquant_coeff = (unquant_coeff + 4) >> 3;
4197 unquant_coeff<<= 3 + 3;
4199 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4201 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4202 else score= distortion + esc_length*lambda;
4204 if(score < best_score){
4206 best_level= level - 64;
4209 block[0]= best_level;
4210 s->coded_score[n] = best_score - dc*dc;
4211 if(best_level == 0) return -1;
4212 else return last_non_zero;
/* back-track the winning path and write the levels into the block in
 * IDCT-permuted order */
4216 av_assert2(last_level);
4218 block[ perm_scantable[last_non_zero] ]= last_level;
4221 for(; i>start_i; i -= run_tab[i] + 1){
4222 block[ perm_scantable[i-1] ]= level_tab[i];
4225 return last_non_zero;
4228 //#define REFINE_STATS 1
/* 8x8 IDCT basis functions in BASIS_SHIFT fixed point, indexed by
 * (permuted) coefficient and spatial position; filled lazily by
 * build_basis() and used by dct_quantize_refine(). */
4229 static int16_t basis[64][64];
/* Build the DCT-II basis table, applying the given coefficient
 * permutation so basis[] can be indexed directly with permuted scan
 * positions. The sqrt(0.5) factors are the DC normalisation terms. */
4231 static void build_basis(uint8_t *perm){
4238 double s= 0.25*(1<<BASIS_SHIFT);
4240 int perm_index= perm[index];
4241 if(i==0) s*= sqrt(0.5);
4242 if(j==0) s*= sqrt(0.5);
4243 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4250 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4251 int16_t *block, int16_t *weight, int16_t *orig,
/* Iteratively refine an already-quantised block: starting from the
 * current levels, repeatedly try +/-1 changes to each coefficient,
 * scoring each candidate by weighted reconstruction error (via the
 * basis[] tables) plus VLC bit cost, and keep the best change until no
 * change improves the score. Returns the new last-non-zero index. */
4254 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4255 const uint8_t *scantable= s->intra_scantable.scantable;
4256 const uint8_t *perm_scantable= s->intra_scantable.permutated;
4257 // unsigned int threshold1, threshold2;
4262 int qmul, qadd, start_i, last_non_zero, i, dc;
4264 uint8_t * last_length;
4266 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* REFINE_STATS counters (debug only) */
4269 static int after_last=0;
4270 static int to_zero=0;
4271 static int from_zero=0;
4274 static int messed_sign=0;
/* lazily build the IDCT basis table on first use */
4277 if(basis[0][0] == 0)
4278 build_basis(s->idsp.idct_permutation);
4289 /* For AIC we skip quant/dequant of INTRADC */
4293 q <<= RECON_SHIFT-3;
4294 /* note: block[0] is assumed to be positive */
4296 // block[0] = (block[0] + (q >> 1)) / q;
4298 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4299 // bias= 1<<(QMAT_SHIFT-1);
4300 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4301 length = s->intra_chroma_ac_vlc_length;
4302 last_length= s->intra_chroma_ac_vlc_last_length;
4304 length = s->intra_ac_vlc_length;
4305 last_length= s->intra_ac_vlc_last_length;
4310 length = s->inter_ac_vlc_length;
4311 last_length= s->inter_ac_vlc_last_length;
4313 last_non_zero = s->block_last_index[n];
/* rem[] starts as the (negated, RECON_SHIFT fixed-point) target signal */
4318 dc += (1<<(RECON_SHIFT-1));
4319 for(i=0; i<64; i++){
4320 rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig directly instead of copying to rem[]
4323 STOP_TIMER("memset rem[]")}
/* derive per-coefficient weights from qns (quantizer noise shaping) */
4326 for(i=0; i<64; i++){
4331 w= FFABS(weight[i]) + qns*one;
4332 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4335 // w=weight[i] = (63*qns + (w/2)) / w;
4338 av_assert2(w<(1<<6));
4341 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* subtract the current reconstruction from rem[] coefficient by
 * coefficient, and record the run lengths */
4347 for(i=start_i; i<=last_non_zero; i++){
4348 int j= perm_scantable[i];
4349 const int level= block[j];
4353 if(level<0) coeff= qmul*level - qadd;
4354 else coeff= qmul*level + qadd;
4355 run_tab[rle_index++]=run;
4358 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
4364 if(last_non_zero>0){
4365 STOP_TIMER("init rem[]")
/* main refinement loop: score of "change nothing" is the baseline */
4372 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4375 int run2, best_unquant_change=0, analyze_gradient;
4379 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
4381 if(analyze_gradient){
/* gradient d1[] guides which zero coefficients are worth raising */
4385 for(i=0; i<64; i++){
4388 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
4391 STOP_TIMER("rem*w*w")}
/* try +/-1 on the intra DC coefficient */
4401 const int level= block[0];
4402 int change, old_coeff;
4404 av_assert2(s->mb_intra);
4408 for(change=-1; change<=1; change+=2){
4409 int new_level= level + change;
4410 int score, new_coeff;
4412 new_coeff= q*new_level;
4413 if(new_coeff >= 2048 || new_coeff < 0)
4416 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4417 new_coeff - old_coeff);
4418 if(score<best_score){
4421 best_change= change;
4422 best_unquant_change= new_coeff - old_coeff;
4429 run2= run_tab[rle_index++];
/* try +/-1 on every AC coefficient, accounting for the VLC bit-cost
 * delta of changing level, run structure, and the "last" flag */
4433 for(i=start_i; i<64; i++){
4434 int j= perm_scantable[i];
4435 const int level= block[j];
4436 int change, old_coeff;
4438 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4442 if(level<0) old_coeff= qmul*level - qadd;
4443 else old_coeff= qmul*level + qadd;
4444 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4448 av_assert2(run2>=0 || i >= last_non_zero );
4451 for(change=-1; change<=1; change+=2){
4452 int new_level= level + change;
4453 int score, new_coeff, unquant_change;
4456 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4460 if(new_level<0) new_coeff= qmul*new_level - qadd;
4461 else new_coeff= qmul*new_level + qadd;
4462 if(new_coeff >= 2048 || new_coeff <= -2048)
4464 //FIXME check for overflow
/* nonzero -> nonzero: simple VLC length delta */
4467 if(level < 63 && level > -63){
4468 if(i < last_non_zero)
4469 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4470 - length[UNI_AC_ENC_INDEX(run, level+64)];
4472 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4473 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* zero -> nonzero: a run is split in two */
4476 av_assert2(FFABS(new_level)==1);
4478 if(analyze_gradient){
4479 int g= d1[ scantable[i] ];
4480 if(g && (g^new_level) >= 0)
4484 if(i < last_non_zero){
4485 int next_i= i + run2 + 1;
4486 int next_level= block[ perm_scantable[next_i] ] + 64;
4488 if(next_level&(~127))
4491 if(next_i < last_non_zero)
4492 score += length[UNI_AC_ENC_INDEX(run, 65)]
4493 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4494 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4496 score += length[UNI_AC_ENC_INDEX(run, 65)]
4497 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4498 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4500 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4502 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4503 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* nonzero -> zero: two runs merge into one */
4509 av_assert2(FFABS(level)==1);
4511 if(i < last_non_zero){
4512 int next_i= i + run2 + 1;
4513 int next_level= block[ perm_scantable[next_i] ] + 64;
4515 if(next_level&(~127))
4518 if(next_i < last_non_zero)
4519 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4520 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4521 - length[UNI_AC_ENC_INDEX(run, 65)];
4523 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4524 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4525 - length[UNI_AC_ENC_INDEX(run, 65)];
4527 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4529 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4530 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* combine bit cost (scaled by lambda) with reconstruction error */
4537 unquant_change= new_coeff - old_coeff;
4538 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4540 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4542 if(score<best_score){
4545 best_change= change;
4546 best_unquant_change= unquant_change;
4550 prev_level= level + 64;
4551 if(prev_level&(~127))
4560 STOP_TIMER("iterative step")}
/* apply the single best change found in this iteration */
4564 int j= perm_scantable[ best_coeff ];
4566 block[j] += best_change;
4568 if(best_coeff > last_non_zero){
4569 last_non_zero= best_coeff;
4570 av_assert2(block[j]);
4577 if(block[j] - best_change){
4578 if(FFABS(block[j]) > FFABS(block[j] - best_change)){
/* if a trailing coefficient was zeroed, pull last_non_zero back */
4590 for(; last_non_zero>=start_i; last_non_zero--){
4591 if(block[perm_scantable[last_non_zero]])
4597 if(256*256*256*64 % count == 0){
4598 av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
/* rebuild the run table and update rem[] with the applied change,
 * then loop for another refinement pass */
4603 for(i=start_i; i<=last_non_zero; i++){
4604 int j= perm_scantable[i];
4605 const int level= block[j];
4608 run_tab[rle_index++]=run;
4615 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4621 if(last_non_zero>0){
4622 STOP_TIMER("iterative search")
4627 return last_non_zero;
4631 * Permute an 8x8 block according to permutation.
4632 * @param block the block which will be permuted according to
4633 * the given permutation vector
4634 * @param permutation the permutation vector
4635 * @param last the last non zero coefficient in scantable order, used to
4636 * speed the permutation up
4637 * @param scantable the used scantable, this is only used to speed the
4638 * permutation up, the block is not (inverse) permuted
4639 * to scantable order!
4641 void ff_block_permute(int16_t *block, uint8_t *permutation,
4642 const uint8_t *scantable, int last)
4649 //FIXME it is ok but not clean and might fail for some permutations
4650 // if (permutation[1] == 1)
/* copy the (at most last+1) non-zero coefficients out, then write them
 * back at their permuted positions; two passes because a coefficient's
 * destination may be another coefficient's source */
4653 for (i = 0; i <= last; i++) {
4654 const int j = scantable[i];
4659 for (i = 0; i <= last; i++) {
4660 const int j = scantable[i];
4661 const int perm_j = permutation[j];
4662 block[perm_j] = temp[j];
/* Plain (non-trellis) quantisation of one 8x8 block: forward DCT,
 * optional denoising, per-coefficient multiply by the quant matrix with
 * bias, then a final permutation so the block matches the IDCT's
 * coefficient order. Returns the last non-zero index; *overflow is set
 * when a level exceeded max_qcoeff. */
4666 int ff_dct_quantize_c(MpegEncContext *s,
4667 int16_t *block, int n,
4668 int qscale, int *overflow)
4670 int i, j, level, last_non_zero, q, start_i;
4672 const uint8_t *scantable= s->intra_scantable.scantable;
4675 unsigned int threshold1, threshold2;
4677 s->fdsp.fdct(block);
4679 if(s->dct_error_sum)
4680 s->denoise_dct(s, block);
4690 /* For AIC we skip quant/dequant of INTRADC */
4693 /* note: block[0] is assumed to be positive */
4694 block[0] = (block[0] + (q >> 1)) / q;
/* intra: luma (n<4) and chroma use separate matrices/biases */
4697 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4698 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4702 qmat = s->q_inter_matrix[qscale];
4703 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4705 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4706 threshold2= (threshold1<<1);
/* backward scan to find the last coefficient that quantises non-zero */
4707 for(i=63;i>=start_i;i--) {
4709 level = block[j] * qmat[j];
4711 if(((unsigned)(level+threshold1))>threshold2){
/* forward pass: quantise everything up to last_non_zero */
4718 for(i=start_i; i<=last_non_zero; i++) {
4720 level = block[j] * qmat[j];
4722 // if( bias+level >= (1<<QMAT_SHIFT)
4723 // || bias-level >= (1<<QMAT_SHIFT)){
4724 if(((unsigned)(level+threshold1))>threshold2){
4726 level= (bias + level)>>QMAT_SHIFT;
4729 level= (bias - level)>>QMAT_SHIFT;
4737 *overflow= s->max_qcoeff < max; //overflow might have happened
4739 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4740 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4741 ff_block_permute(block, s->idsp.idct_permutation,
4742 scantable, last_non_zero);
4744 return last_non_zero;
/* AVOption plumbing: OFFSET locates a field inside MpegEncContext,
 * VE marks an option as a video encoding parameter. */
4747 #define OFFSET(x) offsetof(MpegEncContext, x)
4748 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options of the H.263 encoder. */
4749 static const AVOption h263_options[] = {
4750 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4751 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass exposing h263_options through the AVOption API. */
4756 static const AVClass h263_class = {
4757 .class_name = "H.263 encoder",
4758 .item_name = av_default_item_name,
4759 .option = h263_options,
4760 .version = LIBAVUTIL_VERSION_INT,
/* H.263 encoder registration: generic mpegvideo entry points, YUV420P
 * only. */
4763 AVCodec ff_h263_encoder = {
4765 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4766 .type = AVMEDIA_TYPE_VIDEO,
4767 .id = AV_CODEC_ID_H263,
4768 .priv_data_size = sizeof(MpegEncContext),
4769 .init = ff_mpv_encode_init,
4770 .encode2 = ff_mpv_encode_picture,
4771 .close = ff_mpv_encode_end,
4772 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4773 .priv_class = &h263_class,
/* Private options of the H.263+ encoder (Annex features). */
4776 static const AVOption h263p_options[] = {
4777 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4778 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4779 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4780 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
4784 static const AVClass h263p_class = {
4785 .class_name = "H.263p encoder",
4786 .item_name = av_default_item_name,
4787 .option = h263p_options,
4788 .version = LIBAVUTIL_VERSION_INT,
4791 AVCodec ff_h263p_encoder = {
4793 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4794 .type = AVMEDIA_TYPE_VIDEO,
4795 .id = AV_CODEC_ID_H263P,
4796 .priv_data_size = sizeof(MpegEncContext),
4797 .init = ff_mpv_encode_init,
4798 .encode2 = ff_mpv_encode_picture,
4799 .close = ff_mpv_encode_end,
4800 .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4801 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4802 .priv_class = &h263p_class,
4805 static const AVClass msmpeg4v2_class = {
4806 .class_name = "msmpeg4v2 encoder",
4807 .item_name = av_default_item_name,
4808 .option = ff_mpv_generic_options,
4809 .version = LIBAVUTIL_VERSION_INT,
4812 AVCodec ff_msmpeg4v2_encoder = {
4813 .name = "msmpeg4v2",
4814 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4815 .type = AVMEDIA_TYPE_VIDEO,
4816 .id = AV_CODEC_ID_MSMPEG4V2,
4817 .priv_data_size = sizeof(MpegEncContext),
4818 .init = ff_mpv_encode_init,
4819 .encode2 = ff_mpv_encode_picture,
4820 .close = ff_mpv_encode_end,
4821 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4822 .priv_class = &msmpeg4v2_class,
4825 static const AVClass msmpeg4v3_class = {
4826 .class_name = "msmpeg4v3 encoder",
4827 .item_name = av_default_item_name,
4828 .option = ff_mpv_generic_options,
4829 .version = LIBAVUTIL_VERSION_INT,
4832 AVCodec ff_msmpeg4v3_encoder = {
4834 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4835 .type = AVMEDIA_TYPE_VIDEO,
4836 .id = AV_CODEC_ID_MSMPEG4V3,
4837 .priv_data_size = sizeof(MpegEncContext),
4838 .init = ff_mpv_encode_init,
4839 .encode2 = ff_mpv_encode_picture,
4840 .close = ff_mpv_encode_end,
4841 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4842 .priv_class = &msmpeg4v3_class,
4845 static const AVClass wmv1_class = {
4846 .class_name = "wmv1 encoder",
4847 .item_name = av_default_item_name,
4848 .option = ff_mpv_generic_options,
4849 .version = LIBAVUTIL_VERSION_INT,
4852 AVCodec ff_wmv1_encoder = {
4854 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4855 .type = AVMEDIA_TYPE_VIDEO,
4856 .id = AV_CODEC_ID_WMV1,
4857 .priv_data_size = sizeof(MpegEncContext),
4858 .init = ff_mpv_encode_init,
4859 .encode2 = ff_mpv_encode_picture,
4860 .close = ff_mpv_encode_end,
4861 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4862 .priv_class = &wmv1_class,