2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/pixdesc.h"
40 #include "libavutil/opt.h"
41 #include "libavutil/timer.h"
46 #include "mpegvideo.h"
47 #include "mpegvideodata.h"
51 #include "mjpegenc_common.h"
53 #include "mpegutils.h"
56 #include "pixblockdsp.h"
60 #include "aandcttab.h"
62 #include "mpeg4video.h"
64 #include "bytestream.h"
70 #define QUANT_BIAS_SHIFT 8
72 #define QMAT_SHIFT_MMX 16
75 static int encode_picture(MpegEncContext *s, int picture_number);
76 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
77 static int sse_mb(MpegEncContext *s);
78 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
79 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
81 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
82 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
84 const AVOption ff_mpv_generic_options[] = {
/*
 * Precompute quantization multiplier tables for every qscale in [qmin, qmax].
 *
 * For each qscale a 64-entry reciprocal table is derived from quant_matrix so
 * the quantizers can multiply instead of divide.  The scaling constant depends
 * on which forward DCT is active: the "islow"/faan path uses QMAT_SHIFT, the
 * AAN "ifast" path folds ff_aanscales[] into the divisor (QMAT_SHIFT + 14),
 * and the default path additionally fills the 16-bit qmat16 tables (value in
 * [0][i], rounded bias term in [1][i]) used by the MMX/SIMD quantizer.
 * `intra` makes the overflow check below start at coefficient 1 (skip DC).
 *
 * NOTE(review): this listing is sampled — variable declarations (qscale,
 * qscale2, i, shift, max), several closing braces and the else-branch header
 * before the default path are not visible here; comments describe only what
 * the visible lines establish.
 */
89 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
90 uint16_t (*qmat16)[2][64],
91 const uint16_t *quant_matrix,
92 int bias, int qmin, int qmax, int intra)
94 FDCTDSPContext *fdsp = &s->fdsp;
98 for (qscale = qmin; qscale <= qmax; qscale++) {
/* qscale2: effective scale — MPEG-2 non-linear table when q_scale_type
 * is set, otherwise plain linear (qscale * 2). */
102 if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
103 else qscale2 = qscale << 1;
105 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
107 fdsp->fdct == ff_faandct ||
108 #endif /* CONFIG_FAANDCT */
109 fdsp->fdct == ff_jpeg_fdct_islow_10) {
110 for (i = 0; i < 64; i++) {
/* j: table index remapped through the IDCT coefficient permutation. */
111 const int j = s->idsp.idct_permutation[i];
112 int64_t den = (int64_t) qscale2 * quant_matrix[j];
113 /* 16 <= qscale * quant_matrix[i] <= 7905
114 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
115 * 19952 <= x <= 249205026
116 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
117 * 3444240 >= (1 << 36) / (x) >= 275 */
119 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
121 } else if (fdsp->fdct == ff_fdct_ifast) {
122 for (i = 0; i < 64; i++) {
123 const int j = s->idsp.idct_permutation[i];
/* AAN fast DCT leaves per-coefficient scale factors in the output;
 * fold them into the divisor here. */
124 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
125 /* 16 <= qscale * quant_matrix[i] <= 7905
126 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
127 * 19952 <= x <= 249205026
128 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
129 * 3444240 >= (1 << 36) / (x) >= 275 */
131 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
/* Default path: also build the 16-bit tables for the SIMD quantizer. */
134 for (i = 0; i < 64; i++) {
135 const int j = s->idsp.idct_permutation[i];
136 int64_t den = (int64_t) qscale2 * quant_matrix[j];
137 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
138 * Assume x = qscale * quant_matrix[i]
140 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
141 * so 32768 >= (1 << 19) / (x) >= 67 */
142 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
143 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
144 // (qscale * quant_matrix[i]);
145 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
/* Clamp 0 and 128*256: both would break the 16-bit multiplier. */
147 if (qmat16[qscale][0][i] == 0 ||
148 qmat16[qscale][0][i] == 128 * 256)
149 qmat16[qscale][0][i] = 128 * 256 - 1;
150 qmat16[qscale][1][i] =
151 ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
152 qmat16[qscale][0][i]);
/* Overflow guard: shrink `shift` until max*qmat fits in an int;
 * starts at `intra` so the DC coefficient is excluded for intra. */
156 for (i = intra; i < 64; i++) {
158 if (fdsp->fdct == ff_fdct_ifast) {
159 max = (8191LL * ff_aanscales[i]) >> 14;
161 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
167 av_log(NULL, AV_LOG_INFO,
168 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/*
 * Derive s->qscale (and s->lambda2) from the current rate-control lambda.
 *
 * The first branch searches ff_mpeg2_non_linear_qscale[] for the entry whose
 * scaled value is closest to lambda*139, skipping entries outside
 * [qmin, qmax] (qmax is ignored while vbv_ignore_qmax is set) — but note the
 * `&& 0` disables this branch entirely; only the linear mapping below runs.
 * NOTE(review): sampled listing — the closing of the loop/branch and some
 * declarations are not visible.
 */
173 static inline void update_qscale(MpegEncContext *s)
/* `&& 0`: non-linear search deliberately disabled (dead code kept for
 * reference). */
175 if (s->q_scale_type == 1 && 0) {
177 int bestdiff=INT_MAX;
180 for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
181 int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
182 if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
183 (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
185 if (diff < bestdiff) {
/* Linear lambda -> qscale mapping with rounding. */
192 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
193 (FF_LAMBDA_SHIFT + 7);
/* When VBV forces us past qmax, clamp to the codec ceiling 31 instead. */
194 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
197 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/*
 * Write a 64-entry quantization matrix to the bitstream in zig-zag order,
 * 8 bits per coefficient.
 * NOTE(review): sampled listing — the function body's braces and the branch
 * for a NULL matrix (if any) are not visible here.
 */
201 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
207 for (i = 0; i < 64; i++) {
208 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
215 * init s->current_picture.qscale_table from s->lambda_table
217 void ff_init_qscale_tab(MpegEncContext *s)
/* Per-macroblock qscale derived from the per-MB lambda using the same
 * linear lambda->qp mapping as update_qscale(), clamped to [qmin, ...].
 * (The upper clamp bound is on a line not visible in this listing.) */
219 int8_t * const qscale_table = s->current_picture.qscale_table;
222 for (i = 0; i < s->mb_num; i++) {
/* mb_index2xy[] converts the linear MB index to the table position. */
223 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
224 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
225 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/*
 * Copy the fields that motion estimation / header encoding may have changed
 * from the master context into a slice-thread duplicate context.
 * NOTE(review): sampled listing — additional COPY() lines and the #undef
 * are not visible here.
 */
230 static void update_duplicate_context_after_me(MpegEncContext *dst,
233 #define COPY(a) dst->a= src->a
235 COPY(current_picture);
241 COPY(picture_in_gop_number);
242 COPY(gop_picture_number);
243 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
244 COPY(progressive_frame); // FIXME don't set in encode_header
245 COPY(partitioned_frame); // FIXME don't set in encode_header
250 * Set the given MpegEncContext to defaults for encoding.
251 * the changed fields will not depend upon the prior state of the MpegEncContext.
253 static void mpv_encode_defaults(MpegEncContext *s)
/* Start from the decoder/encoder-shared defaults, then set the
 * encoder-only tables and counters. */
256 ff_mpv_common_defaults(s);
/* Initialize the fcode table for small motion vectors; the loop body for
 * the mv_penalty table is not visible in this sampled listing. */
258 for (i = -16; i < 16; i++) {
259 default_fcode_tab[i + MAX_MV] = 1;
261 s->me.mv_penalty = default_mv_penalty;
262 s->fcode_tab = default_fcode_tab;
264 s->input_picture_number = 0;
265 s->picture_in_gop_number = 0;
/*
 * Install the DCT-quantization function pointers.
 *
 * Order matters: x86 optimizations first, then C fallbacks for anything
 * still unset; fast_dct_quantize keeps the non-trellis quantizer even when
 * trellis quantization replaces dct_quantize.
 * NOTE(review): sampled listing — the guard around the x86 init and the
 * return statement are not visible here.
 */
268 av_cold int ff_dct_encode_init(MpegEncContext *s) {
270 ff_dct_encode_init_x86(s);
272 if (CONFIG_H263_ENCODER)
273 ff_h263dsp_init(&s->h263dsp);
274 if (!s->dct_quantize)
275 s->dct_quantize = ff_dct_quantize_c;
277 s->denoise_dct = denoise_dct_c;
/* Remember the plain quantizer before possibly switching to trellis. */
278 s->fast_dct_quantize = s->dct_quantize;
279 if (s->avctx->trellis)
280 s->dct_quantize = dct_quantize_trellis_c;
285 /* init video encoder */
/*
 * One-time encoder initialization for all mpegvideo-based codecs.
 *
 * Validates the user configuration against the selected codec's limits,
 * derives internal MpegEncContext fields from AVCodecContext options, sets
 * per-codec output format/feature flags, allocates the quantization and
 * picture bookkeeping tables, and initializes the DSP contexts and rate
 * control.  Returns 0 on success or a negative AVERROR; on allocation
 * failure control reaches the `fail` label which tears everything down via
 * ff_mpv_encode_end().
 *
 * NOTE(review): this listing is sampled — many lines (closing braces,
 * `break;` statements, `return` paths, else-branches) are missing; comments
 * below describe only what the visible lines establish.
 */
286 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
288 MpegEncContext *s = avctx->priv_data;
289 AVCPBProperties *cpb_props;
290 int i, ret, format_supported;
292 mpv_encode_defaults(s);
/* --- pixel-format validation per codec --- */
294 switch (avctx->codec_id) {
295 case AV_CODEC_ID_MPEG2VIDEO:
296 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
297 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
298 av_log(avctx, AV_LOG_ERROR,
299 "only YUV420 and YUV422 are supported\n");
303 case AV_CODEC_ID_MJPEG:
304 case AV_CODEC_ID_AMV:
305 format_supported = 0;
306 /* JPEG color space */
307 if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
308 avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
309 avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
310 (avctx->color_range == AVCOL_RANGE_JPEG &&
311 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
312 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
313 avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
314 format_supported = 1;
315 /* MPEG color space */
316 else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
317 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
318 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
319 avctx->pix_fmt == AV_PIX_FMT_YUV444P))
320 format_supported = 1;
322 if (!format_supported) {
323 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
/* default case (not visible here): everything else is 4:2:0 only. */
328 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
329 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
/* Map the pixel format to the internal chroma subsampling enum. */
334 switch (avctx->pix_fmt) {
335 case AV_PIX_FMT_YUVJ444P:
336 case AV_PIX_FMT_YUV444P:
337 s->chroma_format = CHROMA_444;
339 case AV_PIX_FMT_YUVJ422P:
340 case AV_PIX_FMT_YUV422P:
341 s->chroma_format = CHROMA_422;
343 case AV_PIX_FMT_YUVJ420P:
344 case AV_PIX_FMT_YUV420P:
346 s->chroma_format = CHROMA_420;
/* --- copy/derive basic parameters from the AVCodecContext --- */
350 avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
351 s->bit_rate = avctx->bit_rate;
352 s->width = avctx->width;
353 s->height = avctx->height;
354 if (avctx->gop_size > 600 &&
355 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
356 av_log(avctx, AV_LOG_WARNING,
357 "keyframe interval too large!, reducing it from %d to %d\n",
358 avctx->gop_size, 600);
359 avctx->gop_size = 600;
361 s->gop_size = avctx->gop_size;
363 if (avctx->max_b_frames > MAX_B_FRAMES) {
364 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
365 "is %d.\n", MAX_B_FRAMES);
366 avctx->max_b_frames = MAX_B_FRAMES;
368 s->max_b_frames = avctx->max_b_frames;
369 s->codec_id = avctx->codec->id;
370 s->strict_std_compliance = avctx->strict_std_compliance;
371 s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
372 s->mpeg_quant = avctx->mpeg_quant;
373 s->rtp_mode = !!avctx->rtp_payload_size;
374 s->intra_dc_precision = avctx->intra_dc_precision;
376 // workaround some differences between how applications specify dc precision
/* Normalize: some apps pass 8..11 (bit depth), some pass 0..3 (offset);
 * negative values are shifted up, >= 8 shifted down, into the 0..3 range. */
377 if (s->intra_dc_precision < 0) {
378 s->intra_dc_precision += 8;
379 } else if (s->intra_dc_precision >= 8)
380 s->intra_dc_precision -= 8;
382 if (s->intra_dc_precision < 0) {
383 av_log(avctx, AV_LOG_ERROR,
384 "intra dc precision must be positive, note some applications use"
385 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
386 return AVERROR(EINVAL);
/* Only MPEG-2 supports DC precision above 8 bits (offsets 1..3). */
389 if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
390 av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
391 return AVERROR(EINVAL);
393 s->user_specified_pts = AV_NOPTS_VALUE;
395 if (s->gop_size <= 1) {
402 #if FF_API_MOTION_EST
403 FF_DISABLE_DEPRECATION_WARNINGS
404 s->me_method = avctx->me_method;
405 FF_ENABLE_DEPRECATION_WARNINGS
409 s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
412 FF_DISABLE_DEPRECATION_WARNINGS
413 if (avctx->border_masking != 0.0)
414 s->border_masking = avctx->border_masking;
415 FF_ENABLE_DEPRECATION_WARNINGS
/* Adaptive quantization is enabled when any masking option or QP-RD
 * is in use (the tail of the condition is not visible here). */
418 s->adaptive_quant = (s->avctx->lumi_masking ||
419 s->avctx->dark_masking ||
420 s->avctx->temporal_cplx_masking ||
421 s->avctx->spatial_cplx_masking ||
422 s->avctx->p_masking ||
424 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
427 s->loop_filter = !!(s->avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
/* --- rate control / VBV sanity checks --- */
/* Pick a spec-derived default VBV buffer size when only max rate is set. */
429 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
430 switch(avctx->codec_id) {
431 case AV_CODEC_ID_MPEG1VIDEO:
432 case AV_CODEC_ID_MPEG2VIDEO:
433 avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
435 case AV_CODEC_ID_MPEG4:
436 case AV_CODEC_ID_MSMPEG4V1:
437 case AV_CODEC_ID_MSMPEG4V2:
438 case AV_CODEC_ID_MSMPEG4V3:
/* Piecewise-linear interpolation of the MPEG-4 VBV buffer table
 * (units of 16384 bits), by bitrate bracket. */
439 if (avctx->rc_max_rate >= 15000000) {
440 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
441 } else if(avctx->rc_max_rate >= 2000000) {
442 avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
443 } else if(avctx->rc_max_rate >= 384000) {
444 avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
446 avctx->rc_buffer_size = 40;
447 avctx->rc_buffer_size *= 16384;
450 if (avctx->rc_buffer_size) {
451 av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
455 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
456 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
460 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
461 av_log(avctx, AV_LOG_INFO,
462 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
465 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
466 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
470 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
471 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
475 if (avctx->rc_max_rate &&
476 avctx->rc_max_rate == avctx->bit_rate &&
477 avctx->rc_max_rate != avctx->rc_min_rate) {
478 av_log(avctx, AV_LOG_INFO,
479 "impossible bitrate constraints, this will fail\n");
/* Buffer must hold at least one frame's worth of bits. */
482 if (avctx->rc_buffer_size &&
483 avctx->bit_rate * (int64_t)avctx->time_base.num >
484 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
485 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
489 if (!s->fixed_qscale &&
490 avctx->bit_rate * av_q2d(avctx->time_base) >
491 avctx->bit_rate_tolerance) {
492 av_log(avctx, AV_LOG_WARNING,
493 "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, (int64_t)avctx->bit_rate);
494 avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
/* vbv_delay is a 16-bit field in 90kHz units; warn when it can't
 * represent the configured buffer (it will read as 0xFFFF = VBR). */
497 if (s->avctx->rc_max_rate &&
498 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
499 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
500 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
501 90000LL * (avctx->rc_buffer_size - 1) >
502 s->avctx->rc_max_rate * 0xFFFFLL) {
503 av_log(avctx, AV_LOG_INFO,
504 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
505 "specified vbv buffer is too large for the given bitrate!\n");
/* --- per-codec feature/limit validation --- */
508 if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
509 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
510 s->codec_id != AV_CODEC_ID_FLV1) {
511 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
515 if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
516 av_log(avctx, AV_LOG_ERROR,
517 "OBMC is only supported with simple mb decision\n");
521 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
522 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
526 if (s->max_b_frames &&
527 s->codec_id != AV_CODEC_ID_MPEG4 &&
528 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
529 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
530 av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
533 if (s->max_b_frames < 0) {
534 av_log(avctx, AV_LOG_ERROR,
535 "max b frames must be 0 or positive for mpegvideo based encoders\n");
/* H.263-family bitstreams store SAR in 8 bits each; reduce if needed. */
539 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
540 s->codec_id == AV_CODEC_ID_H263 ||
541 s->codec_id == AV_CODEC_ID_H263P) &&
542 (avctx->sample_aspect_ratio.num > 255 ||
543 avctx->sample_aspect_ratio.den > 255)) {
544 av_log(avctx, AV_LOG_WARNING,
545 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
546 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
547 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
548 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
551 if ((s->codec_id == AV_CODEC_ID_H263 ||
552 s->codec_id == AV_CODEC_ID_H263P) &&
553 (avctx->width > 2048 ||
554 avctx->height > 1152 )) {
555 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
558 if ((s->codec_id == AV_CODEC_ID_H263 ||
559 s->codec_id == AV_CODEC_ID_H263P) &&
560 ((avctx->width &3) ||
561 (avctx->height&3) )) {
562 av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
566 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
567 (avctx->width > 4095 ||
568 avctx->height > 4095 )) {
569 av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
573 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
574 (avctx->width > 16383 ||
575 avctx->height > 16383 )) {
576 av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
580 if (s->codec_id == AV_CODEC_ID_RV10 &&
582 avctx->height&15 )) {
583 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
584 return AVERROR(EINVAL);
587 if (s->codec_id == AV_CODEC_ID_RV20 &&
590 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
591 return AVERROR(EINVAL);
594 if ((s->codec_id == AV_CODEC_ID_WMV1 ||
595 s->codec_id == AV_CODEC_ID_WMV2) &&
597 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
601 if ((s->avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
602 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
603 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
607 // FIXME mpeg2 uses that too
608 if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
609 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
610 av_log(avctx, AV_LOG_ERROR,
611 "mpeg2 style quantization not supported by codec\n");
615 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
616 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
620 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
621 s->avctx->mb_decision != FF_MB_DECISION_RD) {
622 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
626 if (s->avctx->scenechange_threshold < 1000000000 &&
627 (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
628 av_log(avctx, AV_LOG_ERROR,
629 "closed gop with scene change detection are not supported yet, "
630 "set threshold to 1000000000\n");
634 if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
635 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
636 av_log(avctx, AV_LOG_ERROR,
637 "low delay forcing is only available for mpeg2\n");
640 if (s->max_b_frames != 0) {
641 av_log(avctx, AV_LOG_ERROR,
642 "b frames cannot be used with low delay\n");
647 if (s->q_scale_type == 1) {
648 if (avctx->qmax > 28) {
649 av_log(avctx, AV_LOG_ERROR,
650 "non linear quant only supports qmax <= 28 currently\n");
655 if (avctx->slices > 1 &&
656 (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
657 av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
658 return AVERROR(EINVAL);
661 if (s->avctx->thread_count > 1 &&
662 s->codec_id != AV_CODEC_ID_MPEG4 &&
663 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
664 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
665 s->codec_id != AV_CODEC_ID_MJPEG &&
666 (s->codec_id != AV_CODEC_ID_H263P)) {
667 av_log(avctx, AV_LOG_ERROR,
668 "multi threaded encoding not supported by codec\n");
672 if (s->avctx->thread_count < 1) {
673 av_log(avctx, AV_LOG_ERROR,
674 "automatic thread number detection not supported by codec, "
679 if (!avctx->time_base.den || !avctx->time_base.num) {
680 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
684 if (avctx->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
685 av_log(avctx, AV_LOG_INFO,
686 "notice: b_frame_strategy only affects the first pass\n");
687 avctx->b_frame_strategy = 0;
/* Reduce the time base to lowest terms (the `i > 1` guard around the
 * division is on a line not visible here). */
690 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
692 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
693 avctx->time_base.den /= i;
694 avctx->time_base.num /= i;
/* --- quantizer bias defaults (can be overridden below) --- */
698 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) {
699 // (a + x * 3 / 8) / x
700 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
701 s->inter_quant_bias = 0;
703 s->intra_quant_bias = 0;
705 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
708 if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
709 av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
710 return AVERROR(EINVAL);
713 #if FF_API_QUANT_BIAS
714 FF_DISABLE_DEPRECATION_WARNINGS
715 if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
716 s->intra_quant_bias = avctx->intra_quant_bias;
717 if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
718 s->inter_quant_bias = avctx->inter_quant_bias;
719 FF_ENABLE_DEPRECATION_WARNINGS
722 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
/* MPEG-4 stores the timebase denominator in a 16-bit field. */
724 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
725 s->avctx->time_base.den > (1 << 16) - 1) {
726 av_log(avctx, AV_LOG_ERROR,
727 "timebase %d/%d not supported by MPEG 4 standard, "
728 "the maximum admitted value for the timebase denominator "
729 "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
733 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
/* --- per-codec output format and feature flags --- */
735 switch (avctx->codec->id) {
736 case AV_CODEC_ID_MPEG1VIDEO:
737 s->out_format = FMT_MPEG1;
738 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
739 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
741 case AV_CODEC_ID_MPEG2VIDEO:
742 s->out_format = FMT_MPEG1;
743 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
744 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
747 case AV_CODEC_ID_MJPEG:
748 case AV_CODEC_ID_AMV:
749 s->out_format = FMT_MJPEG;
750 s->intra_only = 1; /* force intra only for jpeg */
751 if (!CONFIG_MJPEG_ENCODER ||
752 ff_mjpeg_encode_init(s) < 0)
757 case AV_CODEC_ID_H261:
758 if (!CONFIG_H261_ENCODER)
760 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
761 av_log(avctx, AV_LOG_ERROR,
762 "The specified picture size of %dx%d is not valid for the "
763 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
764 s->width, s->height);
767 s->out_format = FMT_H261;
770 s->rtp_mode = 0; /* Sliced encoding not supported */
772 case AV_CODEC_ID_H263:
773 if (!CONFIG_H263_ENCODER)
775 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
776 s->width, s->height) == 8) {
777 av_log(avctx, AV_LOG_ERROR,
778 "The specified picture size of %dx%d is not valid for "
779 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
780 "352x288, 704x576, and 1408x1152. "
781 "Try H.263+.\n", s->width, s->height);
784 s->out_format = FMT_H263;
788 case AV_CODEC_ID_H263P:
789 s->out_format = FMT_H263;
792 s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
793 s->modified_quant = s->h263_aic;
794 s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
795 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
798 /* These are just to be sure */
802 case AV_CODEC_ID_FLV1:
803 s->out_format = FMT_H263;
804 s->h263_flv = 2; /* format = 1; 11-bit codes */
805 s->unrestricted_mv = 1;
806 s->rtp_mode = 0; /* don't allow GOB */
810 case AV_CODEC_ID_RV10:
811 s->out_format = FMT_H263;
815 case AV_CODEC_ID_RV20:
816 s->out_format = FMT_H263;
819 s->modified_quant = 1;
823 s->unrestricted_mv = 0;
825 case AV_CODEC_ID_MPEG4:
826 s->out_format = FMT_H263;
828 s->unrestricted_mv = 1;
829 s->low_delay = s->max_b_frames ? 0 : 1;
830 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
832 case AV_CODEC_ID_MSMPEG4V2:
833 s->out_format = FMT_H263;
835 s->unrestricted_mv = 1;
836 s->msmpeg4_version = 2;
840 case AV_CODEC_ID_MSMPEG4V3:
841 s->out_format = FMT_H263;
843 s->unrestricted_mv = 1;
844 s->msmpeg4_version = 3;
845 s->flipflop_rounding = 1;
849 case AV_CODEC_ID_WMV1:
850 s->out_format = FMT_H263;
852 s->unrestricted_mv = 1;
853 s->msmpeg4_version = 4;
854 s->flipflop_rounding = 1;
858 case AV_CODEC_ID_WMV2:
859 s->out_format = FMT_H263;
861 s->unrestricted_mv = 1;
862 s->msmpeg4_version = 5;
863 s->flipflop_rounding = 1;
871 avctx->has_b_frames = !s->low_delay;
875 s->progressive_frame =
876 s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
877 AV_CODEC_FLAG_INTERLACED_ME) ||
/* --- allocate contexts and tables --- */
882 if (ff_mpv_common_init(s) < 0)
885 ff_fdctdsp_init(&s->fdsp, avctx);
886 ff_me_cmp_init(&s->mecc, avctx);
887 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
888 ff_pixblockdsp_init(&s->pdsp, avctx);
889 ff_qpeldsp_init(&s->qdsp);
891 if (s->msmpeg4_version) {
892 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
893 2 * 2 * (MAX_LEVEL + 1) *
894 (MAX_RUN + 1) * 2 * sizeof(int), fail);
896 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
/* Quantization tables: 32 qscales x 64 coefficients each. */
898 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
899 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail);
900 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
901 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
902 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
903 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
904 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
905 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
906 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
907 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
909 if (s->avctx->noise_reduction) {
910 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
911 2 * 64 * sizeof(uint16_t), fail);
914 ff_dct_encode_init(s);
916 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
917 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
919 if (s->slice_context_count > 1) {
922 if (avctx->codec_id == AV_CODEC_ID_H263P)
923 s->h263_slice_structured = 1;
926 s->quant_precision = 5;
928 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, s->avctx->ildct_cmp);
929 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->avctx->frame_skip_cmp);
/* --- per-format sub-encoder init --- */
931 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
932 ff_h261_encode_init(s);
933 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
934 ff_h263_encode_init(s);
935 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
936 if ((ret = ff_msmpeg4_encode_init(s)) < 0)
938 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
939 && s->out_format == FMT_MPEG1)
940 ff_mpeg1_encode_init(s);
/* --- choose default quant matrices (permuted), allow user override --- */
943 for (i = 0; i < 64; i++) {
944 int j = s->idsp.idct_permutation[i];
945 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
947 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
948 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
949 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
951 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
954 s->chroma_intra_matrix[j] =
955 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
956 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
958 if (s->avctx->intra_matrix)
959 s->intra_matrix[j] = s->avctx->intra_matrix[i];
960 if (s->avctx->inter_matrix)
961 s->inter_matrix[j] = s->avctx->inter_matrix[i];
964 /* precompute matrix */
965 /* for mjpeg, we do include qscale in the matrix */
966 if (s->out_format != FMT_MJPEG) {
967 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
968 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
970 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
971 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
975 if (ff_rate_control_init(s) < 0)
/* --- honor deprecated AVCodecContext rate-control options --- */
978 #if FF_API_ERROR_RATE
979 FF_DISABLE_DEPRECATION_WARNINGS
980 if (avctx->error_rate)
981 s->error_rate = avctx->error_rate;
982 FF_ENABLE_DEPRECATION_WARNINGS;
985 #if FF_API_NORMALIZE_AQP
986 FF_DISABLE_DEPRECATION_WARNINGS
987 if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP)
988 s->mpv_flags |= FF_MPV_FLAG_NAQ;
989 FF_ENABLE_DEPRECATION_WARNINGS;
993 FF_DISABLE_DEPRECATION_WARNINGS
994 if (avctx->flags & CODEC_FLAG_MV0)
995 s->mpv_flags |= FF_MPV_FLAG_MV0;
996 FF_ENABLE_DEPRECATION_WARNINGS
1000 FF_DISABLE_DEPRECATION_WARNINGS
1001 if (avctx->rc_qsquish != 0.0)
1002 s->rc_qsquish = avctx->rc_qsquish;
1003 if (avctx->rc_qmod_amp != 0.0)
1004 s->rc_qmod_amp = avctx->rc_qmod_amp;
1005 if (avctx->rc_qmod_freq)
1006 s->rc_qmod_freq = avctx->rc_qmod_freq;
1007 if (avctx->rc_buffer_aggressivity != 1.0)
1008 s->rc_buffer_aggressivity = avctx->rc_buffer_aggressivity;
1009 if (avctx->rc_initial_cplx != 0.0)
1010 s->rc_initial_cplx = avctx->rc_initial_cplx;
1012 s->lmin = avctx->lmin;
1014 s->lmax = avctx->lmax;
1017 av_freep(&s->rc_eq);
1018 s->rc_eq = av_strdup(avctx->rc_eq);
/* av_strdup failure path (the NULL check is on a line not visible). */
1020 return AVERROR(ENOMEM);
1022 FF_ENABLE_DEPRECATION_WARNINGS
/* b_frame_strategy 2: pre-allocate downscaled frames for B-frame
 * decision lookahead. */
1025 if (avctx->b_frame_strategy == 2) {
1026 for (i = 0; i < s->max_b_frames + 2; i++) {
1027 s->tmp_frames[i] = av_frame_alloc();
1028 if (!s->tmp_frames[i])
1029 return AVERROR(ENOMEM);
1031 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1032 s->tmp_frames[i]->width = s->width >> avctx->brd_scale;
1033 s->tmp_frames[i]->height = s->height >> avctx->brd_scale;
1035 ret = av_frame_get_buffer(s->tmp_frames[i], 32);
/* Export coded-picture-buffer properties as stream side data. */
1041 cpb_props = ff_add_cpb_side_data(avctx);
1043 return AVERROR(ENOMEM);
1044 cpb_props->max_bitrate = avctx->rc_max_rate;
1045 cpb_props->min_bitrate = avctx->rc_min_rate;
1046 cpb_props->avg_bitrate = avctx->bit_rate;
1047 cpb_props->buffer_size = avctx->rc_buffer_size;
/* fail: full teardown of everything allocated above. */
1051 ff_mpv_encode_end(avctx);
1052 return AVERROR_UNKNOWN;
/*
 * Free everything allocated by ff_mpv_encode_init() (also used as its
 * failure-path cleanup, so every free must tolerate partially-initialized
 * state).  Returns 0 (the return statement is outside this sampled view).
 */
1055 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1057 MpegEncContext *s = avctx->priv_data;
1060 ff_rate_control_uninit(s);
1062 ff_mpv_common_end(s);
1063 if (CONFIG_MJPEG_ENCODER &&
1064 s->out_format == FMT_MJPEG)
1065 ff_mjpeg_encode_close(s);
1067 av_freep(&avctx->extradata);
1069 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1070 av_frame_free(&s->tmp_frames[i]);
1072 ff_free_picture_tables(&s->new_picture);
1073 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1075 av_freep(&s->avctx->stats_out);
1076 av_freep(&s->ac_stats);
/* The chroma tables may alias the luma tables (set to the same pointer
 * elsewhere); only free them when they are distinct, then clear both. */
1078 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1079 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1080 s->q_chroma_intra_matrix= NULL;
1081 s->q_chroma_intra_matrix16= NULL;
1082 av_freep(&s->q_intra_matrix);
1083 av_freep(&s->q_inter_matrix);
1084 av_freep(&s->q_intra_matrix16);
1085 av_freep(&s->q_inter_matrix16);
1086 av_freep(&s->input_picture);
1087 av_freep(&s->reordered_input_picture);
1088 av_freep(&s->dct_offset);
/*
 * Sum of absolute errors of a 16x16 block against a constant reference
 * value `ref` (used as a flatness measure around the block mean).
 * NOTE(review): sampled listing — the accumulator declaration, closing
 * braces and `return acc;` are not visible here.
 */
1093 static int get_sae(uint8_t *src, int ref, int stride)
1098 for (y = 0; y < 16; y++) {
1099 for (x = 0; x < 16; x++) {
1100 acc += FFABS(src[x + y * stride] - ref);
/*
 * Estimate how many 16x16 blocks of the frame would be cheaper to code as
 * intra: a block counts when its flatness (SAE around its own mean, plus a
 * 500 margin) is below the SAD against the reference frame.
 * NOTE(review): sampled listing — w/acc declarations, closing braces and
 * the return are not visible here.
 */
1107 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1108 uint8_t *ref, int stride)
/* Only full 16x16 blocks are examined (dimensions rounded down). */
1114 h = s->height & ~15;
1116 for (y = 0; y < h; y += 16) {
1117 for (x = 0; x < w; x += 16) {
1118 int offset = x + y * stride;
1119 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
1121 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1122 int sae = get_sae(src + offset, mean, stride);
1124 acc += sae + 500 < sad;
/*
 * Thin wrapper around ff_alloc_picture() that fills in the encoder's
 * geometry (chroma shifts, strides, mb layout) for a Picture; `shared`
 * is forwarded to let the picture reference caller-owned buffers.
 */
1130 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1132 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1133 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1134 s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1135 &s->linesize, &s->uvlinesize);
/* Accept one user frame into the encoder's reorder buffer.
 * Validates/guesses the pts, either references the user frame directly
 * (when strides and alignment match) or copies it into an internal picture
 * with edge padding, then appends it to s->input_picture[] with the
 * B-frame encoding delay applied.
 * NOTE(review): elided listing — several branches/braces (direct-rendering
 * decision, error paths, padding loop tail) are not visible here. */
1138 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1140 Picture *pic = NULL;
1142 int i, display_picture_number = 0, ret;
/* delay between input and coded order: max_b_frames, else 1 unless low_delay */
1143 int encoding_delay = s->max_b_frames ? s->max_b_frames
1144 : (s->low_delay ? 0 : 1);
1145 int flush_offset = 1;
1150 display_picture_number = s->input_picture_number++;
/* --- pts validation: user-supplied pts must be strictly increasing --- */
1152 if (pts != AV_NOPTS_VALUE) {
1153 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1154 int64_t last = s->user_specified_pts;
1157 av_log(s->avctx, AV_LOG_ERROR,
1158 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1160 return AVERROR(EINVAL);
/* remember pts distance of the first two frames to derive dts later */
1163 if (!s->low_delay && display_picture_number == 1)
1164 s->dts_delta = pts - last;
1166 s->user_specified_pts = pts;
/* --- no pts given: guess by extrapolating the previous one --- */
1168 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1169 s->user_specified_pts =
1170 pts = s->user_specified_pts + 1;
1171 av_log(s->avctx, AV_LOG_INFO,
1172 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
/* fall back to the display index as pts */
1175 pts = display_picture_number;
/* --- can we reference the user buffer directly? all of these must hold --- */
1179 if (!pic_arg->buf[0] ||
1180 pic_arg->linesize[0] != s->linesize ||
1181 pic_arg->linesize[1] != s->uvlinesize ||
1182 pic_arg->linesize[2] != s->uvlinesize)
1184 if ((s->width & 15) || (s->height & 15))
1186 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1188 if (s->linesize & (STRIDE_ALIGN-1))
1191 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1192 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1194 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1198 pic = &s->picture[i];
/* direct path: just take a reference on the caller's frame */
1202 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1205 ret = alloc_picture(s, pic, direct);
/* sanity: in-place layout must match exactly when reusing the buffer */
1210 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1211 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1212 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
/* --- copy path: copy each plane into the padded internal buffer --- */
1215 int h_chroma_shift, v_chroma_shift;
1216 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1220 for (i = 0; i < 3; i++) {
1221 int src_stride = pic_arg->linesize[i];
1222 int dst_stride = i ? s->uvlinesize : s->linesize;
1223 int h_shift = i ? h_chroma_shift : 0;
1224 int v_shift = i ? v_chroma_shift : 0;
1225 int w = s->width >> h_shift;
1226 int h = s->height >> v_shift;
1227 uint8_t *src = pic_arg->data[i];
1228 uint8_t *dst = pic->f->data[i];
/* interlaced MPEG-2 needs extra rows when height is far from 32-aligned */
1231 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1232 && !s->progressive_sequence
1233 && FFALIGN(s->height, 32) - s->height > 16)
1236 if (!s->avctx->rc_buffer_size)
1237 dst += INPLACE_OFFSET;
/* fast path: identical strides let us copy the plane in one memcpy */
1239 if (src_stride == dst_stride)
1240 memcpy(dst, src, src_stride * h);
1243 uint8_t *dst2 = dst;
1245 memcpy(dst2, src, w);
/* replicate edges when dimensions are not MB/pad aligned */
1250 if ((s->width & 15) || (s->height & (vpad-1))) {
1251 s->mpvencdsp.draw_edges(dst, dst_stride,
1260 ret = av_frame_copy_props(pic->f, pic_arg);
1264 pic->f->display_picture_number = display_picture_number;
1265 pic->f->pts = pts; // we set this here to avoid modifiying pic_arg
1267 /* Flushing: When we have not received enough input frames,
1268 * ensure s->input_picture[0] contains the first picture */
1269 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1270 if (s->input_picture[flush_offset])
1273 if (flush_offset <= 1)
1276 encoding_delay = encoding_delay - flush_offset + 1;
1279 /* shift buffer entries */
1280 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1281 s->input_picture[i - flush_offset] = s->input_picture[i];
/* store the new frame at the tail, delayed by encoding_delay slots */
1283 s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether frame `p` is similar enough to reference `ref` to be
 * skipped entirely (frame-skip rate control). Accumulates a per-8x8-block
 * difference metric over all three planes; the accumulation mode is chosen
 * by |frame_skip_exp| (max / L1 / L2 / cubic / quartic), and a negative
 * exponent normalizes the score back per-MB via pow().
 * NOTE(review): elided listing — `score`/plane loop bounds setup and the
 * final return values are not visible. */
1288 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1292 int64_t score64 = 0;
1294 for (plane = 0; plane < 3; plane++) {
1295 const int stride = p->f->linesize[plane];
/* luma plane is scanned at 2x the MB grid (16x16 -> four 8x8 blocks) */
1296 const int bw = plane ? 1 : 2;
1297 for (y = 0; y < s->mb_height * bw; y++) {
1298 for (x = 0; x < s->mb_width * bw; x++) {
/* non-shared pictures carry an INPLACE-style offset of 16 bytes */
1299 int off = p->shared ? 0 : 16;
1300 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1301 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1302 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1304 switch (FFABS(s->avctx->frame_skip_exp)) {
1305 case 0: score = FFMAX(score, v); break;
1306 case 1: score += FFABS(v); break;
1307 case 2: score64 += v * (int64_t)v; break;
1308 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1309 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
/* negative exponent: undo the power and average per macroblock */
1318 if (s->avctx->frame_skip_exp < 0)
1319 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1320 -1.0/s->avctx->frame_skip_exp);
/* skip if below the absolute threshold or the lambda-scaled factor */
1322 if (score64 < s->avctx->frame_skip_threshold)
1324 if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
/* Encode a single frame with the given (temporary) codec context and
 * discard the bitstream; used by estimate_best_b_count() to measure the
 * coded size of candidate GOP structures. The packet is always unreffed.
 * NOTE(review): elided listing — error check and return value omitted. */
1329 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1331 AVPacket pkt = { 0 };
1332 int ret, got_output;
1334 av_init_packet(&pkt);
/* legacy encode API: got_output signals whether pkt holds data */
1335 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1340 av_packet_unref(&pkt);
/* b_frame_strategy == 2: brute-force search for the best number of
 * consecutive B-frames. Encodes downscaled (by 2^brd_scale) copies of the
 * buffered input frames with every candidate B-count j, accumulating a
 * rate-distortion cost (coded bits * lambda2 plus SSE from c->error[]),
 * and returns the j with the lowest cost.
 * NOTE(review): elided listing — tmp_frames setup, rd init, the
 * best_rd/best_b_count update and cleanup are not visible. */
1344 static int estimate_best_b_count(MpegEncContext *s)
1346 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1347 AVCodecContext *c = avcodec_alloc_context3(NULL);
1348 const int scale = s->avctx->brd_scale;
1349 int i, j, out_size, p_lambda, b_lambda, lambda2;
1350 int64_t best_rd = INT64_MAX;
1351 int best_b_count = -1;
1354 return AVERROR(ENOMEM);
1355 av_assert0(scale >= 0 && scale <= 3);
1358 //s->next_picture_ptr->quality;
1359 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1360 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1361 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1362 if (!b_lambda) // FIXME we should do this somewhere else
1363 b_lambda = p_lambda;
/* lambda^2 rescaled to FF_LAMBDA_SHIFT fixed point, rounded */
1364 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* --- configure the scratch encoder to mirror the real one, downscaled --- */
1367 c->width = s->width >> scale;
1368 c->height = s->height >> scale;
1369 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1370 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1371 c->mb_decision = s->avctx->mb_decision;
1372 c->me_cmp = s->avctx->me_cmp;
1373 c->mb_cmp = s->avctx->mb_cmp;
1374 c->me_sub_cmp = s->avctx->me_sub_cmp;
1375 c->pix_fmt = AV_PIX_FMT_YUV420P;
1376 c->time_base = s->avctx->time_base;
1377 c->max_b_frames = s->max_b_frames;
1379 if (avcodec_open2(c, codec, NULL) < 0)
/* --- downscale the reference frame plus the buffered input frames --- */
1382 for (i = 0; i < s->max_b_frames + 2; i++) {
/* i == 0 uses the last coded reference; i > 0 uses input_picture[i-1] */
1383 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1384 s->next_picture_ptr;
1387 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1388 pre_input = *pre_input_ptr;
1389 memcpy(data, pre_input_ptr->f->data, sizeof(data));
/* non-shared internal buffers carry the INPLACE offset */
1391 if (!pre_input.shared && i) {
1392 data[0] += INPLACE_OFFSET;
1393 data[1] += INPLACE_OFFSET;
1394 data[2] += INPLACE_OFFSET;
1397 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1398 s->tmp_frames[i]->linesize[0],
1400 pre_input.f->linesize[0],
1401 c->width, c->height);
1402 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1403 s->tmp_frames[i]->linesize[1],
1405 pre_input.f->linesize[1],
1406 c->width >> 1, c->height >> 1);
1407 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1408 s->tmp_frames[i]->linesize[2],
1410 pre_input.f->linesize[2],
1411 c->width >> 1, c->height >> 1);
/* --- try each candidate B-frame run length j --- */
1415 for (j = 0; j < s->max_b_frames + 1; j++) {
1418 if (!s->input_picture[j])
1421 c->error[0] = c->error[1] = c->error[2] = 0;
/* frame 0 stands in for the existing reference: encode as cheap I */
1423 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1424 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1426 out_size = encode_frame(c, s->tmp_frames[0]);
1428 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1430 for (i = 0; i < s->max_b_frames + 1; i++) {
/* every (j+1)-th frame (and the last) is a P, the rest are Bs */
1431 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1433 s->tmp_frames[i + 1]->pict_type = is_p ?
1434 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1435 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1437 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1439 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1442 /* get the delayed frames */
1444 out_size = encode_frame(c, NULL);
1445 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* add accumulated SSE (distortion) to the rate term */
1448 rd += c->error[0] + c->error[1] + c->error[2];
1459 return best_b_count;
/* Pick the next picture to code and establish coded order.
 * Handles frame skipping, forced I-frames, B-frame run selection
 * (strategies 0/1/2), GOP boundaries, and finally promotes the chosen
 * picture into s->new_picture / s->current_picture_ptr, copying it out of
 * a shared buffer when the encoder needs to modify it.
 * NOTE(review): elided listing — several closing braces, error paths and
 * the function's return are not visible. */
1462 static int select_input_picture(MpegEncContext *s)
/* shift the reorder queue down by one slot */
1466 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1467 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1468 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1470 /* set next picture type & ordering */
1471 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
/* --- frame skipping: drop the frame if it matches the last reference --- */
1472 if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1473 if (s->picture_in_gop_number < s->gop_size &&
1474 s->next_picture_ptr &&
1475 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1476 // FIXME check that te gop check above is +-1 correct
1477 av_frame_unref(s->input_picture[0]->f);
/* account a zero-size frame to the VBV model */
1479 ff_vbv_update(s, 0);
/* --- no reference yet (or intra-only): force an I-frame --- */
1485 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1486 !s->next_picture_ptr || s->intra_only) {
1487 s->reordered_input_picture[0] = s->input_picture[0];
1488 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1489 s->reordered_input_picture[0]->f->coded_picture_number =
1490 s->coded_picture_number++;
/* --- two-pass: take picture types from the pass-1 log --- */
1494 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1495 for (i = 0; i < s->max_b_frames + 1; i++) {
1496 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1498 if (pict_num >= s->rc_context.num_entries)
1500 if (!s->input_picture[i]) {
1501 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1505 s->input_picture[i]->f->pict_type =
1506 s->rc_context.entry[pict_num].new_pict_type;
/* --- B-frame run selection --- */
1510 if (s->avctx->b_frame_strategy == 0) {
/* strategy 0: always use the maximum available run */
1511 b_frames = s->max_b_frames;
1512 while (b_frames && !s->input_picture[b_frames])
1514 } else if (s->avctx->b_frame_strategy == 1) {
/* strategy 1: score each candidate by its intra-MB count vs. predecessor */
1515 for (i = 1; i < s->max_b_frames + 1; i++) {
1516 if (s->input_picture[i] &&
1517 s->input_picture[i]->b_frame_score == 0) {
1518 s->input_picture[i]->b_frame_score =
1520 s->input_picture[i ]->f->data[0],
1521 s->input_picture[i - 1]->f->data[0],
1525 for (i = 0; i < s->max_b_frames + 1; i++) {
/* stop extending the run once a frame looks too intra-heavy */
1526 if (!s->input_picture[i] ||
1527 s->input_picture[i]->b_frame_score - 1 >
1528 s->mb_num / s->avctx->b_sensitivity)
1532 b_frames = FFMAX(0, i - 1);
/* reset scores so the next selection starts fresh */
1535 for (i = 0; i < b_frames + 1; i++) {
1536 s->input_picture[i]->b_frame_score = 0;
1538 } else if (s->avctx->b_frame_strategy == 2) {
1539 b_frames = estimate_best_b_count(s);
1541 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
/* honor explicitly-forced non-B picture types inside the run */
1547 for (i = b_frames - 1; i >= 0; i--) {
1548 int type = s->input_picture[i]->f->pict_type;
1549 if (type && type != AV_PICTURE_TYPE_B)
1552 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1553 b_frames == s->max_b_frames) {
1554 av_log(s->avctx, AV_LOG_ERROR,
1555 "warning, too many b frames in a row\n");
/* --- GOP boundary: clamp the run and force an I where required --- */
1558 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1559 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1560 s->gop_size > s->picture_in_gop_number) {
1561 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1563 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1565 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1569 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1570 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
/* --- emit coded order: anchor first, then its preceding B-frames --- */
1573 s->reordered_input_picture[0] = s->input_picture[b_frames];
1574 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1575 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1576 s->reordered_input_picture[0]->f->coded_picture_number =
1577 s->coded_picture_number++;
1578 for (i = 0; i < b_frames; i++) {
1579 s->reordered_input_picture[i + 1] = s->input_picture[i];
1580 s->reordered_input_picture[i + 1]->f->pict_type =
1582 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1583 s->coded_picture_number++;
1588 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1590 if (s->reordered_input_picture[0]) {
/* B-frames are never referenced; others get full reference flags */
1591 s->reordered_input_picture[0]->reference =
1592 s->reordered_input_picture[0]->f->pict_type !=
1593 AV_PICTURE_TYPE_B ? 3 : 0;
1595 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1598 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1599 // input is a shared pix, so we can't modifiy it -> alloc a new
1600 // one & ensure that the shared one is reuseable
1603 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1606 pic = &s->picture[i];
1608 pic->reference = s->reordered_input_picture[0]->reference;
1609 if (alloc_picture(s, pic, 0) < 0) {
1613 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1617 /* mark us unused / free shared pic */
1618 av_frame_unref(s->reordered_input_picture[0]->f);
1619 s->reordered_input_picture[0]->shared = 0;
1621 s->current_picture_ptr = pic;
1623 // input is not a shared pix -> reuse buffer for current_pix
1624 s->current_picture_ptr = s->reordered_input_picture[0];
1625 for (i = 0; i < 4; i++) {
1626 s->new_picture.f->data[i] += INPLACE_OFFSET;
1629 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1630 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1631 s->current_picture_ptr)) < 0)
1634 s->picture_number = s->new_picture.f->display_picture_number;
/* Post-encode bookkeeping for the just-coded frame: pad the edges of
 * reference pictures (needed for unrestricted motion vectors), remember
 * last picture type / lambda, and mirror stats into deprecated
 * AVCodecContext fields where the compat APIs are still compiled in.
 * NOTE(review): elided listing — enclosing condition braces omitted. */
1639 static void frame_end(MpegEncContext *s)
/* only reference frames need padded edges for MC beyond the picture */
1641 if (s->unrestricted_mv &&
1642 s->current_picture.reference &&
1644 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1645 int hshift = desc->log2_chroma_w;
1646 int vshift = desc->log2_chroma_h;
1647 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1648 s->current_picture.f->linesize[0],
1649 s->h_edge_pos, s->v_edge_pos,
1650 EDGE_WIDTH, EDGE_WIDTH,
1651 EDGE_TOP | EDGE_BOTTOM);
/* chroma planes: scale positions and edge width by the subsampling */
1652 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1653 s->current_picture.f->linesize[1],
1654 s->h_edge_pos >> hshift,
1655 s->v_edge_pos >> vshift,
1656 EDGE_WIDTH >> hshift,
1657 EDGE_WIDTH >> vshift,
1658 EDGE_TOP | EDGE_BOTTOM);
1659 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1660 s->current_picture.f->linesize[2],
1661 s->h_edge_pos >> hshift,
1662 s->v_edge_pos >> vshift,
1663 EDGE_WIDTH >> hshift,
1664 EDGE_WIDTH >> vshift,
1665 EDGE_TOP | EDGE_BOTTOM);
/* remember per-type lambda so rate control can seed the next frame */
1670 s->last_pict_type = s->pict_type;
1671 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1672 if (s->pict_type!= AV_PICTURE_TYPE_B)
1673 s->last_non_b_pict_type = s->pict_type;
1675 #if FF_API_CODED_FRAME
1676 FF_DISABLE_DEPRECATION_WARNINGS
1677 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1678 FF_ENABLE_DEPRECATION_WARNINGS
1680 #if FF_API_ERROR_FRAME
1681 FF_DISABLE_DEPRECATION_WARNINGS
1682 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1683 sizeof(s->current_picture.encoding_error));
1684 FF_ENABLE_DEPRECATION_WARNINGS
/* Refresh the per-coefficient DCT offsets used for noise reduction.
 * Keeps separate running error sums for intra and inter blocks; halves
 * the accumulators once the sample count exceeds 2^16 so the statistics
 * track recent content (exponential forgetting). */
1688 static void update_noise_reduction(MpegEncContext *s)
1692 for (intra = 0; intra < 2; intra++) {
/* decay: halve counts and error sums to bound them and favor recency */
1693 if (s->dct_count[intra] > (1 << 16)) {
1694 for (i = 0; i < 64; i++) {
1695 s->dct_error_sum[intra][i] >>= 1;
1697 s->dct_count[intra] >>= 1;
/* offset[i] ~ noise_reduction * count / error_sum[i], rounded; the +1
 * in the denominator avoids division by zero */
1700 for (i = 0; i < 64; i++) {
1701 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1702 s->dct_count[intra] +
1703 s->dct_error_sum[intra][i] / 2) /
1704 (s->dct_error_sum[intra][i] + 1);
/* Per-frame setup before encoding: rotate last/next/current reference
 * pointers, take fresh refs on the working Picture structs, adjust plane
 * pointers/strides for field pictures, select the matching dequantizers,
 * and refresh noise-reduction tables.
 * NOTE(review): elided listing — some braces and error returns omitted. */
1709 static int frame_start(MpegEncContext *s)
1713 /* mark & release old frames */
1714 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1715 s->last_picture_ptr != s->next_picture_ptr &&
1716 s->last_picture_ptr->f->buf[0]) {
1717 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1720 s->current_picture_ptr->f->pict_type = s->pict_type;
1721 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1723 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1724 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1725 s->current_picture_ptr)) < 0)
/* B-frames do not advance the reference chain */
1728 if (s->pict_type != AV_PICTURE_TYPE_B) {
1729 s->last_picture_ptr = s->next_picture_ptr;
1731 s->next_picture_ptr = s->current_picture_ptr;
1734 if (s->last_picture_ptr) {
1735 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1736 if (s->last_picture_ptr->f->buf[0] &&
1737 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1738 s->last_picture_ptr)) < 0)
1741 if (s->next_picture_ptr) {
1742 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1743 if (s->next_picture_ptr->f->buf[0] &&
1744 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1745 s->next_picture_ptr)) < 0)
/* field pictures: point at the right field and double the strides */
1749 if (s->picture_structure!= PICT_FRAME) {
1751 for (i = 0; i < 4; i++) {
1752 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1753 s->current_picture.f->data[i] +=
1754 s->current_picture.f->linesize[i];
1756 s->current_picture.f->linesize[i] *= 2;
1757 s->last_picture.f->linesize[i] *= 2;
1758 s->next_picture.f->linesize[i] *= 2;
/* pick the dequantizer matching the output format / quant style */
1762 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1763 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1764 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1765 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1766 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1767 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1769 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1770 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1773 if (s->dct_error_sum) {
1774 av_assert2(s->avctx->noise_reduction && s->encoding);
1775 update_noise_reduction(s);
/* Top-level per-frame encode entry point (legacy encode2 API).
 * Loads/reorders the input, encodes the selected picture (re-encoding at a
 * higher lambda if VBV would overflow), writes stats, appends VBV stuffing,
 * patches the MPEG-1/2 vbv_delay field for CBR, and fills the output packet
 * pts/dts/flags.
 * NOTE(review): elided listing — numerous braces, `goto`/return targets and
 * a few statements between the visible lines are omitted. */
1781 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1782                           const AVFrame *pic_arg, int *got_packet)
1784 MpegEncContext *s = avctx->priv_data;
1785 int i, stuffing_count, ret;
1786 int context_count = s->slice_context_count;
1788 s->vbv_ignore_qmax = 0;
1790 s->picture_in_gop_number++;
1792 if (load_input_picture(s, pic_arg) < 0)
1795 if (select_input_picture(s) < 0) {
/* --- a picture was selected: allocate the packet and encode --- */
1800 if (s->new_picture.f->data[0]) {
/* single-slice + no caller buffer: let the buffer grow as we write */
1801 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1802 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1804 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1805 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
/* optional H.263 macroblock-info side data for RTP packetizers */
1808 s->mb_info_ptr = av_packet_new_side_data(pkt,
1809 AV_PKT_DATA_H263_MB_INFO,
1810 s->mb_width*s->mb_height*12);
1811 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* split the packet between slice threads proportionally to their MB rows */
1814 for (i = 0; i < context_count; i++) {
1815 int start_y = s->thread_context[i]->start_mb_y;
1816 int end_y = s->thread_context[i]-> end_mb_y;
1817 int h = s->mb_height;
1818 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1819 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1821 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1824 s->pict_type = s->new_picture.f->pict_type;
1826 ret = frame_start(s);
1830 ret = encode_picture(s, s->picture_number);
1831 if (growing_buffer) {
/* the internal byte buffer may have been reallocated while encoding */
1832 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1833 pkt->data = s->pb.buf;
1834 pkt->size = avctx->internal->byte_buffer_size;
1839 #if FF_API_STAT_BITS
1840 FF_DISABLE_DEPRECATION_WARNINGS
1841 avctx->header_bits = s->header_bits;
1842 avctx->mv_bits = s->mv_bits;
1843 avctx->misc_bits = s->misc_bits;
1844 avctx->i_tex_bits = s->i_tex_bits;
1845 avctx->p_tex_bits = s->p_tex_bits;
1846 avctx->i_count = s->i_count;
1847 // FIXME f/b_count in avctx
1848 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1849 avctx->skip_count = s->skip_count;
1850 FF_ENABLE_DEPRECATION_WARNINGS
1855 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1856 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
/* --- VBV check: if the frame is too big, raise lambda and re-encode --- */
1858 if (avctx->rc_buffer_size) {
1859 RateControlContext *rcc = &s->rc_context;
1860 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1861 int hq = (s->avctx->mb_decision == FF_MB_DECISION_RD || s->avctx->trellis);
1862 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1864 if (put_bits_count(&s->pb) > max_size &&
1865 s->lambda < s->lmax) {
/* bump lambda by at least min_step, scaled with (qscale+1)/qscale */
1866 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1867 (s->qscale + 1) / s->qscale);
1868 if (s->adaptive_quant) {
1870 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1871 s->lambda_table[i] =
1872 FFMAX(s->lambda_table[i] + min_step,
1873 s->lambda_table[i] * (s->qscale + 1) /
1876 s->mb_skipped = 0; // done in frame_start()
1877 // done in encode_picture() so we must undo it
1878 if (s->pict_type == AV_PICTURE_TYPE_P) {
1879 if (s->flipflop_rounding ||
1880 s->codec_id == AV_CODEC_ID_H263P ||
1881 s->codec_id == AV_CODEC_ID_MPEG4)
1882 s->no_rounding ^= 1;
1884 if (s->pict_type != AV_PICTURE_TYPE_B) {
1885 s->time_base = s->last_time_base;
1886 s->last_non_b_time = s->time - s->pp_time;
/* rewind every slice thread's bitstream writer for the retry */
1888 for (i = 0; i < context_count; i++) {
1889 PutBitContext *pb = &s->thread_context[i]->pb;
1890 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1892 s->vbv_ignore_qmax = 1;
1893 av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1897 av_assert0(s->avctx->rc_max_rate);
1900 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1901 ff_write_pass1_stats(s);
/* export per-plane encoding error (SSE) for PSNR reporting */
1903 for (i = 0; i < 4; i++) {
1904 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1905 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1907 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1908 s->current_picture_ptr->encoding_error,
1909 (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1912 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1913 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1914 s->misc_bits + s->i_tex_bits +
1916 flush_put_bits(&s->pb);
1917 s->frame_bits = put_bits_count(&s->pb);
/* --- VBV stuffing: pad out CBR frames to keep the buffer model valid --- */
1919 stuffing_count = ff_vbv_update(s, s->frame_bits);
1920 s->stuffing_bits = 8*stuffing_count;
1921 if (stuffing_count) {
1922 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1923 stuffing_count + 50) {
1924 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1928 switch (s->codec_id) {
1929 case AV_CODEC_ID_MPEG1VIDEO:
1930 case AV_CODEC_ID_MPEG2VIDEO:
/* MPEG-1/2: stuffing is plain zero bytes */
1931 while (stuffing_count--) {
1932 put_bits(&s->pb, 8, 0);
1935 case AV_CODEC_ID_MPEG4:
/* MPEG-4: stuffing via a filler startcode (0x000001C3) then 0xFF bytes */
1936 put_bits(&s->pb, 16, 0);
1937 put_bits(&s->pb, 16, 0x1C3);
1938 stuffing_count -= 4;
1939 while (stuffing_count--) {
1940 put_bits(&s->pb, 8, 0xFF);
1944 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1946 flush_put_bits(&s->pb);
1947 s->frame_bits = put_bits_count(&s->pb);
1950 /* update mpeg1/2 vbv_delay for CBR */
1951 if (s->avctx->rc_max_rate &&
1952 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1953 s->out_format == FMT_MPEG1 &&
/* vbv_delay must fit in its 16-bit header field (90 kHz ticks) */
1954 90000LL * (avctx->rc_buffer_size - 1) <=
1955 s->avctx->rc_max_rate * 0xFFFFLL) {
1956 AVCPBProperties *props;
1959 int vbv_delay, min_delay;
1960 double inbits = s->avctx->rc_max_rate *
1961 av_q2d(s->avctx->time_base);
1962 int minbits = s->frame_bits - 8 *
1963 (s->vbv_delay_ptr - s->pb.buf - 1);
1964 double bits = s->rc_context.buffer_index + minbits - inbits;
1967 av_log(s->avctx, AV_LOG_ERROR,
1968 "Internal error, negative bits\n");
1970 assert(s->repeat_first_field == 0);
1972 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1973 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1974 s->avctx->rc_max_rate;
1976 vbv_delay = FFMAX(vbv_delay, min_delay);
1978 av_assert0(vbv_delay < 0xFFFF);
/* patch the 16-bit vbv_delay in place across three header bytes */
1980 s->vbv_delay_ptr[0] &= 0xF8;
1981 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1982 s->vbv_delay_ptr[1] = vbv_delay >> 5;
1983 s->vbv_delay_ptr[2] &= 0x07;
1984 s->vbv_delay_ptr[2] |= vbv_delay << 3;
1986 props = av_cpb_properties_alloc(&props_size);
1988 return AVERROR(ENOMEM);
/* CPB side data uses 27 MHz units: 90 kHz * 300 */
1989 props->vbv_delay = vbv_delay * 300;
1991 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
1992 (uint8_t*)props, props_size);
1998 #if FF_API_VBV_DELAY
1999 FF_DISABLE_DEPRECATION_WARNINGS
2000 avctx->vbv_delay = vbv_delay * 300;
2001 FF_ENABLE_DEPRECATION_WARNINGS
2004 s->total_bits += s->frame_bits;
2005 #if FF_API_STAT_BITS
2006 FF_DISABLE_DEPRECATION_WARNINGS
2007 avctx->frame_bits = s->frame_bits;
2008 FF_ENABLE_DEPRECATION_WARNINGS
/* --- fill packet timing: dts lags pts by dts_delta when B-frames exist --- */
2012 pkt->pts = s->current_picture.f->pts;
2013 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2014 if (!s->current_picture.f->coded_picture_number)
2015 pkt->dts = pkt->pts - s->dts_delta;
2017 pkt->dts = s->reordered_pts;
2018 s->reordered_pts = pkt->pts;
2020 pkt->dts = pkt->pts;
2021 if (s->current_picture.f->key_frame)
2022 pkt->flags |= AV_PKT_FLAG_KEY;
2024 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2029 /* release non-reference frames */
2030 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2031 if (!s->picture[i].reference)
2032 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2035 av_assert1((s->frame_bits & 7) == 0);
2037 pkt->size = s->frame_bits / 8;
2038 *got_packet = !!pkt->size;
/* Zero out block `n` when it contains only a few small high-frequency
 * coefficients whose coding cost outweighs their visual benefit.
 * Each nonzero coefficient adds a position-dependent weight from `tab`
 * (low frequencies cost more to drop); if the total stays below
 * `threshold` the whole block (except possibly DC) is cleared.
 * NOTE(review): elided listing — score accumulation lines and some
 * braces between the visible lines are omitted. */
2042 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2043                                                 int n, int threshold)
/* per-scan-position elimination weight: DC and low freqs weigh most */
2045 static const char tab[64] = {
2046     3, 2, 2, 1, 1, 1, 1, 1,
2047     1, 1, 1, 1, 1, 1, 1, 1,
2048     1, 1, 1, 1, 1, 1, 1, 1,
2049     0, 0, 0, 0, 0, 0, 0, 0,
2050     0, 0, 0, 0, 0, 0, 0, 0,
2051     0, 0, 0, 0, 0, 0, 0, 0,
2052     0, 0, 0, 0, 0, 0, 0, 0,
2053     0, 0, 0, 0, 0, 0, 0, 0
2058 int16_t *block = s->block[n];
2059 const int last_index = s->block_last_index[n];
/* negative threshold means: keep the DC coefficient when eliminating */
2062 if (threshold < 0) {
2064     threshold = -threshold;
2068 /* Are all we could set to zero already zero? */
2069 if (last_index <= skip_dc - 1)
2072 for (i = 0; i <= last_index; i++) {
2073     const int j = s->intra_scantable.permutated[i];
2074     const int level = FFABS(block[j]);
2076     if (skip_dc && i == 0)
/* any coefficient with magnitude > 1 disqualifies elimination */
2080     } else if (level > 1) {
2086 if (score >= threshold)
/* clear everything after (optionally) the DC coefficient */
2088 for (i = skip_dc; i <= last_index; i++) {
2089     const int j = s->intra_scantable.permutated[i];
/* last_index 0 keeps DC only; -1 marks the block fully empty */
2093     s->block_last_index[n] = 0;
2095     s->block_last_index[n] = -1;
/* Clamp quantized coefficients into the codec's representable range
 * [s->min_qcoeff, s->max_qcoeff], counting how many overflowed; warns once
 * per block in simple MB-decision mode (RD modes handle overflow via
 * requantization instead).
 * NOTE(review): elided listing — clamp assignments and the overflow
 * increments between the visible lines are omitted. */
2098 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2102 const int maxlevel = s->max_qcoeff;
2103 const int minlevel = s->min_qcoeff;
/* intra DC is coded separately and must never be clipped here */
2107 i = 1; // skip clipping of intra dc
2111 for (; i <= last_index; i++) {
2112     const int j = s->intra_scantable.permutated[i];
2113     int level = block[j];
2115     if (level > maxlevel) {
2118     } else if (level < minlevel) {
2126 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2127     av_log(s->avctx, AV_LOG_INFO,
2128            "warning, clipping %d dct coefficients to %d..%d\n",
2129            overflow, minlevel, maxlevel);
/* Compute an 8x8 perceptual weight map for noise shaping: for each pixel,
 * measure the local variance over its (up to 3x3) neighborhood and store
 * a scaled standard deviation — flat areas get low weights (noise there is
 * more visible), busy areas high weights.
 * NOTE(review): elided listing — `count`/`sum`/`sqr` accumulation lines
 * are not visible here. */
2132 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2136 for (y = 0; y < 8; y++) {
2137     for (x = 0; x < 8; x++) {
/* clamp the 3x3 neighborhood to the 8x8 block borders */
2143         for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2144             for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2145                 int v = ptr[x2 + y2 * stride];
/* 36 * sqrt(count*sqr - sum^2) / count = 36 * stddev of the neighborhood */
2151 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2156 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2157 int motion_x, int motion_y,
2158 int mb_block_height,
2162 int16_t weight[12][64];
2163 int16_t orig[12][64];
2164 const int mb_x = s->mb_x;
2165 const int mb_y = s->mb_y;
2168 int dct_offset = s->linesize * 8; // default for progressive frames
2169 int uv_dct_offset = s->uvlinesize * 8;
2170 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2171 ptrdiff_t wrap_y, wrap_c;
2173 for (i = 0; i < mb_block_count; i++)
2174 skip_dct[i] = s->skipdct;
2176 if (s->adaptive_quant) {
2177 const int last_qp = s->qscale;
2178 const int mb_xy = mb_x + mb_y * s->mb_stride;
2180 s->lambda = s->lambda_table[mb_xy];
2183 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2184 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2185 s->dquant = s->qscale - last_qp;
2187 if (s->out_format == FMT_H263) {
2188 s->dquant = av_clip(s->dquant, -2, 2);
2190 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2192 if (s->pict_type == AV_PICTURE_TYPE_B) {
2193 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2196 if (s->mv_type == MV_TYPE_8X8)
2202 ff_set_qscale(s, last_qp + s->dquant);
2203 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2204 ff_set_qscale(s, s->qscale + s->dquant);
2206 wrap_y = s->linesize;
2207 wrap_c = s->uvlinesize;
2208 ptr_y = s->new_picture.f->data[0] +
2209 (mb_y * 16 * wrap_y) + mb_x * 16;
2210 ptr_cb = s->new_picture.f->data[1] +
2211 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2212 ptr_cr = s->new_picture.f->data[2] +
2213 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2215 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2216 uint8_t *ebuf = s->sc.edge_emu_buffer + 36 * wrap_y;
2217 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2218 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2219 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2221 16, 16, mb_x * 16, mb_y * 16,
2222 s->width, s->height);
2224 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2226 mb_block_width, mb_block_height,
2227 mb_x * mb_block_width, mb_y * mb_block_height,
2229 ptr_cb = ebuf + 16 * wrap_y;
2230 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2232 mb_block_width, mb_block_height,
2233 mb_x * mb_block_width, mb_y * mb_block_height,
2235 ptr_cr = ebuf + 16 * wrap_y + 16;
2239 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2240 int progressive_score, interlaced_score;
2242 s->interlaced_dct = 0;
2243 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2244 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2245 NULL, wrap_y, 8) - 400;
2247 if (progressive_score > 0) {
2248 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2249 NULL, wrap_y * 2, 8) +
2250 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2251 NULL, wrap_y * 2, 8);
2252 if (progressive_score > interlaced_score) {
2253 s->interlaced_dct = 1;
2255 dct_offset = wrap_y;
2256 uv_dct_offset = wrap_c;
2258 if (s->chroma_format == CHROMA_422 ||
2259 s->chroma_format == CHROMA_444)
2265 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2266 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2267 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2268 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2270 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2274 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2275 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2276 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2277 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2278 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2279 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2280 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2281 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2282 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2283 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2284 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2285 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2289 op_pixels_func (*op_pix)[4];
2290 qpel_mc_func (*op_qpix)[16];
2291 uint8_t *dest_y, *dest_cb, *dest_cr;
2293 dest_y = s->dest[0];
2294 dest_cb = s->dest[1];
2295 dest_cr = s->dest[2];
2297 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2298 op_pix = s->hdsp.put_pixels_tab;
2299 op_qpix = s->qdsp.put_qpel_pixels_tab;
2301 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2302 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2305 if (s->mv_dir & MV_DIR_FORWARD) {
2306 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2307 s->last_picture.f->data,
2309 op_pix = s->hdsp.avg_pixels_tab;
2310 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2312 if (s->mv_dir & MV_DIR_BACKWARD) {
2313 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2314 s->next_picture.f->data,
2318 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2319 int progressive_score, interlaced_score;
2321 s->interlaced_dct = 0;
2322 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2323 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2327 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2328 progressive_score -= 400;
2330 if (progressive_score > 0) {
2331 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2333 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2337 if (progressive_score > interlaced_score) {
2338 s->interlaced_dct = 1;
2340 dct_offset = wrap_y;
2341 uv_dct_offset = wrap_c;
2343 if (s->chroma_format == CHROMA_422)
2349 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2350 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2351 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2352 dest_y + dct_offset, wrap_y);
2353 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2354 dest_y + dct_offset + 8, wrap_y);
2356 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2360 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2361 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2362 if (!s->chroma_y_shift) { /* 422 */
2363 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2364 dest_cb + uv_dct_offset, wrap_c);
2365 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2366 dest_cr + uv_dct_offset, wrap_c);
2369 /* pre quantization */
2370 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2371 2 * s->qscale * s->qscale) {
2373 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2375 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2377 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2378 wrap_y, 8) < 20 * s->qscale)
2380 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2381 wrap_y, 8) < 20 * s->qscale)
2383 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2385 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2387 if (!s->chroma_y_shift) { /* 422 */
2388 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2389 dest_cb + uv_dct_offset,
2390 wrap_c, 8) < 20 * s->qscale)
2392 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2393 dest_cr + uv_dct_offset,
2394 wrap_c, 8) < 20 * s->qscale)
2400 if (s->quantizer_noise_shaping) {
2402 get_visual_weight(weight[0], ptr_y , wrap_y);
2404 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2406 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2408 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2410 get_visual_weight(weight[4], ptr_cb , wrap_c);
2412 get_visual_weight(weight[5], ptr_cr , wrap_c);
2413 if (!s->chroma_y_shift) { /* 422 */
2415 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2418 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2421 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2424 /* DCT & quantize */
2425 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2427 for (i = 0; i < mb_block_count; i++) {
2430 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2431 // FIXME we could decide to change to quantizer instead of
2433 // JS: I don't think that would be a good idea it could lower
2434 // quality instead of improve it. Just INTRADC clipping
2435 // deserves changes in quantizer
2437 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2439 s->block_last_index[i] = -1;
2441 if (s->quantizer_noise_shaping) {
2442 for (i = 0; i < mb_block_count; i++) {
2444 s->block_last_index[i] =
2445 dct_quantize_refine(s, s->block[i], weight[i],
2446 orig[i], i, s->qscale);
2451 if (s->luma_elim_threshold && !s->mb_intra)
2452 for (i = 0; i < 4; i++)
2453 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2454 if (s->chroma_elim_threshold && !s->mb_intra)
2455 for (i = 4; i < mb_block_count; i++)
2456 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2458 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2459 for (i = 0; i < mb_block_count; i++) {
2460 if (s->block_last_index[i] == -1)
2461 s->coded_score[i] = INT_MAX / 256;
2466 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2467 s->block_last_index[4] =
2468 s->block_last_index[5] = 0;
2470 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2471 if (!s->chroma_y_shift) { /* 422 / 444 */
2472 for (i=6; i<12; i++) {
2473 s->block_last_index[i] = 0;
2474 s->block[i][0] = s->block[4][0];
2479 // non c quantize code returns incorrect block_last_index FIXME
2480 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2481 for (i = 0; i < mb_block_count; i++) {
2483 if (s->block_last_index[i] > 0) {
2484 for (j = 63; j > 0; j--) {
2485 if (s->block[i][s->intra_scantable.permutated[j]])
2488 s->block_last_index[i] = j;
2493 /* huffman encode */
2494 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2495 case AV_CODEC_ID_MPEG1VIDEO:
2496 case AV_CODEC_ID_MPEG2VIDEO:
2497 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2498 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2500 case AV_CODEC_ID_MPEG4:
2501 if (CONFIG_MPEG4_ENCODER)
2502 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2504 case AV_CODEC_ID_MSMPEG4V2:
2505 case AV_CODEC_ID_MSMPEG4V3:
2506 case AV_CODEC_ID_WMV1:
2507 if (CONFIG_MSMPEG4_ENCODER)
2508 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2510 case AV_CODEC_ID_WMV2:
2511 if (CONFIG_WMV2_ENCODER)
2512 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2514 case AV_CODEC_ID_H261:
2515 if (CONFIG_H261_ENCODER)
2516 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2518 case AV_CODEC_ID_H263:
2519 case AV_CODEC_ID_H263P:
2520 case AV_CODEC_ID_FLV1:
2521 case AV_CODEC_ID_RV10:
2522 case AV_CODEC_ID_RV20:
2523 if (CONFIG_H263_ENCODER)
2524 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2526 case AV_CODEC_ID_MJPEG:
2527 case AV_CODEC_ID_AMV:
2528 if (CONFIG_MJPEG_ENCODER)
2529 ff_mjpeg_encode_mb(s, s->block);
/**
 * Encode one macroblock, dispatching to encode_mb_internal() with the
 * block geometry implied by the chroma subsampling:
 * 4:2:0 -> 8x8 chroma, 6 blocks; 4:2:2 -> 16x8 chroma, 8 blocks;
 * otherwise (4:4:4) -> 16x16 chroma, 12 blocks.
 */
2536 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2538     if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2539     else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2540     else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/**
 * Save the encoder-state fields that encoding a macroblock may modify,
 * copying them from context s into backup context d.  Used by
 * encode_mb_hq() so that several candidate MB encodings can each start
 * from identical state.  The counterpart is copy_context_after_encode().
 */
2543 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2546     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2549     d->mb_skip_run= s->mb_skip_run;
2551         d->last_dc[i] = s->last_dc[i];
2554     d->mv_bits= s->mv_bits;
2555     d->i_tex_bits= s->i_tex_bits;
2556     d->p_tex_bits= s->p_tex_bits;
2557     d->i_count= s->i_count;
2558     d->f_count= s->f_count;
2559     d->b_count= s->b_count;
2560     d->skip_count= s->skip_count;
2561     d->misc_bits= s->misc_bits;
2565     d->qscale= s->qscale;
2566     d->dquant= s->dquant;
     /* esc3_level_length is MSMPEG4-specific escape-coding state */
2568     d->esc3_level_length= s->esc3_level_length;
/**
 * Copy the post-encode state of context s into context d.  Compared to
 * copy_context_before_encode() this additionally transfers the results
 * of the encode: motion vectors, intra/skip flags, per-block last
 * coefficient indices, interlaced-DCT flag and (for data partitioning)
 * the texture PutBitContext.  Used to keep the "best" candidate state
 * in encode_mb_hq().
 */
2571 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2574     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2575     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2578     d->mb_skip_run= s->mb_skip_run;
2580         d->last_dc[i] = s->last_dc[i];
2583     d->mv_bits= s->mv_bits;
2584     d->i_tex_bits= s->i_tex_bits;
2585     d->p_tex_bits= s->p_tex_bits;
2586     d->i_count= s->i_count;
2587     d->f_count= s->f_count;
2588     d->b_count= s->b_count;
2589     d->skip_count= s->skip_count;
2590     d->misc_bits= s->misc_bits;
2592     d->mb_intra= s->mb_intra;
2593     d->mb_skipped= s->mb_skipped;
2594     d->mv_type= s->mv_type;
2595     d->mv_dir= s->mv_dir;
2597     if(s->data_partitioning){
2599         d->tex_pb= s->tex_pb;
2603         d->block_last_index[i]= s->block_last_index[i];
2604     d->interlaced_dct= s->interlaced_dct;
2605     d->qscale= s->qscale;
     /* MSMPEG4-specific escape-coding state */
2607     d->esc3_level_length= s->esc3_level_length;
/**
 * Encode one macroblock with one candidate coding mode ("high quality"
 * mode decision helper).  Restores state from *backup, encodes into the
 * scratch bit buffer pb[*next_block] (and pb2/tex_pb when data
 * partitioning), scores the result either by pure bit count or, for
 * FF_MB_DECISION_RD, by rate-distortion cost (bits*lambda2 + SSE), and
 * if the score beats *dmin records the winning state into *best.
 *
 * @param type     CANDIDATE_MB_TYPE_* being tried
 * @param dmin     in/out: best score so far
 * @param next_block in/out: which of the two scratch buffers to use next
 */
2610 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2611                            PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2612                            int *dmin, int *next_block, int motion_x, int motion_y)
2615     uint8_t *dest_backup[3];
2617     copy_context_before_encode(s, backup, type);
2619     s->block= s->blocks[*next_block];
2620     s->pb= pb[*next_block];
2621     if(s->data_partitioning){
2622         s->pb2   = pb2   [*next_block];
2623         s->tex_pb= tex_pb[*next_block];
     /* redirect reconstruction output into the RD scratchpad so the real
      * destination picture is not clobbered by rejected candidates */
2627         memcpy(dest_backup, s->dest, sizeof(s->dest));
2628         s->dest[0] = s->sc.rd_scratchpad;
2629         s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2630         s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2631         av_assert0(s->linesize >= 32); //FIXME
2634     encode_mb(s, motion_x, motion_y);
2636     score= put_bits_count(&s->pb);
2637     if(s->data_partitioning){
2638         score+= put_bits_count(&s->pb2);
2639         score+= put_bits_count(&s->tex_pb);
2642     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2643         ff_mpv_decode_mb(s, s->block);
     /* RD cost: rate term scaled by lambda2 plus distortion (SSE) term */
2645         score *= s->lambda2;
2646         score += sse_mb(s) << FF_LAMBDA_SHIFT;
2650         memcpy(s->dest, dest_backup, sizeof(s->dest));
2657         copy_context_after_encode(best, s, type);
/**
 * Sum of squared errors between two w x h pixel regions.
 * Uses the optimized mecc.sse functions for the common 16x16 and 8x8
 * cases; otherwise falls back to a scalar loop using the shared
 * square table (ff_square_tab is biased by 256 to allow negative
 * differences as indices).
 */
2661 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2662     uint32_t *sq = ff_square_tab + 256;
2667         return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2668     else if(w==8 && h==8)
2669         return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2673             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/**
 * Distortion of the current macroblock: compares the reconstructed MB
 * in s->dest against the source picture (s->new_picture).  For full
 * 16x16 interior MBs uses the fast mecc.sse (or mecc.nsse when
 * avctx->mb_cmp == FF_CMP_NSSE, a noise-preserving SSE variant);
 * edge MBs clipped by the picture border fall back to the generic
 * sse() helper with the clipped width/height.
 */
2682 static int sse_mb(MpegEncContext *s){
2686     if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2687     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2690         if(s->avctx->mb_cmp == FF_CMP_NSSE){
2691             return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2692                    s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2693                    s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2695             return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2696                    s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2697                    s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2700         return  sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2701                +sse(s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2702                +sse(s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/**
 * Slice-thread worker for the motion-estimation pre-pass.
 * Runs ff_pre_estimate_p_frame_motion() for every MB of this slice,
 * iterating bottom-up / right-to-left (reverse raster order) with the
 * pre-pass diamond size (avctx->pre_dia_size).
 * arg is a MpegEncContext* for this slice.
 */
2705 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2706     MpegEncContext *s= *(void**)arg;
2710     s->me.dia_size= s->avctx->pre_dia_size;
2711     s->first_slice_line=1;
2712     for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2713         for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2714             ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2716         s->first_slice_line=0;
/**
 * Slice-thread worker for the main motion-estimation pass.
 * For each MB of the slice (raster order) computes motion vectors and
 * MB type and stores them in the context: B-frames use
 * ff_estimate_b_frame_motion(), others ff_estimate_p_frame_motion().
 * block_index[0..3] are advanced by 2 per MB (two 8x8 luma columns).
 * arg is a MpegEncContext* for this slice.
 */
2724 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2725     MpegEncContext *s= *(void**)arg;
2727     ff_check_alignment();
2729     s->me.dia_size= s->avctx->dia_size;
2730     s->first_slice_line=1;
2731     for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2732         s->mb_x=0; //for block init below
2733         ff_init_block_index(s);
2734         for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2735             s->block_index[0]+=2;
2736             s->block_index[1]+=2;
2737             s->block_index[2]+=2;
2738             s->block_index[3]+=2;
2740             /* compute motion vector & mb_type and store in context */
2741             if(s->pict_type==AV_PICTURE_TYPE_B)
2742                 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2744                 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2746         s->first_slice_line=0;
/**
 * Slice-thread worker computing per-macroblock luma statistics.
 * For each 16x16 luma block of the source picture it stores:
 *   mb_var[]  = spatial variance ((sum(x^2) - sum(x)^2/256 + 500) >> 8,
 *               with +128 rounding)
 *   mb_mean[] = rounded mean ((sum+128)>>8)
 * and accumulates the variance sum into me.mb_var_sum_temp for later
 * merging across threads.  arg is a MpegEncContext* for this slice.
 */
2751 static int mb_var_thread(AVCodecContext *c, void *arg){
2752     MpegEncContext *s= *(void**)arg;
2755     ff_check_alignment();
2757     for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2758         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2761             uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2763             int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2765             varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2766                     (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2768             s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2769             s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2770             s->me.mb_var_sum_temp    += varc;
/**
 * Finalize the bitstream for the current slice:
 * MPEG-4 merges data partitions (if used) and writes stuffing bits,
 * MJPEG writes its stuffing; the output is then byte-aligned and
 * flushed.  With two-pass encoding (PASS1) the alignment/stuffing
 * bits are accounted as misc_bits.
 */
2776 static void write_slice_end(MpegEncContext *s){
2777     if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2778         if(s->partitioned_frame){
2779             ff_mpeg4_merge_partitions(s);
2782         ff_mpeg4_stuffing(&s->pb);
2783     }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2784         ff_mjpeg_encode_stuffing(s);
2787     avpriv_align_put_bits(&s->pb);
2788     flush_put_bits(&s->pb);
2790     if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2791         s->misc_bits+= get_bits_diff(s);
/**
 * Append one 12-byte macroblock-info record to the side-data buffer
 * (mb_info_ptr), written just behind the current end (mb_info_size).
 * Record layout: bit offset of the MB in the stream (LE32), qscale,
 * GOB number, MB address within the GOB (LE16), predicted MV (hmv1,
 * vmv1) and two zero bytes for the unimplemented second MV pair.
 * Used for H.263 (consumers parse this as AV_PKT_DATA_H263_MB_INFO —
 * presumably; confirm against the packet side-data reader).
 */
2794 static void write_mb_info(MpegEncContext *s)
2796     uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2797     int offset = put_bits_count(&s->pb);
2798     int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2799     int gobn = s->mb_y / s->gob_index;
2801     if (CONFIG_H263_ENCODER)
2802         ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2803     bytestream_put_le32(&ptr, offset);
2804     bytestream_put_byte(&ptr, s->qscale);
2805     bytestream_put_byte(&ptr, gobn);
2806     bytestream_put_le16(&ptr, mba);
2807     bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2808     bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2809     /* 4MV not implemented */
2810     bytestream_put_byte(&ptr, 0); /* hmv2 */
2811     bytestream_put_byte(&ptr, 0); /* vmv2 */
/**
 * Bookkeeping for the mb_info side data: decide whether a new 12-byte
 * record slot is needed (every s->mb_info bytes of output) and track
 * the byte positions (prev_mb_info/last_mb_info) used to detect the
 * boundary.  Called both before writing a startcode and at each MB;
 * the startcode path may reserve a slot that is filled on the next
 * non-startcode call (see comment below).
 */
2814 static void update_mb_info(MpegEncContext *s, int startcode)
2818     if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2819         s->mb_info_size += 12;
2820         s->prev_mb_info = s->last_mb_info;
2823         s->prev_mb_info = put_bits_count(&s->pb)/8;
2824         /* This might have incremented mb_info_size above, and we return without
2825          * actually writing any info into that slot yet. But in that case,
2826          * this will be called again at the start of the after writing the
2827          * start code, actually writing the mb info. */
2831     s->last_mb_info = put_bits_count(&s->pb)/8;
2832     if (!s->mb_info_size)
2833         s->mb_info_size += 12;
/**
 * Grow the encoder's output bit buffer when fewer than @p threshold
 * bytes remain.  Only possible with a single slice context and when
 * the PutBitContext writes directly into avctx->internal->byte_buffer;
 * the old contents are copied, the PutBitContext is rebased onto the
 * new buffer, and the ptr_lastgob / vbv_delay_ptr pointers into the
 * buffer are re-anchored.
 *
 * @param threshold     minimum number of free bytes required
 * @param size_increase how many extra bytes to allocate
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure,
 *         AVERROR(EINVAL) if the buffer could not be grown enough
 */
2837 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2839     if (   s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2840         && s->slice_context_count == 1
2841         && s->pb.buf == s->avctx->internal->byte_buffer) {
2842         int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2843         int vbv_pos     = s->vbv_delay_ptr - s->pb.buf;
2845         uint8_t *new_buffer = NULL;
2846         int new_buffer_size = 0;
2848         av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2849                               s->avctx->internal->byte_buffer_size + size_increase);
2851             return AVERROR(ENOMEM);
2853         memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2854         av_free(s->avctx->internal->byte_buffer);
2855         s->avctx->internal->byte_buffer      = new_buffer;
2856         s->avctx->internal->byte_buffer_size = new_buffer_size;
2857         rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2858         s->ptr_lastgob   = s->pb.buf + lastgob_pos;
2859         s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2861     if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2862         return AVERROR(EINVAL);
/**
 * Main per-slice encoding worker (run once per slice context).
 * Walks every macroblock of the slice in raster order and:
 *   - grows the output buffer / checks for overflow,
 *   - writes GOB / video-packet / slice resync headers when needed,
 *   - for multi-candidate MBs (mb_decision >= RD, or QP_RD) tries each
 *     candidate mode via encode_mb_hq() double-buffered into scratch
 *     PutBitContexts and keeps the cheapest,
 *   - for single-candidate MBs sets up MVs from the ME tables and calls
 *     encode_mb() directly,
 *   - updates motion-val tables, PSNR error accumulation and the
 *     in-loop filter for H.263.
 * arg is a MpegEncContext* for this slice; returns 0 on success or a
 * negative error on buffer overflow (exact return paths are outside
 * the lines shown here).
 */
2866 static int encode_thread(AVCodecContext *c, void *arg){
2867     MpegEncContext *s= *(void**)arg;
2868     int mb_x, mb_y, pdif = 0;
2869     int chr_h= 16>>s->chroma_y_shift;
2871     MpegEncContext best_s = { 0 }, backup_s;
2872     uint8_t bit_buf[2][MAX_MB_BYTES];
2873     uint8_t bit_buf2[2][MAX_MB_BYTES];
2874     uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2875     PutBitContext pb[2], pb2[2], tex_pb[2];
2877     ff_check_alignment();
     /* double-buffered scratch bitstreams for candidate-mode encoding */
2880         init_put_bits(&pb    [i], bit_buf    [i], MAX_MB_BYTES);
2881         init_put_bits(&pb2   [i], bit_buf2   [i], MAX_MB_BYTES);
2882         init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2885     s->last_bits= put_bits_count(&s->pb);
2896         /* init last dc values */
2897         /* note: quant matrix value (8) is implied here */
2898         s->last_dc[i] = 128 << s->intra_dc_precision;
2900         s->current_picture.encoding_error[i] = 0;
2902     if(s->codec_id==AV_CODEC_ID_AMV){
     /* AMV uses different DC prediction resets per component */
2903         s->last_dc[0] = 128*8/13;
2904         s->last_dc[1] = 128*8/14;
2905         s->last_dc[2] = 128*8/14;
2908     memset(s->last_mv, 0, sizeof(s->last_mv));
2912     switch(s->codec_id){
2913     case AV_CODEC_ID_H263:
2914     case AV_CODEC_ID_H263P:
2915     case AV_CODEC_ID_FLV1:
2916         if (CONFIG_H263_ENCODER)
2917             s->gob_index = H263_GOB_HEIGHT(s->height);
2919     case AV_CODEC_ID_MPEG4:
2920         if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2921             ff_mpeg4_init_partitions(s);
2927     s->first_slice_line = 1;
2928     s->ptr_lastgob = s->pb.buf;
2929     for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2933         ff_set_qscale(s, s->qscale);
2934         ff_init_block_index(s);
2936         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2937             int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2938             int mb_type= s->mb_type[xy];
     /* make sure there is room for a worst-case MB plus slack */
2942             int size_increase =  s->avctx->internal->byte_buffer_size/4
2943                                + s->mb_width*MAX_MB_BYTES;
2945             ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
2946             if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2947                 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2950             if(s->data_partitioning){
2951                 if(   s->pb2   .buf_end - s->pb2   .buf - (put_bits_count(&s->    pb2)>>3) < MAX_MB_BYTES
2952                    || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2953                     av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2959             s->mb_y = mb_y;  // moved into loop, can get changed by H.261
2960             ff_update_block_index(s);
2962             if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2963                 ff_h261_reorder_mb_index(s);
2964                 xy= s->mb_y*s->mb_stride + s->mb_x;
2965                 mb_type= s->mb_type[xy];
2968             /* write gob / video packet header  */
2970                 int current_packet_size, is_gob_start;
2972                 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2974                 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2976                 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2978                 switch(s->codec_id){
2979                 case AV_CODEC_ID_H263:
2980                 case AV_CODEC_ID_H263P:
2981                     if(!s->h263_slice_structured)
2982                         if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2984                 case AV_CODEC_ID_MPEG2VIDEO:
2985                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2986                 case AV_CODEC_ID_MPEG1VIDEO:
2987                     if(s->mb_skip_run) is_gob_start=0;
2989                 case AV_CODEC_ID_MJPEG:
2990                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2995                     if(s->start_mb_y != mb_y || mb_x!=0){
2998                         if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2999                             ff_mpeg4_init_partitions(s);
3003                     av_assert2((put_bits_count(&s->pb)&7) == 0);
3004                     current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
     /* optional error-rate simulation: drop packets deterministically */
3006                     if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3007                         int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3008                         int d = 100 / s->error_rate;
3010                             current_packet_size=0;
3011                             s->pb.buf_ptr= s->ptr_lastgob;
3012                             assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3016 #if FF_API_RTP_CALLBACK
3017 FF_DISABLE_DEPRECATION_WARNINGS
3018                     if (s->avctx->rtp_callback){
3019                         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3020                         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3022 FF_ENABLE_DEPRECATION_WARNINGS
3024                     update_mb_info(s, 1);
3026                     switch(s->codec_id){
3027                     case AV_CODEC_ID_MPEG4:
3028                         if (CONFIG_MPEG4_ENCODER) {
3029                             ff_mpeg4_encode_video_packet_header(s);
3030                             ff_mpeg4_clean_buffers(s);
3033                     case AV_CODEC_ID_MPEG1VIDEO:
3034                     case AV_CODEC_ID_MPEG2VIDEO:
3035                         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3036                             ff_mpeg1_encode_slice_header(s);
3037                             ff_mpeg1_clean_buffers(s);
3040                     case AV_CODEC_ID_H263:
3041                     case AV_CODEC_ID_H263P:
3042                         if (CONFIG_H263_ENCODER)
3043                             ff_h263_encode_gob_header(s, mb_y);
3047                     if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3048                         int bits= put_bits_count(&s->pb);
3049                         s->misc_bits+= bits - s->last_bits;
3053                     s->ptr_lastgob += current_packet_size;
3054                     s->first_slice_line=1;
3055                     s->resync_mb_x=mb_x;
3056                     s->resync_mb_y=mb_y;
3060             if(  (s->resync_mb_x   == s->mb_x)
3061                && s->resync_mb_y+1 == s->mb_y){
3062                 s->first_slice_line=0;
3066             s->dquant=0; //only for QP_RD
3068             update_mb_info(s, 0);
3070             if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
     /* --- multi-candidate path: try each allowed mode, keep the best --- */
3072                 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3074                 copy_context_before_encode(&backup_s, s, -1);
3076                 best_s.data_partitioning= s->data_partitioning;
3077                 best_s.partitioned_frame= s->partitioned_frame;
3078                 if(s->data_partitioning){
3079                     backup_s.pb2= s->pb2;
3080                     backup_s.tex_pb= s->tex_pb;
3083                 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3084                     s->mv_dir = MV_DIR_FORWARD;
3085                     s->mv_type = MV_TYPE_16X16;
3087                     s->mv[0][0][0] = s->p_mv_table[xy][0];
3088                     s->mv[0][0][1] = s->p_mv_table[xy][1];
3089                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3090                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3092                 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3093                     s->mv_dir = MV_DIR_FORWARD;
3094                     s->mv_type = MV_TYPE_FIELD;
3097                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3098                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3099                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3101                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3102                                  &dmin, &next_block, 0, 0);
3104                 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3105                     s->mv_dir = MV_DIR_FORWARD;
3106                     s->mv_type = MV_TYPE_16X16;
3110                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3111                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3113                 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3114                     s->mv_dir = MV_DIR_FORWARD;
3115                     s->mv_type = MV_TYPE_8X8;
3118                         s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3119                         s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3121                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3122                                  &dmin, &next_block, 0, 0);
3124                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3125                     s->mv_dir = MV_DIR_FORWARD;
3126                     s->mv_type = MV_TYPE_16X16;
3128                     s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3129                     s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3130                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3131                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3133                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3134                     s->mv_dir = MV_DIR_BACKWARD;
3135                     s->mv_type = MV_TYPE_16X16;
3137                     s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3138                     s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3139                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3140                                  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3142                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3143                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3144                     s->mv_type = MV_TYPE_16X16;
3146                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3147                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3148                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3149                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3150                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3151                                  &dmin, &next_block, 0, 0);
3153                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3154                     s->mv_dir = MV_DIR_FORWARD;
3155                     s->mv_type = MV_TYPE_FIELD;
3158                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3159                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3160                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3162                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3163                                  &dmin, &next_block, 0, 0);
3165                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3166                     s->mv_dir = MV_DIR_BACKWARD;
3167                     s->mv_type = MV_TYPE_FIELD;
3170                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3171                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3172                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3174                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3175                                  &dmin, &next_block, 0, 0);
3177                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3178                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3179                     s->mv_type = MV_TYPE_FIELD;
3181                     for(dir=0; dir<2; dir++){
3183                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3184                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3185                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3188                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3189                                  &dmin, &next_block, 0, 0);
3191                 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3193                     s->mv_type = MV_TYPE_16X16;
3197                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3198                                  &dmin, &next_block, 0, 0);
3199                     if(s->h263_pred || s->h263_aic){
3201                             s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3203                             ff_clean_intra_table_entries(s); //old mode?
     /* --- QP_RD: additionally search nearby quantizers for the best mode --- */
3207                 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3208                     if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3209                         const int last_qp= backup_s.qscale;
3212                         const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3213                         static const int dquant_tab[4]={-1,1,-2,2};
3214                         int storecoefs = s->mb_intra && s->dc_val[0];
3216                         av_assert2(backup_s.dquant == 0);
3219                         s->mv_dir= best_s.mv_dir;
3220                         s->mv_type = MV_TYPE_16X16;
3221                         s->mb_intra= best_s.mb_intra;
3222                         s->mv[0][0][0] = best_s.mv[0][0][0];
3223                         s->mv[0][0][1] = best_s.mv[0][0][1];
3224                         s->mv[1][0][0] = best_s.mv[1][0][0];
3225                         s->mv[1][0][1] = best_s.mv[1][0][1];
3227                         qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3228                         for(; qpi<4; qpi++){
3229                             int dquant= dquant_tab[qpi];
3230                             qp= last_qp + dquant;
3231                             if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3233                             backup_s.dquant= dquant;
     /* intra: save/restore DC & AC prediction state around the trial */
3236                                     dc[i]= s->dc_val[0][ s->block_index[i] ];
3237                                     memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3241                             encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3242                                          &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3243                             if(best_s.qscale != qp){
3246                                         s->dc_val[0][ s->block_index[i] ]= dc[i];
3247                                         memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3254                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3255                     int mx= s->b_direct_mv_table[xy][0];
3256                     int my= s->b_direct_mv_table[xy][1];
3258                     backup_s.dquant = 0;
3259                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3261                     ff_mpeg4_set_direct_mv(s, mx, my);
3262                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3263                                  &dmin, &next_block, mx, my);
3265                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3266                     backup_s.dquant = 0;
3267                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3269                     ff_mpeg4_set_direct_mv(s, 0, 0);
3270                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3271                                  &dmin, &next_block, 0, 0);
     /* SKIP_RD: retry the best inter mode with all coefficients dropped */
3273                 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3276                         coded |= s->block_last_index[i];
3279                         memcpy(s->mv, best_s.mv, sizeof(s->mv));
3280                         if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3281                             mx=my=0; //FIXME find the one we actually used
3282                             ff_mpeg4_set_direct_mv(s, mx, my);
3283                         }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3291                         s->mv_dir= best_s.mv_dir;
3292                         s->mv_type = best_s.mv_type;
3294 /*                        s->mv[0][0][0] = best_s.mv[0][0][0];
3295                         s->mv[0][0][1] = best_s.mv[0][0][1];
3296                         s->mv[1][0][0] = best_s.mv[1][0][0];
3297                         s->mv[1][0][1] = best_s.mv[1][0][1];*/
3300                         encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3301                                      &dmin, &next_block, mx, my);
3306                 s->current_picture.qscale_table[xy] = best_s.qscale;
3308                 copy_context_after_encode(s, &best_s, -1);
     /* copy the winning candidate bitstream into the real output */
3310                 pb_bits_count= put_bits_count(&s->pb);
3311                 flush_put_bits(&s->pb);
3312                 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3315                 if(s->data_partitioning){
3316                     pb2_bits_count= put_bits_count(&s->pb2);
3317                     flush_put_bits(&s->pb2);
3318                     avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3319                     s->pb2= backup_s.pb2;
3321                     tex_pb_bits_count= put_bits_count(&s->tex_pb);
3322                     flush_put_bits(&s->tex_pb);
3323                     avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3324                     s->tex_pb= backup_s.tex_pb;
3326                 s->last_bits= put_bits_count(&s->pb);
3328                 if (CONFIG_H263_ENCODER &&
3329                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3330                     ff_h263_update_motion_val(s);
3332                 if(next_block==0){ //FIXME 16 vs linesize16
3333                     s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad                     , s->linesize  ,16);
3334                     s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize    , s->uvlinesize, 8);
3335                     s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3338                 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3339                     ff_mpv_decode_mb(s, s->block);
     /* --- single-candidate path: only one MB type possible --- */
3341                 int motion_x = 0, motion_y = 0;
3342                 s->mv_type=MV_TYPE_16X16;
3343                 // only one MB-Type possible
3346                 case CANDIDATE_MB_TYPE_INTRA:
3349                     motion_x= s->mv[0][0][0] = 0;
3350                     motion_y= s->mv[0][0][1] = 0;
3352                 case CANDIDATE_MB_TYPE_INTER:
3353                     s->mv_dir = MV_DIR_FORWARD;
3355                     motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3356                     motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3358                 case CANDIDATE_MB_TYPE_INTER_I:
3359                     s->mv_dir = MV_DIR_FORWARD;
3360                     s->mv_type = MV_TYPE_FIELD;
3363                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3364                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3365                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3368                 case CANDIDATE_MB_TYPE_INTER4V:
3369                     s->mv_dir = MV_DIR_FORWARD;
3370                     s->mv_type = MV_TYPE_8X8;
3373                         s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3374                         s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3377                 case CANDIDATE_MB_TYPE_DIRECT:
3378                     if (CONFIG_MPEG4_ENCODER) {
3379                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3381                         motion_x=s->b_direct_mv_table[xy][0];
3382                         motion_y=s->b_direct_mv_table[xy][1];
3383                         ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3386                 case CANDIDATE_MB_TYPE_DIRECT0:
3387                     if (CONFIG_MPEG4_ENCODER) {
3388                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3390                         ff_mpeg4_set_direct_mv(s, 0, 0);
3393                 case CANDIDATE_MB_TYPE_BIDIR:
3394                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3396                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3397                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3398                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3399                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3401                 case CANDIDATE_MB_TYPE_BACKWARD:
3402                     s->mv_dir = MV_DIR_BACKWARD;
3404                     motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3405                     motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3407                 case CANDIDATE_MB_TYPE_FORWARD:
3408                     s->mv_dir = MV_DIR_FORWARD;
3410                     motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3411                     motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3413                 case CANDIDATE_MB_TYPE_FORWARD_I:
3414                     s->mv_dir = MV_DIR_FORWARD;
3415                     s->mv_type = MV_TYPE_FIELD;
3418                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3419                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3420                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3423                 case CANDIDATE_MB_TYPE_BACKWARD_I:
3424                     s->mv_dir = MV_DIR_BACKWARD;
3425                     s->mv_type = MV_TYPE_FIELD;
3428                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3429                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3430                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3433                 case CANDIDATE_MB_TYPE_BIDIR_I:
3434                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3435                     s->mv_type = MV_TYPE_FIELD;
3437                     for(dir=0; dir<2; dir++){
3439                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3440                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3441                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3446                     av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3449                 encode_mb(s, motion_x, motion_y);
3451                 // RAL: Update last macroblock type
3452                 s->last_mv_dir = s->mv_dir;
3454                 if (CONFIG_H263_ENCODER &&
3455                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3456                     ff_h263_update_motion_val(s);
3458                     ff_mpv_decode_mb(s, s->block);
3461             /* clean the MV table in IPS frames for direct mode in B frames */
3462             if(s->mb_intra /* && I,P,S_TYPE */){
3463                 s->p_mv_table[xy][0]=0;
3464                 s->p_mv_table[xy][1]=0;
3467             if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3471                 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3472                 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3474                 s->current_picture.encoding_error[0] += sse(
3475                     s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3476                     s->dest[0], w, h, s->linesize);
3477                 s->current_picture.encoding_error[1] += sse(
3478                     s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
3479                     s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3480                 s->current_picture.encoding_error[2] += sse(
3481                     s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
3482                     s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3485             if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3486                 ff_h263_loop_filter(s);
3488             ff_dlog(s->avctx, "MB %d %d bits\n",
3489                     s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3493     //not beautiful here but we must write it before flushing so it has to be here
3494     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3495         ff_msmpeg4_encode_ext_header(s);
3499 #if FF_API_RTP_CALLBACK
3500 FF_DISABLE_DEPRECATION_WARNINGS
3501     /* Send the last GOB if RTP */
3502     if (s->avctx->rtp_callback) {
3503         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3504         pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3505         /* Call the RTP callback to send the last GOB */
3507         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3509 FF_ENABLE_DEPRECATION_WARNINGS
/* Add a field of the slice-thread src context into dst, then zero it in src.
 * NOTE(review): two-statement macro without do{}while(0) — safe only while
 * every call site uses it as a complete statement, as seen below. */
3515 #define MERGE(field) dst->field += src->field; src->field=0
/* Merge per-slice motion-estimation statistics (scene-change score and the
 * temporary MB variance sums) from a worker context back into the main one.
 * (Sampled view: closing lines of this function are not visible here.) */
3516 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3517 MERGE(me.scene_change_score);
3518 MERGE(me.mc_mb_var_sum_temp);
3519 MERGE(me.mb_var_sum_temp);
/* Merge encoding statistics and the bitstream of a slice-thread context into
 * the main context after the encode pass: DCT/noise-reduction counters, error
 * metrics, and finally the slice's bits appended to the main PutBitContext. */
3522 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3525 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3526 MERGE(dct_count[1]);
3535 MERGE(er.error_count);
3536 MERGE(padding_bug_score);
3537 MERGE(current_picture.encoding_error[0]);
3538 MERGE(current_picture.encoding_error[1]);
3539 MERGE(current_picture.encoding_error[2]);
/* dct_error_sum[] only exists when noise reduction is enabled. */
3541 if(dst->avctx->noise_reduction){
3542 for(i=0; i<64; i++){
3543 MERGE(dct_error_sum[0][i]);
3544 MERGE(dct_error_sum[1][i]);
/* Slice bitstreams must be byte-aligned before they can be concatenated. */
3548 assert(put_bits_count(&src->pb) % 8 ==0);
3549 assert(put_bits_count(&dst->pb) % 8 ==0);
3550 avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3551 flush_put_bits(&dst->pb);
/* Pick the quantizer/lambda for the current picture: either a pending
 * next_lambda, or the rate-control estimate when qscale is not fixed.
 * With adaptive quantization, also smooth the per-MB qscale table for the
 * codecs that require it. dry_run non-zero means "estimate only, don't
 * consume next_lambda" (used for the 2-pass pre-pass). */
3554 static int estimate_qp(MpegEncContext *s, int dry_run){
3555 if (s->next_lambda){
3556 s->current_picture_ptr->f->quality =
3557 s->current_picture.f->quality = s->next_lambda;
3558 if(!dry_run) s->next_lambda= 0;
3559 } else if (!s->fixed_qscale) {
3560 s->current_picture_ptr->f->quality =
3561 s->current_picture.f->quality = ff_rate_estimate_qscale(s, dry_run);
3562 if (s->current_picture.f->quality < 0)
3566 if(s->adaptive_quant){
3567 switch(s->codec_id){
3568 case AV_CODEC_ID_MPEG4:
3569 if (CONFIG_MPEG4_ENCODER)
3570 ff_clean_mpeg4_qscales(s);
3572 case AV_CODEC_ID_H263:
3573 case AV_CODEC_ID_H263P:
3574 case AV_CODEC_ID_FLV1:
3575 if (CONFIG_H263_ENCODER)
3576 ff_clean_h263_qscales(s);
3579 ff_init_qscale_tab(s);
/* Adaptive quant: lambda comes from the per-MB table; otherwise from the
 * picture-level quality chosen above. */
3582 s->lambda= s->lambda_table[0];
3585 s->lambda = s->current_picture.f->quality;
3590 /* must be called before writing the header */
/* Update the temporal distance bookkeeping (pp_time / pb_time /
 * last_non_b_time) from the current picture's pts. B-frames measure their
 * distance inside the surrounding P/P interval; non-B frames advance it. */
3591 static void set_frame_distances(MpegEncContext * s){
3592 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3593 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3595 if(s->pict_type==AV_PICTURE_TYPE_B){
3596 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3597 assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3599 s->pp_time= s->time - s->last_non_b_time;
3600 s->last_non_b_time= s->time;
3601 assert(s->picture_number==0 || s->pp_time > 0);
/* Encode one complete picture: set up timing/rounding state, run motion
 * estimation across the slice threads, choose f_code/b_code and clamp long
 * MVs, estimate the quantizer, build codec-specific quant matrices where
 * needed (MJPEG/AMV), write the picture header, then run encode_thread on
 * every slice context and merge the results.
 * (Sampled view: many original lines are absent between those shown.) */
3605 static int encode_picture(MpegEncContext *s, int picture_number)
3609 int context_count = s->slice_context_count;
3611 s->picture_number = picture_number;
3613 /* Reset the average MB variance */
3614 s->me.mb_var_sum_temp =
3615 s->me.mc_mb_var_sum_temp = 0;
3617 /* we need to initialize some time vars before we can encode b-frames */
3618 // RAL: Condition added for MPEG1VIDEO
3619 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3620 set_frame_distances(s);
3621 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3622 ff_set_mpeg4_time(s);
3624 s->me.scene_change_score=0;
3626 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* no_rounding alternates on non-B frames for codecs with flip-flop rounding
 * of the motion-compensated prediction. */
3628 if(s->pict_type==AV_PICTURE_TYPE_I){
3629 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3630 else s->no_rounding=0;
3631 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3632 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3633 s->no_rounding ^= 1;
3636 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3637 if (estimate_qp(s,1) < 0)
3639 ff_get_2pass_fcode(s);
3640 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3641 if(s->pict_type==AV_PICTURE_TYPE_B)
3642 s->lambda= s->last_lambda_for[s->pict_type];
3644 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* Outside of AMV/MJPEG the chroma matrices alias the luma ones; free any
 * separate copies first to avoid a leak before re-aliasing. */
3648 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3649 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3650 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3651 s->q_chroma_intra_matrix = s->q_intra_matrix;
3652 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3655 s->mb_intra=0; //for the rate distortion & bit compare functions
3656 for(i=1; i<context_count; i++){
3657 ret = ff_update_duplicate_context(s->thread_context[i], s);
3665 /* Estimate motion for every MB */
3666 if(s->pict_type != AV_PICTURE_TYPE_I){
3667 s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3668 s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3669 if (s->pict_type != AV_PICTURE_TYPE_B) {
3670 if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3671 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3675 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3676 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3678 for(i=0; i<s->mb_stride*s->mb_height; i++)
3679 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3681 if(!s->fixed_qscale){
3682 /* finding spatial complexity for I-frame rate control */
3683 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3686 for(i=1; i<context_count; i++){
3687 merge_context_after_me(s, s->thread_context[i]);
3689 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3690 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* Scene-change detection: promote a P frame to I when ME reports a score
 * above the user threshold. */
3693 if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3694 s->pict_type= AV_PICTURE_TYPE_I;
3695 for(i=0; i<s->mb_stride*s->mb_height; i++)
3696 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3697 if(s->msmpeg4_version >= 3)
3699 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3700 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* Choose f_code (and clamp over-long MVs) for P/S pictures. */
3704 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3705 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3707 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3709 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3710 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3711 s->f_code= FFMAX3(s->f_code, a, b);
3714 ff_fix_long_p_mvs(s);
3715 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3716 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3720 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3721 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
/* B pictures: pick independent f_code (forward) and b_code (backward). */
3726 if(s->pict_type==AV_PICTURE_TYPE_B){
3729 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3730 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3731 s->f_code = FFMAX(a, b);
3733 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3734 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3735 s->b_code = FFMAX(a, b);
3737 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3738 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3739 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3740 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3741 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3743 for(dir=0; dir<2; dir++){
3746 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3747 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3748 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3749 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3757 if (estimate_qp(s, 0) < 0)
3760 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3761 s->pict_type == AV_PICTURE_TYPE_I &&
3762 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3763 s->qscale= 3; //reduce clipping problems
/* MJPEG bakes the qscale directly into the (de)quantization matrices. */
3765 if (s->out_format == FMT_MJPEG) {
3766 const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3767 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3769 if (s->avctx->intra_matrix) {
3771 luma_matrix = s->avctx->intra_matrix;
3773 if (s->avctx->chroma_intra_matrix)
3774 chroma_matrix = s->avctx->chroma_intra_matrix;
3776 /* for mjpeg, we do include qscale in the matrix */
3778 int j = s->idsp.idct_permutation[i];
3780 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3781 s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3783 s->y_dc_scale_table=
3784 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3785 s->chroma_intra_matrix[0] =
3786 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3787 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3788 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3789 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3790 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV uses fixed sp5x quant tables and constant DC scales. */
3793 if(s->codec_id == AV_CODEC_ID_AMV){
3794 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3795 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3797 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3799 s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3800 s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3802 s->y_dc_scale_table= y;
3803 s->c_dc_scale_table= c;
3804 s->intra_matrix[0] = 13;
3805 s->chroma_intra_matrix[0] = 14;
3806 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3807 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3808 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3809 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3813 //FIXME var duplication
3814 s->current_picture_ptr->f->key_frame =
3815 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3816 s->current_picture_ptr->f->pict_type =
3817 s->current_picture.f->pict_type = s->pict_type;
3819 if (s->current_picture.f->key_frame)
3820 s->picture_in_gop_number=0;
3822 s->mb_x = s->mb_y = 0;
3823 s->last_bits= put_bits_count(&s->pb);
/* Write the codec-specific picture header. */
3824 switch(s->out_format) {
3826 if (CONFIG_MJPEG_ENCODER)
3827 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3828 s->intra_matrix, s->chroma_intra_matrix);
3831 if (CONFIG_H261_ENCODER)
3832 ff_h261_encode_picture_header(s, picture_number);
3835 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3836 ff_wmv2_encode_picture_header(s, picture_number);
3837 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3838 ff_msmpeg4_encode_picture_header(s, picture_number);
3839 else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3840 ret = ff_mpeg4_encode_picture_header(s, picture_number);
3843 } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3844 ret = ff_rv10_encode_picture_header(s, picture_number);
3848 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3849 ff_rv20_encode_picture_header(s, picture_number);
3850 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3851 ff_flv_encode_picture_header(s, picture_number);
3852 else if (CONFIG_H263_ENCODER)
3853 ff_h263_encode_picture_header(s, picture_number);
3856 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3857 ff_mpeg1_encode_picture_header(s, picture_number);
3862 bits= put_bits_count(&s->pb);
3863 s->header_bits= bits - s->last_bits;
3865 for(i=1; i<context_count; i++){
3866 update_duplicate_context_after_me(s->thread_context[i], s);
/* Encode all slices in parallel, then splice their bitstreams together. */
3868 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3869 for(i=1; i<context_count; i++){
3870 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3871 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
3872 merge_context_after_encode(s, s->thread_context[i]);
/* Noise-reduction filter applied in the DCT domain: accumulate per-position
 * coefficient statistics (dct_error_sum) and shrink each coefficient toward
 * zero by the learned offset, clamping at zero so the sign never flips.
 * (Sampled view: some lines of the loop body are not visible here.) */
3878 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3879 const int intra= s->mb_intra;
3882 s->dct_count[intra]++;
3884 for(i=0; i<64; i++){
3885 int level= block[i];
/* Positive branch: record magnitude, subtract offset, clamp at 0. */
3889 s->dct_error_sum[intra][i] += level;
3890 level -= s->dct_offset[intra][i];
3891 if(level<0) level=0;
/* Negative branch: mirror of the above. */
3893 s->dct_error_sum[intra][i] -= level;
3894 level += s->dct_offset[intra][i];
3895 if(level>0) level=0;
/* Rate-distortion optimized (trellis) quantization of one 8x8 block.
 * Forward-DCTs the block, builds up to two candidate quantized levels per
 * coefficient, then runs a Viterbi-style search over (run, level) survivors
 * minimizing distortion + lambda * bits. Returns the index of the last
 * non-zero coefficient, and flags coefficient overflow via *overflow.
 * (Sampled view: many original lines are absent between those shown.) */
3902 static int dct_quantize_trellis_c(MpegEncContext *s,
3903 int16_t *block, int n,
3904 int qscale, int *overflow){
3906 const uint16_t *matrix;
3907 const uint8_t *scantable= s->intra_scantable.scantable;
3908 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3910 unsigned int threshold1, threshold2;
3922 int coeff_count[64];
3923 int qmul, qadd, start_i, last_non_zero, i, dc;
3924 const int esc_length= s->ac_esc_length;
3926 uint8_t * last_length;
3927 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3930 s->fdsp.fdct(block);
3932 if(s->dct_error_sum)
3933 s->denoise_dct(s, block);
3935 qadd= ((qscale-1)|1)*8;
3937 if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3938 else mpeg2_qscale = qscale << 1;
3949 /* For AIC we skip quant/dequant of INTRADC */
3954 /* note: block[0] is assumed to be positive */
3955 block[0] = (block[0] + (q >> 1)) / q;
/* Intra: separate luma/chroma matrices and VLC length tables. */
3958 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
3959 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
3960 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
3961 bias= 1<<(QMAT_SHIFT-1);
3963 if (n > 3 && s->intra_chroma_ac_vlc_length) {
3964 length = s->intra_chroma_ac_vlc_length;
3965 last_length= s->intra_chroma_ac_vlc_last_length;
3967 length = s->intra_ac_vlc_length;
3968 last_length= s->intra_ac_vlc_last_length;
/* Inter path. */
3973 qmat = s->q_inter_matrix[qscale];
3974 matrix = s->inter_matrix;
3975 length = s->inter_ac_vlc_length;
3976 last_length= s->inter_ac_vlc_last_length;
3980 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3981 threshold2= (threshold1<<1);
/* Scan backwards to find the last coefficient above threshold. */
3983 for(i=63; i>=start_i; i--) {
3984 const int j = scantable[i];
3985 int level = block[j] * qmat[j];
3987 if(((unsigned)(level+threshold1))>threshold2){
/* Build candidate levels (level and level-1, or mirrored for negatives). */
3993 for(i=start_i; i<=last_non_zero; i++) {
3994 const int j = scantable[i];
3995 int level = block[j] * qmat[j];
3997 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3998 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
3999 if(((unsigned)(level+threshold1))>threshold2){
4001 level= (bias + level)>>QMAT_SHIFT;
4003 coeff[1][i]= level-1;
4004 // coeff[2][k]= level-2;
4006 level= (bias - level)>>QMAT_SHIFT;
4007 coeff[0][i]= -level;
4008 coeff[1][i]= -level+1;
4009 // coeff[2][k]= -level+2;
4011 coeff_count[i]= FFMIN(level, 2);
4012 av_assert2(coeff_count[i]);
4015 coeff[0][i]= (level>>31)|1;
4020 *overflow= s->max_qcoeff < max; //overflow might have happened
4022 if(last_non_zero < start_i){
4023 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4024 return last_non_zero;
/* Trellis search proper: score each position against the survivor set. */
4027 score_tab[start_i]= 0;
4028 survivor[0]= start_i;
4031 for(i=start_i; i<=last_non_zero; i++){
4032 int level_index, j, zero_distortion;
4033 int dct_coeff= FFABS(block[ scantable[i] ]);
4034 int best_score=256*256*256*120;
4036 if (s->fdsp.fdct == ff_fdct_ifast)
4037 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4038 zero_distortion= dct_coeff*dct_coeff;
4040 for(level_index=0; level_index < coeff_count[i]; level_index++){
4042 int level= coeff[level_index][i];
4043 const int alevel= FFABS(level);
/* Reconstruct the dequantized value the decoder would see, per format. */
4048 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4049 unquant_coeff= alevel*qmul + qadd;
4050 } else if(s->out_format == FMT_MJPEG) {
4051 j = s->idsp.idct_permutation[scantable[i]];
4052 unquant_coeff = alevel * matrix[j] * 8;
4054 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4056 unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4057 unquant_coeff = (unquant_coeff - 1) | 1;
4059 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4060 unquant_coeff = (unquant_coeff - 1) | 1;
4065 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* Small levels use the VLC length table; larger ones the escape cost. */
4067 if((level&(~127)) == 0){
4068 for(j=survivor_count-1; j>=0; j--){
4069 int run= i - survivor[j];
4070 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4071 score += score_tab[i-run];
4073 if(score < best_score){
4076 level_tab[i+1]= level-64;
4080 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4081 for(j=survivor_count-1; j>=0; j--){
4082 int run= i - survivor[j];
4083 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4084 score += score_tab[i-run];
4085 if(score < last_score){
4088 last_level= level-64;
4094 distortion += esc_length*lambda;
4095 for(j=survivor_count-1; j>=0; j--){
4096 int run= i - survivor[j];
4097 int score= distortion + score_tab[i-run];
4099 if(score < best_score){
4102 level_tab[i+1]= level-64;
4106 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4107 for(j=survivor_count-1; j>=0; j--){
4108 int run= i - survivor[j];
4109 int score= distortion + score_tab[i-run];
4110 if(score < last_score){
4113 last_level= level-64;
4121 score_tab[i+1]= best_score;
4123 //Note: there is a vlc code in mpeg4 which is 1 bit shorter than another one with a shorter run and the same level
/* Prune the survivor set; the <=27 case keeps it exact for short blocks. */
4124 if(last_non_zero <= 27){
4125 for(; survivor_count; survivor_count--){
4126 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4130 for(; survivor_count; survivor_count--){
4131 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4136 survivor[ survivor_count++ ]= i+1;
4139 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4140 last_score= 256*256*256*120;
4141 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4142 int score= score_tab[i];
4143 if(i) score += lambda*2; //FIXME exacter?
4145 if(score < last_score){
4148 last_level= level_tab[i];
4149 last_run= run_tab[i];
4154 s->coded_score[n] = last_score;
4156 dc= FFABS(block[0]);
4157 last_non_zero= last_i - 1;
4158 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4160 if(last_non_zero < start_i)
4161 return last_non_zero;
/* Special case: only the DC-position coefficient survived — re-evaluate it
 * against the cost of dropping it entirely. */
4163 if(last_non_zero == 0 && start_i == 0){
4165 int best_score= dc * dc;
4167 for(i=0; i<coeff_count[0]; i++){
4168 int level= coeff[i][0];
4169 int alevel= FFABS(level);
4170 int unquant_coeff, score, distortion;
4172 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4173 unquant_coeff= (alevel*qmul + qadd)>>3;
4175 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4176 unquant_coeff = (unquant_coeff - 1) | 1;
4178 unquant_coeff = (unquant_coeff + 4) >> 3;
4179 unquant_coeff<<= 3 + 3;
4181 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4183 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4184 else score= distortion + esc_length*lambda;
4186 if(score < best_score){
4188 best_level= level - 64;
4191 block[0]= best_level;
4192 s->coded_score[n] = best_score - dc*dc;
4193 if(best_level == 0) return -1;
4194 else return last_non_zero;
/* Back-track the winning path and write the chosen levels into block[]. */
4198 av_assert2(last_level);
4200 block[ perm_scantable[last_non_zero] ]= last_level;
4203 for(; i>start_i; i -= run_tab[i] + 1){
4204 block[ perm_scantable[i-1] ]= level_tab[i];
4207 return last_non_zero;
4210 //#define REFINE_STATS 1
/* 64x64 table of IDCT basis vectors (fixed point, BASIS_SHIFT scaling),
 * indexed by permuted coefficient position; filled lazily by build_basis(). */
4211 static int16_t basis[64][64];
/* Precompute the 2-D DCT basis functions, stored under the IDCT permutation
 * so dct_quantize_refine() can index them by permuted position.
 * (Sampled view: the loop headers over i/j/x/y are not visible here.) */
4213 static void build_basis(uint8_t *perm){
4220 double s= 0.25*(1<<BASIS_SHIFT);
4222 int perm_index= perm[index];
/* DC rows/columns carry the 1/sqrt(2) orthonormalization factor. */
4223 if(i==0) s*= sqrt(0.5);
4224 if(j==0) s*= sqrt(0.5);
4225 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Quantizer noise shaping: iteratively refine an already-quantized block by
 * trying +/-1 changes on each coefficient and keeping the change that most
 * reduces (weighted pixel-domain distortion + lambda * bit-cost), using the
 * precomputed IDCT basis to evaluate the pixel-domain effect cheaply.
 * Returns the new last-non-zero index.
 * (Sampled view: many original lines are absent between those shown.) */
4232 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4233 int16_t *block, int16_t *weight, int16_t *orig,
4236 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4237 const uint8_t *scantable= s->intra_scantable.scantable;
4238 const uint8_t *perm_scantable= s->intra_scantable.permutated;
4239 // unsigned int threshold1, threshold2;
4244 int qmul, qadd, start_i, last_non_zero, i, dc;
4246 uint8_t * last_length;
4248 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* REFINE_STATS counters (debug only). */
4251 static int after_last=0;
4252 static int to_zero=0;
4253 static int from_zero=0;
4256 static int messed_sign=0;
4259 if(basis[0][0] == 0)
4260 build_basis(s->idsp.idct_permutation);
4271 /* For AIC we skip quant/dequant of INTRADC */
4275 q <<= RECON_SHIFT-3;
4276 /* note: block[0] is assumed to be positive */
4278 // block[0] = (block[0] + (q >> 1)) / q;
4280 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4281 // bias= 1<<(QMAT_SHIFT-1);
4282 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4283 length = s->intra_chroma_ac_vlc_length;
4284 last_length= s->intra_chroma_ac_vlc_last_length;
4286 length = s->intra_ac_vlc_length;
4287 last_length= s->intra_ac_vlc_last_length;
4292 length = s->inter_ac_vlc_length;
4293 last_length= s->inter_ac_vlc_last_length;
4295 last_non_zero = s->block_last_index[n];
/* rem[] starts as the (scaled) residual vs. the original pixels. */
4300 dc += (1<<(RECON_SHIFT-1));
4301 for(i=0; i<64; i++){
4302 rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig directly instead of copying to rem[]
4305 STOP_TIMER("memset rem[]")}
/* Map the perceptual weights into the 16..63 range used by try_8x8basis. */
4308 for(i=0; i<64; i++){
4313 w= FFABS(weight[i]) + qns*one;
4314 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4317 // w=weight[i] = (63*qns + (w/2)) / w;
4320 av_assert2(w<(1<<6));
4323 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* Add the current quantized coefficients' contribution into rem[] and record
 * the run-length structure for bit-cost deltas. */
4329 for(i=start_i; i<=last_non_zero; i++){
4330 int j= perm_scantable[i];
4331 const int level= block[j];
4335 if(level<0) coeff= qmul*level - qadd;
4336 else coeff= qmul*level + qadd;
4337 run_tab[rle_index++]=run;
4340 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
4346 if(last_non_zero>0){
4347 STOP_TIMER("init rem[]")
/* Main refinement loop: evaluate candidate +/-1 changes per coefficient. */
4354 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4357 int run2, best_unquant_change=0, analyze_gradient;
4361 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
4363 if(analyze_gradient){
4367 for(i=0; i<64; i++){
4370 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
4373 STOP_TIMER("rem*w*w")}
/* Intra DC coefficient is handled separately (no VLC bit cost here). */
4383 const int level= block[0];
4384 int change, old_coeff;
4386 av_assert2(s->mb_intra);
4390 for(change=-1; change<=1; change+=2){
4391 int new_level= level + change;
4392 int score, new_coeff;
4394 new_coeff= q*new_level;
4395 if(new_coeff >= 2048 || new_coeff < 0)
4398 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4399 new_coeff - old_coeff);
4400 if(score<best_score){
4403 best_change= change;
4404 best_unquant_change= new_coeff - old_coeff;
4411 run2= run_tab[rle_index++];
/* AC coefficients: distortion delta plus the VLC bit-cost delta of the
 * run/level changes the modification induces. */
4415 for(i=start_i; i<64; i++){
4416 int j= perm_scantable[i];
4417 const int level= block[j];
4418 int change, old_coeff;
4420 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4424 if(level<0) old_coeff= qmul*level - qadd;
4425 else old_coeff= qmul*level + qadd;
4426 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4430 av_assert2(run2>=0 || i >= last_non_zero );
4433 for(change=-1; change<=1; change+=2){
4434 int new_level= level + change;
4435 int score, new_coeff, unquant_change;
4438 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4442 if(new_level<0) new_coeff= qmul*new_level - qadd;
4443 else new_coeff= qmul*new_level + qadd;
4444 if(new_coeff >= 2048 || new_coeff <= -2048)
4446 //FIXME check for overflow
4449 if(level < 63 && level > -63){
4450 if(i < last_non_zero)
4451 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4452 - length[UNI_AC_ENC_INDEX(run, level+64)];
4454 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4455 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
4458 av_assert2(FFABS(new_level)==1);
4460 if(analyze_gradient){
4461 int g= d1[ scantable[i] ];
4462 if(g && (g^new_level) >= 0)
/* Turning a zero into +/-1 splits a run: account for both new codes. */
4466 if(i < last_non_zero){
4467 int next_i= i + run2 + 1;
4468 int next_level= block[ perm_scantable[next_i] ] + 64;
4470 if(next_level&(~127))
4473 if(next_i < last_non_zero)
4474 score += length[UNI_AC_ENC_INDEX(run, 65)]
4475 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4476 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4478 score += length[UNI_AC_ENC_INDEX(run, 65)]
4479 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4480 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4482 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4484 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4485 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* Turning a +/-1 into zero merges two runs: inverse accounting. */
4491 av_assert2(FFABS(level)==1);
4493 if(i < last_non_zero){
4494 int next_i= i + run2 + 1;
4495 int next_level= block[ perm_scantable[next_i] ] + 64;
4497 if(next_level&(~127))
4500 if(next_i < last_non_zero)
4501 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4502 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4503 - length[UNI_AC_ENC_INDEX(run, 65)];
4505 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4506 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4507 - length[UNI_AC_ENC_INDEX(run, 65)];
4509 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4511 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4512 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4519 unquant_change= new_coeff - old_coeff;
4520 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4522 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4524 if(score<best_score){
4527 best_change= change;
4528 best_unquant_change= unquant_change;
4532 prev_level= level + 64;
4533 if(prev_level&(~127))
4542 STOP_TIMER("iterative step")}
/* Apply the best change found this iteration (if any) and update state. */
4546 int j= perm_scantable[ best_coeff ];
4548 block[j] += best_change;
4550 if(best_coeff > last_non_zero){
4551 last_non_zero= best_coeff;
4552 av_assert2(block[j]);
4559 if(block[j] - best_change){
4560 if(FFABS(block[j]) > FFABS(block[j] - best_change)){
4572 for(; last_non_zero>=start_i; last_non_zero--){
4573 if(block[perm_scantable[last_non_zero]])
4579 if(256*256*256*64 % count == 0){
4580 av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
/* Rebuild the run table and fold the accepted change into rem[]. */
4585 for(i=start_i; i<=last_non_zero; i++){
4586 int j= perm_scantable[i];
4587 const int level= block[j];
4590 run_tab[rle_index++]=run;
4597 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4603 if(last_non_zero>0){
4604 STOP_TIMER("iterative search")
4609 return last_non_zero;
4613 * Permute an 8x8 block according to permutation.
4614 * @param block the block which will be permuted according to
4615 * the given permutation vector
4616 * @param permutation the permutation vector
4617 * @param last the last non zero coefficient in scantable order, used to
4618 * speed the permutation up
4619 * @param scantable the used scantable, this is only used to speed the
4620 * permutation up, the block is not (inverse) permutated
4621 * to scantable order!
4623 void ff_block_permute(int16_t *block, uint8_t *permutation,
4624 const uint8_t *scantable, int last)
4631 //FIXME it is ok but not clean and might fail for some permutations
4632 // if (permutation[1] == 1)
/* Copy the non-zero coefficients out, then write them back through the
 * permutation. Only positions up to 'last' (in scan order) are touched. */
4635 for (i = 0; i <= last; i++) {
4636 const int j = scantable[i];
4641 for (i = 0; i <= last; i++) {
4642 const int j = scantable[i];
4643 const int perm_j = permutation[j];
4644 block[perm_j] = temp[j];
/* Plain (non-trellis) C quantization of one 8x8 block: forward DCT, optional
 * denoising, per-format matrix/bias selection, threshold scan for the last
 * non-zero coefficient, biased rounding quantization, and finally the IDCT
 * permutation of the surviving coefficients. Returns the last-non-zero index
 * and flags coefficient overflow via *overflow.
 * (Sampled view: some original lines are absent between those shown.) */
4648 int ff_dct_quantize_c(MpegEncContext *s,
4649 int16_t *block, int n,
4650 int qscale, int *overflow)
4652 int i, j, level, last_non_zero, q, start_i;
4654 const uint8_t *scantable= s->intra_scantable.scantable;
4657 unsigned int threshold1, threshold2;
4659 s->fdsp.fdct(block);
4661 if(s->dct_error_sum)
4662 s->denoise_dct(s, block);
4672 /* For AIC we skip quant/dequant of INTRADC */
4675 /* note: block[0] is assumed to be positive */
4676 block[0] = (block[0] + (q >> 1)) / q;
/* Intra vs inter matrix/bias selection. */
4679 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4680 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4684 qmat = s->q_inter_matrix[qscale];
4685 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4687 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4688 threshold2= (threshold1<<1);
/* Backward scan: find the last coefficient that survives quantization. */
4689 for(i=63;i>=start_i;i--) {
4691 level = block[j] * qmat[j];
4693 if(((unsigned)(level+threshold1))>threshold2){
/* Forward pass: quantize everything up to that coefficient. */
4700 for(i=start_i; i<=last_non_zero; i++) {
4702 level = block[j] * qmat[j];
4704 // if( bias+level >= (1<<QMAT_SHIFT)
4705 // || bias-level >= (1<<QMAT_SHIFT)){
4706 if(((unsigned)(level+threshold1))>threshold2){
4708 level= (bias + level)>>QMAT_SHIFT;
4711 level= (bias - level)>>QMAT_SHIFT;
4719 *overflow= s->max_qcoeff < max; //overflow might have happened
4721 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4722 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4723 ff_block_permute(block, s->idsp.idct_permutation,
4724 scantable, last_non_zero);
4726 return last_non_zero;
/* AVOption plumbing: OFFSET resolves a field inside MpegEncContext, VE marks
 * options as video+encoding parameters. */
4729 #define OFFSET(x) offsetof(MpegEncContext, x)
4730 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options of the H.263 encoder (terminator entry not visible here). */
4731 static const AVOption h263_options[] = {
4732 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4733 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass tying the h263_options table to the H.263 encoder instance. */
4738 static const AVClass h263_class = {
4739 .class_name = "H.263 encoder",
4740 .item_name = av_default_item_name,
4741 .option = h263_options,
4742 .version = LIBAVUTIL_VERSION_INT,
/* Registration of the H.263 encoder; all mpegvideo-family encoders below
 * share the generic ff_mpv_encode_* entry points. */
4745 AVCodec ff_h263_encoder = {
4747 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4748 .type = AVMEDIA_TYPE_VIDEO,
4749 .id = AV_CODEC_ID_H263,
4750 .priv_data_size = sizeof(MpegEncContext),
4751 .init = ff_mpv_encode_init,
4752 .encode2 = ff_mpv_encode_picture,
4753 .close = ff_mpv_encode_end,
4754 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4755 .priv_class = &h263_class,
/* Private options of the H.263+ encoder (terminator entry not visible here). */
4758 static const AVOption h263p_options[] = {
4759 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4760 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4761 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4762 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
/* AVClass tying the h263p_options table to the H.263+ encoder instance. */
4766 static const AVClass h263p_class = {
4767 .class_name = "H.263p encoder",
4768 .item_name = av_default_item_name,
4769 .option = h263p_options,
4770 .version = LIBAVUTIL_VERSION_INT,
/* Registration of the H.263+ encoder; supports slice threading. */
4773 AVCodec ff_h263p_encoder = {
4775 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4776 .type = AVMEDIA_TYPE_VIDEO,
4777 .id = AV_CODEC_ID_H263P,
4778 .priv_data_size = sizeof(MpegEncContext),
4779 .init = ff_mpv_encode_init,
4780 .encode2 = ff_mpv_encode_picture,
4781 .close = ff_mpv_encode_end,
4782 .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4783 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4784 .priv_class = &h263p_class,
4787 static const AVClass msmpeg4v2_class = {
4788 .class_name = "msmpeg4v2 encoder",
4789 .item_name = av_default_item_name,
4790 .option = ff_mpv_generic_options,
4791 .version = LIBAVUTIL_VERSION_INT,
4794 AVCodec ff_msmpeg4v2_encoder = {
4795 .name = "msmpeg4v2",
4796 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4797 .type = AVMEDIA_TYPE_VIDEO,
4798 .id = AV_CODEC_ID_MSMPEG4V2,
4799 .priv_data_size = sizeof(MpegEncContext),
4800 .init = ff_mpv_encode_init,
4801 .encode2 = ff_mpv_encode_picture,
4802 .close = ff_mpv_encode_end,
4803 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4804 .priv_class = &msmpeg4v2_class,
4807 static const AVClass msmpeg4v3_class = {
4808 .class_name = "msmpeg4v3 encoder",
4809 .item_name = av_default_item_name,
4810 .option = ff_mpv_generic_options,
4811 .version = LIBAVUTIL_VERSION_INT,
4814 AVCodec ff_msmpeg4v3_encoder = {
4816 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4817 .type = AVMEDIA_TYPE_VIDEO,
4818 .id = AV_CODEC_ID_MSMPEG4V3,
4819 .priv_data_size = sizeof(MpegEncContext),
4820 .init = ff_mpv_encode_init,
4821 .encode2 = ff_mpv_encode_picture,
4822 .close = ff_mpv_encode_end,
4823 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4824 .priv_class = &msmpeg4v3_class,
4827 static const AVClass wmv1_class = {
4828 .class_name = "wmv1 encoder",
4829 .item_name = av_default_item_name,
4830 .option = ff_mpv_generic_options,
4831 .version = LIBAVUTIL_VERSION_INT,
4834 AVCodec ff_wmv1_encoder = {
4836 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4837 .type = AVMEDIA_TYPE_VIDEO,
4838 .id = AV_CODEC_ID_WMV1,
4839 .priv_data_size = sizeof(MpegEncContext),
4840 .init = ff_mpv_encode_init,
4841 .encode2 = ff_mpv_encode_picture,
4842 .close = ff_mpv_encode_end,
4843 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4844 .priv_class = &wmv1_class,