2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non-linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/pixdesc.h"
40 #include "libavutil/opt.h"
41 #include "libavutil/timer.h"
46 #include "mpegvideo.h"
47 #include "mpegvideodata.h"
51 #include "mjpegenc_common.h"
53 #include "mpegutils.h"
56 #include "pixblockdsp.h"
60 #include "aandcttab.h"
62 #include "mpeg4video.h"
64 #include "bytestream.h"
71 #define QUANT_BIAS_SHIFT 8
73 #define QMAT_SHIFT_MMX 16
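/* QUANT_BIAS_SHIFT is the fixed-point precision of the quantizer rounding
 * bias: bias values below are expressed in 1/256ths of a quantization step
 * (e.g. 3 << (QUANT_BIAS_SHIFT - 3) == 96, i.e. 3/8).  QMAT_SHIFT_MMX is
 * the precision of the 16-bit reciprocal multipliers stored in qmat16 for
 * the SIMD quantizer; the wider QMAT_SHIFT used for qmat[] is defined
 * elsewhere in this file. */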
76 static int encode_picture(MpegEncContext *s, int picture_number);
77 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
78 static int sse_mb(MpegEncContext *s);
79 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
80 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
82 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
83 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
85 const AVOption ff_mpv_generic_options[] = {
90 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
91 uint16_t (*qmat16)[2][64],
92 const uint16_t *quant_matrix,
93 int bias, int qmin, int qmax, int intra)
95 FDCTDSPContext *fdsp = &s->fdsp;
99 for (qscale = qmin; qscale <= qmax; qscale++) {
103 if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
104 else qscale2 = qscale << 1;
106 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
108 fdsp->fdct == ff_faandct ||
109 #endif /* CONFIG_FAANDCT */
110 fdsp->fdct == ff_jpeg_fdct_islow_10) {
111 for (i = 0; i < 64; i++) {
112 const int j = s->idsp.idct_permutation[i];
113 int64_t den = (int64_t) qscale2 * quant_matrix[j];
114 /* 16 <= qscale * quant_matrix[i] <= 7905
115 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
116 * 19952 <= x <= 249205026
117 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
118 * 3444240 >= (1 << 36) / (x) >= 275 */
120 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
122 } else if (fdsp->fdct == ff_fdct_ifast) {
123 for (i = 0; i < 64; i++) {
124 const int j = s->idsp.idct_permutation[i];
125 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
126 /* 16 <= qscale * quant_matrix[i] <= 7905
127 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
128 * 19952 <= x <= 249205026
129 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
130 * 3444240 >= (1 << 36) / (x) >= 275 */
132 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
135 for (i = 0; i < 64; i++) {
136 const int j = s->idsp.idct_permutation[i];
137 int64_t den = (int64_t) qscale2 * quant_matrix[j];
138 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
139 * Assume x = qscale * quant_matrix[i]
141 * so, since 16 <= x <= 7905, (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
142 * so 32768 >= (1 << 19) / (x) >= 67 */
143 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
144 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
145 // (qscale * quant_matrix[i]);
146 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
148 if (qmat16[qscale][0][i] == 0 ||
149 qmat16[qscale][0][i] == 128 * 256)
150 qmat16[qscale][0][i] = 128 * 256 - 1;
151 qmat16[qscale][1][i] =
152 ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
153 qmat16[qscale][0][i]);
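/* Here, qmat[] and qmat16[][0] hold fixed-point reciprocals of
 * qscale2 * quant_matrix[j], so quantization becomes a multiply and shift
 * instead of a division.  For example, qscale2 == 2 and
 * quant_matrix[j] == 16 give den == 32 and
 * qmat16[qscale][0][i] == (2 << 16) / 32 == 4096.  The checks against 0
 * and 128 * 256 avoid a zero divisor in the ROUNDED_DIV above and keep
 * the multiplier within signed 16-bit range, and qmat16[qscale][1][i]
 * stores the rounding bias pre-divided by that multiplier. */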
157 for (i = intra; i < 64; i++) {
159 if (fdsp->fdct == ff_fdct_ifast) {
160 max = (8191LL * ff_aanscales[i]) >> 14;
162 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
168 av_log(NULL, AV_LOG_INFO,
169 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
174 static inline void update_qscale(MpegEncContext *s)
176 if (s->q_scale_type == 1 && 0) {
178 int bestdiff=INT_MAX;
181 for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
182 int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
183 if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
184 (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
186 if (diff < bestdiff) {
193 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
194 (FF_LAMBDA_SHIFT + 7);
195 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
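/* With FF_LAMBDA_SHIFT == 7 the expression above is
 * (lambda * 139 + 8192) >> 14, i.e. roughly lambda / FF_QP2LAMBDA (118)
 * with rounding: e.g. lambda == 590 gives (82010 + 8192) >> 14 == 5.
 * ff_init_qscale_tab() uses the same mapping, and the VBV retry path in
 * ff_mpv_encode_picture() uses its inverse for min_step. */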
198 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
202 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
208 for (i = 0; i < 64; i++) {
209 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
216 * init s->current_picture.qscale_table from s->lambda_table
218 void ff_init_qscale_tab(MpegEncContext *s)
220 int8_t * const qscale_table = s->current_picture.qscale_table;
223 for (i = 0; i < s->mb_num; i++) {
224 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
225 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
226 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
231 static void update_duplicate_context_after_me(MpegEncContext *dst,
234 #define COPY(a) dst->a= src->a
236 COPY(current_picture);
242 COPY(picture_in_gop_number);
243 COPY(gop_picture_number);
244 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
245 COPY(progressive_frame); // FIXME don't set in encode_header
246 COPY(partitioned_frame); // FIXME don't set in encode_header
251 * Set the given MpegEncContext to defaults for encoding.
252 * The changed fields will not depend upon the prior state of the MpegEncContext.
254 static void mpv_encode_defaults(MpegEncContext *s)
257 ff_mpv_common_defaults(s);
259 for (i = -16; i < 16; i++) {
260 default_fcode_tab[i + MAX_MV] = 1;
262 s->me.mv_penalty = default_mv_penalty;
263 s->fcode_tab = default_fcode_tab;
265 s->input_picture_number = 0;
266 s->picture_in_gop_number = 0;
269 av_cold int ff_dct_encode_init(MpegEncContext *s)
272 ff_dct_encode_init_x86(s);
274 if (CONFIG_H263_ENCODER)
275 ff_h263dsp_init(&s->h263dsp);
276 if (!s->dct_quantize)
277 s->dct_quantize = ff_dct_quantize_c;
279 s->denoise_dct = denoise_dct_c;
280 s->fast_dct_quantize = s->dct_quantize;
281 if (s->avctx->trellis)
282 s->dct_quantize = dct_quantize_trellis_c;
287 /* init video encoder */
288 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
290 MpegEncContext *s = avctx->priv_data;
291 AVCPBProperties *cpb_props;
292 int i, ret, format_supported;
294 mpv_encode_defaults(s);
296 switch (avctx->codec_id) {
297 case AV_CODEC_ID_MPEG2VIDEO:
298 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
299 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
300 av_log(avctx, AV_LOG_ERROR,
301 "only YUV420 and YUV422 are supported\n");
305 case AV_CODEC_ID_MJPEG:
306 case AV_CODEC_ID_AMV:
307 format_supported = 0;
308 /* JPEG color space */
309 if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
310 avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
311 avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
312 (avctx->color_range == AVCOL_RANGE_JPEG &&
313 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
314 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
315 avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
316 format_supported = 1;
317 /* MPEG color space */
318 else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
319 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
320 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
321 avctx->pix_fmt == AV_PIX_FMT_YUV444P))
322 format_supported = 1;
324 if (!format_supported) {
325 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
330 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
331 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
336 switch (avctx->pix_fmt) {
337 case AV_PIX_FMT_YUVJ444P:
338 case AV_PIX_FMT_YUV444P:
339 s->chroma_format = CHROMA_444;
341 case AV_PIX_FMT_YUVJ422P:
342 case AV_PIX_FMT_YUV422P:
343 s->chroma_format = CHROMA_422;
345 case AV_PIX_FMT_YUVJ420P:
346 case AV_PIX_FMT_YUV420P:
348 s->chroma_format = CHROMA_420;
352 avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
354 #if FF_API_PRIVATE_OPT
355 FF_DISABLE_DEPRECATION_WARNINGS
356 if (avctx->rtp_payload_size)
357 s->rtp_payload_size = avctx->rtp_payload_size;
358 if (avctx->me_penalty_compensation)
359 s->me_penalty_compensation = avctx->me_penalty_compensation;
361 s->me_pre = avctx->pre_me;
362 FF_ENABLE_DEPRECATION_WARNINGS
365 s->bit_rate = avctx->bit_rate;
366 s->width = avctx->width;
367 s->height = avctx->height;
368 if (avctx->gop_size > 600 &&
369 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
370 av_log(avctx, AV_LOG_WARNING,
371 "keyframe interval too large!, reducing it from %d to %d\n",
372 avctx->gop_size, 600);
373 avctx->gop_size = 600;
375 s->gop_size = avctx->gop_size;
377 if (avctx->max_b_frames > MAX_B_FRAMES) {
378 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
379 "is %d.\n", MAX_B_FRAMES);
380 avctx->max_b_frames = MAX_B_FRAMES;
382 s->max_b_frames = avctx->max_b_frames;
383 s->codec_id = avctx->codec->id;
384 s->strict_std_compliance = avctx->strict_std_compliance;
385 s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
386 s->rtp_mode = !!s->rtp_payload_size;
387 s->intra_dc_precision = avctx->intra_dc_precision;
389 // work around differences in how applications specify DC precision
390 if (s->intra_dc_precision < 0) {
391 s->intra_dc_precision += 8;
392 } else if (s->intra_dc_precision >= 8)
393 s->intra_dc_precision -= 8;
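/* Applications specify DC precision either as an offset from 8 bits (the
 * MPEG-2 field value, 0..3) or as the absolute bit depth (8..11); both
 * are normalized to the 0..3 field range here.  Negative inputs are
 * shifted up and, if still negative, rejected just below. */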
395 if (s->intra_dc_precision < 0) {
396 av_log(avctx, AV_LOG_ERROR,
397 "intra dc precision must be positive, note some applications use"
398 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
399 return AVERROR(EINVAL);
402 if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))
405 if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
406 av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
407 return AVERROR(EINVAL);
409 s->user_specified_pts = AV_NOPTS_VALUE;
411 if (s->gop_size <= 1) {
419 s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
421 s->adaptive_quant = (s->avctx->lumi_masking ||
422 s->avctx->dark_masking ||
423 s->avctx->temporal_cplx_masking ||
424 s->avctx->spatial_cplx_masking ||
425 s->avctx->p_masking ||
427 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
430 s->loop_filter = !!(s->avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
432 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
433 switch(avctx->codec_id) {
434 case AV_CODEC_ID_MPEG1VIDEO:
435 case AV_CODEC_ID_MPEG2VIDEO:
436 avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
438 case AV_CODEC_ID_MPEG4:
439 case AV_CODEC_ID_MSMPEG4V1:
440 case AV_CODEC_ID_MSMPEG4V2:
441 case AV_CODEC_ID_MSMPEG4V3:
442 if (avctx->rc_max_rate >= 15000000) {
443 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
444 } else if(avctx->rc_max_rate >= 2000000) {
445 avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
446 } else if(avctx->rc_max_rate >= 384000) {
447 avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
449 avctx->rc_buffer_size = 40;
450 avctx->rc_buffer_size *= 16384;
453 if (avctx->rc_buffer_size) {
454 av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
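/* The fallback buffer sizes above are computed in vbv_buffer_size units
 * of 16384 bits, matching how the value is coded in the MPEG headers.
 * The MPEG-1/2 branch yields 112 * 16384 = 1835008 bits (the MP@ML
 * maximum) for max rates up to 15 Mb/s; the MPEG-4/MSMPEG4 branch
 * interpolates between anchor points, e.g. an 8 Mb/s max rate gives
 * 80 + 6000000 * 240 / 13000000 = 190 units = 3112960 bits, reported as
 * 380 kbyte by the log message above. */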
458 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
459 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
463 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
464 av_log(avctx, AV_LOG_INFO,
465 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
468 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
469 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
473 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
474 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
478 if (avctx->rc_max_rate &&
479 avctx->rc_max_rate == avctx->bit_rate &&
480 avctx->rc_max_rate != avctx->rc_min_rate) {
481 av_log(avctx, AV_LOG_INFO,
482 "impossible bitrate constraints, this will fail\n");
485 if (avctx->rc_buffer_size &&
486 avctx->bit_rate * (int64_t)avctx->time_base.num >
487 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
488 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
492 if (!s->fixed_qscale &&
493 avctx->bit_rate * av_q2d(avctx->time_base) >
494 avctx->bit_rate_tolerance) {
495 av_log(avctx, AV_LOG_WARNING,
496 "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
497 avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
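/* bit_rate * av_q2d(time_base) is the average bit budget of one frame; a
 * tolerance below that cannot be met by the rate control, so it is
 * widened to five frames' worth.  E.g. 2 Mb/s at a 1/25 time base means
 * 80000 bits per frame and a new tolerance of 400000. */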
500 if (s->avctx->rc_max_rate &&
501 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
502 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
503 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
504 90000LL * (avctx->rc_buffer_size - 1) >
505 s->avctx->rc_max_rate * 0xFFFFLL) {
506 av_log(avctx, AV_LOG_INFO,
507 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
508 "specified vbv buffer is too large for the given bitrate!\n");
511 if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
512 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
513 s->codec_id != AV_CODEC_ID_FLV1) {
514 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
518 if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
519 av_log(avctx, AV_LOG_ERROR,
520 "OBMC is only supported with simple mb decision\n");
524 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
525 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
529 if (s->max_b_frames &&
530 s->codec_id != AV_CODEC_ID_MPEG4 &&
531 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
532 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
533 av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
536 if (s->max_b_frames < 0) {
537 av_log(avctx, AV_LOG_ERROR,
538 "max b frames must be 0 or positive for mpegvideo based encoders\n");
542 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
543 s->codec_id == AV_CODEC_ID_H263 ||
544 s->codec_id == AV_CODEC_ID_H263P) &&
545 (avctx->sample_aspect_ratio.num > 255 ||
546 avctx->sample_aspect_ratio.den > 255)) {
547 av_log(avctx, AV_LOG_WARNING,
548 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
549 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
550 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
551 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
554 if ((s->codec_id == AV_CODEC_ID_H263 ||
555 s->codec_id == AV_CODEC_ID_H263P) &&
556 (avctx->width > 2048 ||
557 avctx->height > 1152 )) {
558 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
561 if ((s->codec_id == AV_CODEC_ID_H263 ||
562 s->codec_id == AV_CODEC_ID_H263P) &&
563 ((avctx->width &3) ||
564 (avctx->height&3) )) {
565 av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
569 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
570 (avctx->width > 4095 ||
571 avctx->height > 4095 )) {
572 av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
576 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
577 (avctx->width > 16383 ||
578 avctx->height > 16383 )) {
579 av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
583 if (s->codec_id == AV_CODEC_ID_RV10 &&
585 avctx->height&15 )) {
586 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
587 return AVERROR(EINVAL);
590 if (s->codec_id == AV_CODEC_ID_RV20 &&
593 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
594 return AVERROR(EINVAL);
597 if ((s->codec_id == AV_CODEC_ID_WMV1 ||
598 s->codec_id == AV_CODEC_ID_WMV2) &&
600 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
604 if ((s->avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
605 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
606 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
610 #if FF_API_PRIVATE_OPT
611 FF_DISABLE_DEPRECATION_WARNINGS
612 if (avctx->mpeg_quant)
613 s->mpeg_quant = avctx->mpeg_quant;
614 FF_ENABLE_DEPRECATION_WARNINGS
617 // FIXME mpeg2 uses that too
618 if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
619 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
620 av_log(avctx, AV_LOG_ERROR,
621 "mpeg2 style quantization not supported by codec\n");
625 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
626 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
630 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
631 s->avctx->mb_decision != FF_MB_DECISION_RD) {
632 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
636 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
637 (s->codec_id == AV_CODEC_ID_AMV ||
638 s->codec_id == AV_CODEC_ID_MJPEG)) {
639 // Used to produce garbage with MJPEG.
640 av_log(avctx, AV_LOG_ERROR,
641 "QP RD is no longer compatible with MJPEG or AMV\n");
645 #if FF_API_PRIVATE_OPT
646 FF_DISABLE_DEPRECATION_WARNINGS
647 if (avctx->scenechange_threshold)
648 s->scenechange_threshold = avctx->scenechange_threshold;
649 FF_ENABLE_DEPRECATION_WARNINGS
652 if (s->scenechange_threshold < 1000000000 &&
653 (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
654 av_log(avctx, AV_LOG_ERROR,
655 "closed gop with scene change detection are not supported yet, "
656 "set threshold to 1000000000\n");
660 if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
661 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
662 s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
663 av_log(avctx, AV_LOG_ERROR,
664 "low delay forcing is only available for mpeg2, "
665 "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
668 if (s->max_b_frames != 0) {
669 av_log(avctx, AV_LOG_ERROR,
670 "B-frames cannot be used with low delay\n");
675 if (s->q_scale_type == 1) {
676 if (avctx->qmax > 28) {
677 av_log(avctx, AV_LOG_ERROR,
678 "non linear quant only supports qmax <= 28 currently\n");
683 if (avctx->slices > 1 &&
684 (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
685 av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
686 return AVERROR(EINVAL);
689 if (s->avctx->thread_count > 1 &&
690 s->codec_id != AV_CODEC_ID_MPEG4 &&
691 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
692 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
693 s->codec_id != AV_CODEC_ID_MJPEG &&
694 (s->codec_id != AV_CODEC_ID_H263P)) {
695 av_log(avctx, AV_LOG_ERROR,
696 "multi threaded encoding not supported by codec\n");
700 if (s->avctx->thread_count < 1) {
701 av_log(avctx, AV_LOG_ERROR,
702 "automatic thread number detection not supported by codec, "
707 if (!avctx->time_base.den || !avctx->time_base.num) {
708 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
712 #if FF_API_PRIVATE_OPT
713 FF_DISABLE_DEPRECATION_WARNINGS
714 if (avctx->b_frame_strategy)
715 s->b_frame_strategy = avctx->b_frame_strategy;
716 if (avctx->b_sensitivity != 40)
717 s->b_sensitivity = avctx->b_sensitivity;
718 FF_ENABLE_DEPRECATION_WARNINGS
721 if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
722 av_log(avctx, AV_LOG_INFO,
723 "notice: b_frame_strategy only affects the first pass\n");
724 s->b_frame_strategy = 0;
727 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
729 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
730 avctx->time_base.den /= i;
731 avctx->time_base.num /= i;
735 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) {
736 // (a + x * 3 / 8) / x
737 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
738 s->inter_quant_bias = 0;
740 s->intra_quant_bias = 0;
742 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
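/* The bias is expressed in 1/(1 << QUANT_BIAS_SHIFT) = 1/256 units:
 * MPEG-1/2, MPEG-4 with mpeg_quant, and (M)JPEG round intra coefficients
 * with a +3/8 bias (96/256) and no inter bias, while the H.263-style
 * codecs use no intra bias and a -1/4 inter bias (-64/256), pushing small
 * inter coefficients toward zero to save bits. */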
745 if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
746 av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
747 return AVERROR(EINVAL);
750 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
752 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
753 s->avctx->time_base.den > (1 << 16) - 1) {
754 av_log(avctx, AV_LOG_ERROR,
755 "timebase %d/%d not supported by MPEG 4 standard, "
756 "the maximum admitted value for the timebase denominator "
757 "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
761 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
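/* Smallest field width able to represent 0 .. time_base.den - 1, used for
 * the MPEG-4 vop_time_increment field: e.g. a denominator of 30 gives
 * av_log2(29) + 1 = 5 bits. */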
763 switch (avctx->codec->id) {
764 case AV_CODEC_ID_MPEG1VIDEO:
765 s->out_format = FMT_MPEG1;
766 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
767 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
769 case AV_CODEC_ID_MPEG2VIDEO:
770 s->out_format = FMT_MPEG1;
771 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
772 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
775 case AV_CODEC_ID_MJPEG:
776 case AV_CODEC_ID_AMV:
777 s->out_format = FMT_MJPEG;
778 s->intra_only = 1; /* force intra only for jpeg */
779 if (!CONFIG_MJPEG_ENCODER ||
780 ff_mjpeg_encode_init(s) < 0)
785 case AV_CODEC_ID_H261:
786 if (!CONFIG_H261_ENCODER)
788 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
789 av_log(avctx, AV_LOG_ERROR,
790 "The specified picture size of %dx%d is not valid for the "
791 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
792 s->width, s->height);
795 s->out_format = FMT_H261;
798 s->rtp_mode = 0; /* Sliced encoding not supported */
800 case AV_CODEC_ID_H263:
801 if (!CONFIG_H263_ENCODER)
803 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
804 s->width, s->height) == 8) {
805 av_log(avctx, AV_LOG_ERROR,
806 "The specified picture size of %dx%d is not valid for "
807 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
808 "352x288, 704x576, and 1408x1152. "
809 "Try H.263+.\n", s->width, s->height);
812 s->out_format = FMT_H263;
816 case AV_CODEC_ID_H263P:
817 s->out_format = FMT_H263;
820 s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
821 s->modified_quant = s->h263_aic;
822 s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
823 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
826 /* These are just to be sure */
830 case AV_CODEC_ID_FLV1:
831 s->out_format = FMT_H263;
832 s->h263_flv = 2; /* format = 1; 11-bit codes */
833 s->unrestricted_mv = 1;
834 s->rtp_mode = 0; /* don't allow GOB */
838 case AV_CODEC_ID_RV10:
839 s->out_format = FMT_H263;
843 case AV_CODEC_ID_RV20:
844 s->out_format = FMT_H263;
847 s->modified_quant = 1;
851 s->unrestricted_mv = 0;
853 case AV_CODEC_ID_MPEG4:
854 s->out_format = FMT_H263;
856 s->unrestricted_mv = 1;
857 s->low_delay = s->max_b_frames ? 0 : 1;
858 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
860 case AV_CODEC_ID_MSMPEG4V2:
861 s->out_format = FMT_H263;
863 s->unrestricted_mv = 1;
864 s->msmpeg4_version = 2;
868 case AV_CODEC_ID_MSMPEG4V3:
869 s->out_format = FMT_H263;
871 s->unrestricted_mv = 1;
872 s->msmpeg4_version = 3;
873 s->flipflop_rounding = 1;
877 case AV_CODEC_ID_WMV1:
878 s->out_format = FMT_H263;
880 s->unrestricted_mv = 1;
881 s->msmpeg4_version = 4;
882 s->flipflop_rounding = 1;
886 case AV_CODEC_ID_WMV2:
887 s->out_format = FMT_H263;
889 s->unrestricted_mv = 1;
890 s->msmpeg4_version = 5;
891 s->flipflop_rounding = 1;
899 #if FF_API_PRIVATE_OPT
900 FF_DISABLE_DEPRECATION_WARNINGS
901 if (avctx->noise_reduction)
902 s->noise_reduction = avctx->noise_reduction;
903 FF_ENABLE_DEPRECATION_WARNINGS
906 avctx->has_b_frames = !s->low_delay;
910 s->progressive_frame =
911 s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
912 AV_CODEC_FLAG_INTERLACED_ME) ||
917 if (ff_mpv_common_init(s) < 0)
920 ff_fdctdsp_init(&s->fdsp, avctx);
921 ff_me_cmp_init(&s->mecc, avctx);
922 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
923 ff_pixblockdsp_init(&s->pdsp, avctx);
924 ff_qpeldsp_init(&s->qdsp);
926 if (s->msmpeg4_version) {
927 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
928 2 * 2 * (MAX_LEVEL + 1) *
929 (MAX_RUN + 1) * 2 * sizeof(int), fail);
931 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
933 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
934 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail);
935 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
936 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
937 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
938 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
939 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
940 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
941 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
942 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
945 if (s->noise_reduction) {
946 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
947 2 * 64 * sizeof(uint16_t), fail);
950 ff_dct_encode_init(s);
952 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
953 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
955 if (s->slice_context_count > 1) {
958 if (avctx->codec_id == AV_CODEC_ID_H263P)
959 s->h263_slice_structured = 1;
962 s->quant_precision = 5;
964 #if FF_API_PRIVATE_OPT
965 FF_DISABLE_DEPRECATION_WARNINGS
966 if (avctx->frame_skip_threshold)
967 s->frame_skip_threshold = avctx->frame_skip_threshold;
968 if (avctx->frame_skip_factor)
969 s->frame_skip_factor = avctx->frame_skip_factor;
970 if (avctx->frame_skip_exp)
971 s->frame_skip_exp = avctx->frame_skip_exp;
972 if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
973 s->frame_skip_cmp = avctx->frame_skip_cmp;
974 FF_ENABLE_DEPRECATION_WARNINGS
977 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, s->avctx->ildct_cmp);
978 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
980 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
981 ff_h261_encode_init(s);
982 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
983 ff_h263_encode_init(s);
984 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
985 if ((ret = ff_msmpeg4_encode_init(s)) < 0)
987 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
988 && s->out_format == FMT_MPEG1)
989 ff_mpeg1_encode_init(s);
992 for (i = 0; i < 64; i++) {
993 int j = s->idsp.idct_permutation[i];
994 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
996 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
997 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
998 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1000 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1003 s->chroma_intra_matrix[j] =
1004 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1005 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1007 if (s->avctx->intra_matrix)
1008 s->intra_matrix[j] = s->avctx->intra_matrix[i];
1009 if (s->avctx->inter_matrix)
1010 s->inter_matrix[j] = s->avctx->inter_matrix[i];
1013 /* precompute matrix */
1014 /* for MJPEG, we do include qscale in the matrix */
1015 if (s->out_format != FMT_MJPEG) {
1016 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
1017 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1019 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
1020 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1024 if (ff_rate_control_init(s) < 0)
1027 #if FF_API_PRIVATE_OPT
1028 FF_DISABLE_DEPRECATION_WARNINGS
1029 if (avctx->brd_scale)
1030 s->brd_scale = avctx->brd_scale;
1032 if (avctx->prediction_method)
1033 s->pred = avctx->prediction_method + 1;
1034 FF_ENABLE_DEPRECATION_WARNINGS
1037 if (s->b_frame_strategy == 2) {
1038 for (i = 0; i < s->max_b_frames + 2; i++) {
1039 s->tmp_frames[i] = av_frame_alloc();
1040 if (!s->tmp_frames[i])
1041 return AVERROR(ENOMEM);
1043 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1044 s->tmp_frames[i]->width = s->width >> s->brd_scale;
1045 s->tmp_frames[i]->height = s->height >> s->brd_scale;
1047 ret = av_frame_get_buffer(s->tmp_frames[i], 32);
1053 cpb_props = ff_add_cpb_side_data(avctx);
1055 return AVERROR(ENOMEM);
1056 cpb_props->max_bitrate = avctx->rc_max_rate;
1057 cpb_props->min_bitrate = avctx->rc_min_rate;
1058 cpb_props->avg_bitrate = avctx->bit_rate;
1059 cpb_props->buffer_size = avctx->rc_buffer_size;
1063 ff_mpv_encode_end(avctx);
1064 return AVERROR_UNKNOWN;
1067 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1069 MpegEncContext *s = avctx->priv_data;
1072 ff_rate_control_uninit(s);
1074 ff_mpv_common_end(s);
1075 if (CONFIG_MJPEG_ENCODER &&
1076 s->out_format == FMT_MJPEG)
1077 ff_mjpeg_encode_close(s);
1079 av_freep(&avctx->extradata);
1081 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1082 av_frame_free(&s->tmp_frames[i]);
1084 ff_free_picture_tables(&s->new_picture);
1085 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1087 av_freep(&s->avctx->stats_out);
1088 av_freep(&s->ac_stats);
1090 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1091 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1092 s->q_chroma_intra_matrix= NULL;
1093 s->q_chroma_intra_matrix16= NULL;
1094 av_freep(&s->q_intra_matrix);
1095 av_freep(&s->q_inter_matrix);
1096 av_freep(&s->q_intra_matrix16);
1097 av_freep(&s->q_inter_matrix16);
1098 av_freep(&s->input_picture);
1099 av_freep(&s->reordered_input_picture);
1100 av_freep(&s->dct_offset);
1105 static int get_sae(uint8_t *src, int ref, int stride)
1110 for (y = 0; y < 16; y++) {
1111 for (x = 0; x < 16; x++) {
1112 acc += FFABS(src[x + y * stride] - ref);
1119 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1120 uint8_t *ref, int stride)
1126 h = s->height & ~15;
1128 for (y = 0; y < h; y += 16) {
1129 for (x = 0; x < w; x += 16) {
1130 int offset = x + y * stride;
1131 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
1133 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1134 int sae = get_sae(src + offset, mean, stride);
1136 acc += sae + 500 < sad;
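/* Count macroblocks whose absolute deviation from their own mean (plus a
 * margin of 500) is smaller than the SAD against the reference, i.e.
 * blocks that look cheaper to intra-code than to predict.  The
 * b_frame_strategy == 1 path below uses this count as b_frame_score. */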
1142 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1144 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1145 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1146 s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1147 &s->linesize, &s->uvlinesize);
1150 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1152 Picture *pic = NULL;
1154 int i, display_picture_number = 0, ret;
1155 int encoding_delay = s->max_b_frames ? s->max_b_frames
1156 : (s->low_delay ? 0 : 1);
1157 int flush_offset = 1;
1162 display_picture_number = s->input_picture_number++;
1164 if (pts != AV_NOPTS_VALUE) {
1165 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1166 int64_t last = s->user_specified_pts;
1169 av_log(s->avctx, AV_LOG_ERROR,
1170 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1172 return AVERROR(EINVAL);
1175 if (!s->low_delay && display_picture_number == 1)
1176 s->dts_delta = pts - last;
1178 s->user_specified_pts = pts;
1180 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1181 s->user_specified_pts =
1182 pts = s->user_specified_pts + 1;
1183 av_log(s->avctx, AV_LOG_INFO,
1184 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1187 pts = display_picture_number;
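/* No pts supplied by the caller: continue from the last user pts + 1
 * (with the warning above) or fall back to the display picture number.
 * dts_delta, captured from the second picture when B-frames are enabled,
 * is later subtracted from the first packet's pts in
 * ff_mpv_encode_picture() to produce a valid dts. */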
1191 if (!pic_arg->buf[0] ||
1192 pic_arg->linesize[0] != s->linesize ||
1193 pic_arg->linesize[1] != s->uvlinesize ||
1194 pic_arg->linesize[2] != s->uvlinesize)
1196 if ((s->width & 15) || (s->height & 15))
1198 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1200 if (s->linesize & (STRIDE_ALIGN-1))
1203 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1204 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1206 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1210 pic = &s->picture[i];
1214 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1217 ret = alloc_picture(s, pic, direct);
1222 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1223 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1224 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1227 int h_chroma_shift, v_chroma_shift;
1228 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1232 for (i = 0; i < 3; i++) {
1233 int src_stride = pic_arg->linesize[i];
1234 int dst_stride = i ? s->uvlinesize : s->linesize;
1235 int h_shift = i ? h_chroma_shift : 0;
1236 int v_shift = i ? v_chroma_shift : 0;
1237 int w = s->width >> h_shift;
1238 int h = s->height >> v_shift;
1239 uint8_t *src = pic_arg->data[i];
1240 uint8_t *dst = pic->f->data[i];
1243 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1244 && !s->progressive_sequence
1245 && FFALIGN(s->height, 32) - s->height > 16)
1248 if (!s->avctx->rc_buffer_size)
1249 dst += INPLACE_OFFSET;
1251 if (src_stride == dst_stride)
1252 memcpy(dst, src, src_stride * h);
1255 uint8_t *dst2 = dst;
1257 memcpy(dst2, src, w);
1262 if ((s->width & 15) || (s->height & (vpad-1))) {
1263 s->mpvencdsp.draw_edges(dst, dst_stride,
1273 ret = av_frame_copy_props(pic->f, pic_arg);
1277 pic->f->display_picture_number = display_picture_number;
1278 pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1280 /* Flushing: When we have not received enough input frames,
1281 * ensure s->input_picture[0] contains the first picture */
1282 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1283 if (s->input_picture[flush_offset])
1286 if (flush_offset <= 1)
1289 encoding_delay = encoding_delay - flush_offset + 1;
1292 /* shift buffer entries */
1293 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1294 s->input_picture[i - flush_offset] = s->input_picture[i];
1296 s->input_picture[encoding_delay] = (Picture*) pic;
1301 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1305 int64_t score64 = 0;
1307 for (plane = 0; plane < 3; plane++) {
1308 const int stride = p->f->linesize[plane];
1309 const int bw = plane ? 1 : 2;
1310 for (y = 0; y < s->mb_height * bw; y++) {
1311 for (x = 0; x < s->mb_width * bw; x++) {
1312 int off = p->shared ? 0 : 16;
1313 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1314 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1315 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1317 switch (FFABS(s->frame_skip_exp)) {
1318 case 0: score = FFMAX(score, v); break;
1319 case 1: score += FFABS(v); break;
1320 case 2: score64 += v * (int64_t)v; break;
1321 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1322 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1331 if (s->frame_skip_exp < 0)
1332 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1333 -1.0/s->frame_skip_exp);
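/* frame_skip_exp selects the error norm accumulated above: 0 takes the
 * maximum per-block score, 1 sums absolute differences, 2 sums squares,
 * 3 and 4 use higher powers.  A negative exponent additionally normalizes
 * the score per macroblock and applies the -1/exp root (the pow() above).
 * The frame is then dropped if the score stays below frame_skip_threshold
 * or below frame_skip_factor * lambda / 256. */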
1335 if (score64 < s->frame_skip_threshold)
1337 if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
1342 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1344 AVPacket pkt = { 0 };
1348 av_init_packet(&pkt);
1350 ret = avcodec_send_frame(c, frame);
1355 ret = avcodec_receive_packet(c, &pkt);
1358 av_packet_unref(&pkt);
1359 } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1366 static int estimate_best_b_count(MpegEncContext *s)
1368 const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1369 const int scale = s->brd_scale;
1370 int width = s->width >> scale;
1371 int height = s->height >> scale;
1372 int i, j, out_size, p_lambda, b_lambda, lambda2;
1373 int64_t best_rd = INT64_MAX;
1374 int best_b_count = -1;
1377 av_assert0(scale >= 0 && scale <= 3);
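/* Brute-force B-frame decision (b_frame_strategy == 2): the queued input
 * pictures are downscaled by 2^brd_scale, re-encoded with every candidate
 * B-frame count using a throwaway encoder, and the count with the lowest
 * rate-distortion cost (accumulated bits * lambda2 plus the squared
 * reconstruction error reported in c->error[]) wins. */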
1380 //s->next_picture_ptr->quality;
1381 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1382 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1383 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1384 if (!b_lambda) // FIXME we should do this somewhere else
1385 b_lambda = p_lambda;
1386 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1389 for (i = 0; i < s->max_b_frames + 2; i++) {
1390 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1391 s->next_picture_ptr;
1394 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1395 pre_input = *pre_input_ptr;
1396 memcpy(data, pre_input_ptr->f->data, sizeof(data));
1398 if (!pre_input.shared && i) {
1399 data[0] += INPLACE_OFFSET;
1400 data[1] += INPLACE_OFFSET;
1401 data[2] += INPLACE_OFFSET;
1404 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1405 s->tmp_frames[i]->linesize[0],
1407 pre_input.f->linesize[0],
1409 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1410 s->tmp_frames[i]->linesize[1],
1412 pre_input.f->linesize[1],
1413 width >> 1, height >> 1);
1414 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1415 s->tmp_frames[i]->linesize[2],
1417 pre_input.f->linesize[2],
1418 width >> 1, height >> 1);
1422 for (j = 0; j < s->max_b_frames + 1; j++) {
1426 if (!s->input_picture[j])
1429 c = avcodec_alloc_context3(NULL);
1431 return AVERROR(ENOMEM);
1435 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1436 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1437 c->mb_decision = s->avctx->mb_decision;
1438 c->me_cmp = s->avctx->me_cmp;
1439 c->mb_cmp = s->avctx->mb_cmp;
1440 c->me_sub_cmp = s->avctx->me_sub_cmp;
1441 c->pix_fmt = AV_PIX_FMT_YUV420P;
1442 c->time_base = s->avctx->time_base;
1443 c->max_b_frames = s->max_b_frames;
1445 ret = avcodec_open2(c, codec, NULL);
1449 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1450 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1452 out_size = encode_frame(c, s->tmp_frames[0]);
1458 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1460 for (i = 0; i < s->max_b_frames + 1; i++) {
1461 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1463 s->tmp_frames[i + 1]->pict_type = is_p ?
1464 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1465 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1467 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1473 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1476 /* get the delayed frames */
1477 out_size = encode_frame(c, NULL);
1482 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1484 rd += c->error[0] + c->error[1] + c->error[2];
1492 avcodec_free_context(&c);
1497 return best_b_count;
1500 static int select_input_picture(MpegEncContext *s)
1504 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1505 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1506 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1508 /* set next picture type & ordering */
1509 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1510 if (s->frame_skip_threshold || s->frame_skip_factor) {
1511 if (s->picture_in_gop_number < s->gop_size &&
1512 s->next_picture_ptr &&
1513 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1514 // FIXME check that the gop check above is +-1 correct
1515 av_frame_unref(s->input_picture[0]->f);
1517 ff_vbv_update(s, 0);
1523 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1524 !s->next_picture_ptr || s->intra_only) {
1525 s->reordered_input_picture[0] = s->input_picture[0];
1526 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1527 s->reordered_input_picture[0]->f->coded_picture_number =
1528 s->coded_picture_number++;
1532 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1533 for (i = 0; i < s->max_b_frames + 1; i++) {
1534 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1536 if (pict_num >= s->rc_context.num_entries)
1538 if (!s->input_picture[i]) {
1539 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1543 s->input_picture[i]->f->pict_type =
1544 s->rc_context.entry[pict_num].new_pict_type;
1548 if (s->b_frame_strategy == 0) {
1549 b_frames = s->max_b_frames;
1550 while (b_frames && !s->input_picture[b_frames])
1552 } else if (s->b_frame_strategy == 1) {
1553 for (i = 1; i < s->max_b_frames + 1; i++) {
1554 if (s->input_picture[i] &&
1555 s->input_picture[i]->b_frame_score == 0) {
1556 s->input_picture[i]->b_frame_score =
1558 s->input_picture[i ]->f->data[0],
1559 s->input_picture[i - 1]->f->data[0],
1563 for (i = 0; i < s->max_b_frames + 1; i++) {
1564 if (!s->input_picture[i] ||
1565 s->input_picture[i]->b_frame_score - 1 >
1566 s->mb_num / s->b_sensitivity)
1570 b_frames = FFMAX(0, i - 1);
1573 for (i = 0; i < b_frames + 1; i++) {
1574 s->input_picture[i]->b_frame_score = 0;
1576 } else if (s->b_frame_strategy == 2) {
1577 b_frames = estimate_best_b_count(s);
1584 for (i = b_frames - 1; i >= 0; i--) {
1585 int type = s->input_picture[i]->f->pict_type;
1586 if (type && type != AV_PICTURE_TYPE_B)
1589 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1590 b_frames == s->max_b_frames) {
1591 av_log(s->avctx, AV_LOG_ERROR,
1592 "warning, too many B-frames in a row\n");
1595 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1596 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1597 s->gop_size > s->picture_in_gop_number) {
1598 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1600 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1602 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1606 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1607 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1610 s->reordered_input_picture[0] = s->input_picture[b_frames];
1611 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1612 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1613 s->reordered_input_picture[0]->f->coded_picture_number =
1614 s->coded_picture_number++;
1615 for (i = 0; i < b_frames; i++) {
1616 s->reordered_input_picture[i + 1] = s->input_picture[i];
1617 s->reordered_input_picture[i + 1]->f->pict_type =
1619 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1620 s->coded_picture_number++;
1625 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1627 if (s->reordered_input_picture[0]) {
1628 s->reordered_input_picture[0]->reference =
1629 s->reordered_input_picture[0]->f->pict_type !=
1630 AV_PICTURE_TYPE_B ? 3 : 0;
1632 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1635 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1636 // input is a shared picture, so we can't modify it -> allocate a new
1637 // one & ensure that the shared one is reusable
1640 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1643 pic = &s->picture[i];
1645 pic->reference = s->reordered_input_picture[0]->reference;
1646 if (alloc_picture(s, pic, 0) < 0) {
1650 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1654 /* mark us unused / free shared pic */
1655 av_frame_unref(s->reordered_input_picture[0]->f);
1656 s->reordered_input_picture[0]->shared = 0;
1658 s->current_picture_ptr = pic;
1660 // input is not a shared pix -> reuse buffer for current_pix
1661 s->current_picture_ptr = s->reordered_input_picture[0];
1662 for (i = 0; i < 4; i++) {
1663 s->new_picture.f->data[i] += INPLACE_OFFSET;
1666 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1667 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1668 s->current_picture_ptr)) < 0)
1671 s->picture_number = s->new_picture.f->display_picture_number;
1676 static void frame_end(MpegEncContext *s)
1678 if (s->unrestricted_mv &&
1679 s->current_picture.reference &&
1681 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1682 int hshift = desc->log2_chroma_w;
1683 int vshift = desc->log2_chroma_h;
1684 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1685 s->current_picture.f->linesize[0],
1686 s->h_edge_pos, s->v_edge_pos,
1687 EDGE_WIDTH, EDGE_WIDTH,
1688 EDGE_TOP | EDGE_BOTTOM);
1689 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1690 s->current_picture.f->linesize[1],
1691 s->h_edge_pos >> hshift,
1692 s->v_edge_pos >> vshift,
1693 EDGE_WIDTH >> hshift,
1694 EDGE_WIDTH >> vshift,
1695 EDGE_TOP | EDGE_BOTTOM);
1696 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1697 s->current_picture.f->linesize[2],
1698 s->h_edge_pos >> hshift,
1699 s->v_edge_pos >> vshift,
1700 EDGE_WIDTH >> hshift,
1701 EDGE_WIDTH >> vshift,
1702 EDGE_TOP | EDGE_BOTTOM);
1707 s->last_pict_type = s->pict_type;
1708 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1709 if (s->pict_type!= AV_PICTURE_TYPE_B)
1710 s->last_non_b_pict_type = s->pict_type;
1712 #if FF_API_CODED_FRAME
1713 FF_DISABLE_DEPRECATION_WARNINGS
1714 av_frame_unref(s->avctx->coded_frame);
1715 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1716 FF_ENABLE_DEPRECATION_WARNINGS
1718 #if FF_API_ERROR_FRAME
1719 FF_DISABLE_DEPRECATION_WARNINGS
1720 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1721 sizeof(s->current_picture.encoding_error));
1722 FF_ENABLE_DEPRECATION_WARNINGS
1726 static void update_noise_reduction(MpegEncContext *s)
1730 for (intra = 0; intra < 2; intra++) {
1731 if (s->dct_count[intra] > (1 << 16)) {
1732 for (i = 0; i < 64; i++) {
1733 s->dct_error_sum[intra][i] >>= 1;
1735 s->dct_count[intra] >>= 1;
1738 for (i = 0; i < 64; i++) {
1739 s->dct_offset[intra][i] = (s->noise_reduction *
1740 s->dct_count[intra] +
1741 s->dct_error_sum[intra][i] / 2) /
1742 (s->dct_error_sum[intra][i] + 1);
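/* The offset works out to roughly
 * noise_reduction * dct_count / dct_error_sum[intra][i], i.e. it is
 * inversely proportional to the per-coefficient statistic gathered during
 * quantization; the "/ 2" and "+ 1" terms only provide rounding and
 * divide-by-zero protection.  Halving everything once dct_count exceeds
 * 2^16 acts as an exponential forget factor and prevents overflow. */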
1747 static int frame_start(MpegEncContext *s)
1751 /* mark & release old frames */
1752 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1753 s->last_picture_ptr != s->next_picture_ptr &&
1754 s->last_picture_ptr->f->buf[0]) {
1755 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1758 s->current_picture_ptr->f->pict_type = s->pict_type;
1759 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1761 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1762 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1763 s->current_picture_ptr)) < 0)
1766 if (s->pict_type != AV_PICTURE_TYPE_B) {
1767 s->last_picture_ptr = s->next_picture_ptr;
1769 s->next_picture_ptr = s->current_picture_ptr;
1772 if (s->last_picture_ptr) {
1773 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1774 if (s->last_picture_ptr->f->buf[0] &&
1775 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1776 s->last_picture_ptr)) < 0)
1779 if (s->next_picture_ptr) {
1780 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1781 if (s->next_picture_ptr->f->buf[0] &&
1782 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1783 s->next_picture_ptr)) < 0)
1787 if (s->picture_structure!= PICT_FRAME) {
1789 for (i = 0; i < 4; i++) {
1790 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1791 s->current_picture.f->data[i] +=
1792 s->current_picture.f->linesize[i];
1794 s->current_picture.f->linesize[i] *= 2;
1795 s->last_picture.f->linesize[i] *= 2;
1796 s->next_picture.f->linesize[i] *= 2;
1800 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1801 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1802 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1803 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1804 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1805 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1807 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1808 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1811 if (s->dct_error_sum) {
1812 av_assert2(s->noise_reduction && s->encoding);
1813 update_noise_reduction(s);
1819 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1820 const AVFrame *pic_arg, int *got_packet)
1822 MpegEncContext *s = avctx->priv_data;
1823 int i, stuffing_count, ret;
1824 int context_count = s->slice_context_count;
1826 s->vbv_ignore_qmax = 0;
1828 s->picture_in_gop_number++;
1830 if (load_input_picture(s, pic_arg) < 0)
1833 if (select_input_picture(s) < 0) {
1838 if (s->new_picture.f->data[0]) {
1839 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1840 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1842 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1843 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1846 s->mb_info_ptr = av_packet_new_side_data(pkt,
1847 AV_PKT_DATA_H263_MB_INFO,
1848 s->mb_width*s->mb_height*12);
1849 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1852 for (i = 0; i < context_count; i++) {
1853 int start_y = s->thread_context[i]->start_mb_y;
1854 int end_y = s->thread_context[i]-> end_mb_y;
1855 int h = s->mb_height;
1856 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1857 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1859 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1862 s->pict_type = s->new_picture.f->pict_type;
1864 ret = frame_start(s);
1868 ret = encode_picture(s, s->picture_number);
1869 if (growing_buffer) {
1870 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1871 pkt->data = s->pb.buf;
1872 pkt->size = avctx->internal->byte_buffer_size;
1877 #if FF_API_STAT_BITS
1878 FF_DISABLE_DEPRECATION_WARNINGS
1879 avctx->header_bits = s->header_bits;
1880 avctx->mv_bits = s->mv_bits;
1881 avctx->misc_bits = s->misc_bits;
1882 avctx->i_tex_bits = s->i_tex_bits;
1883 avctx->p_tex_bits = s->p_tex_bits;
1884 avctx->i_count = s->i_count;
1885 // FIXME f/b_count in avctx
1886 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1887 avctx->skip_count = s->skip_count;
1888 FF_ENABLE_DEPRECATION_WARNINGS
1893 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1894 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
1896 if (avctx->rc_buffer_size) {
1897 RateControlContext *rcc = &s->rc_context;
1898 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1899 int hq = (s->avctx->mb_decision == FF_MB_DECISION_RD || s->avctx->trellis);
1900 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1902 if (put_bits_count(&s->pb) > max_size &&
1903 s->lambda < s->lmax) {
1904 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1905 (s->qscale + 1) / s->qscale);
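/* The coded frame overflowed the VBV budget: raise lambda by at least
 * min_step (about one qscale step, (1 << (FF_LAMBDA_SHIFT + 7)) / 139 ~ 117
 * when FF_LAMBDA_SHIFT == 7, or a single unit when trellis/RD is active)
 * and encode the frame again -- see the "reencoding frame due to VBV" log
 * further down. */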
1906 if (s->adaptive_quant) {
1908 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1909 s->lambda_table[i] =
1910 FFMAX(s->lambda_table[i] + min_step,
1911 s->lambda_table[i] * (s->qscale + 1) /
1914 s->mb_skipped = 0; // done in frame_start()
1915 // done in encode_picture() so we must undo it
1916 if (s->pict_type == AV_PICTURE_TYPE_P) {
1917 if (s->flipflop_rounding ||
1918 s->codec_id == AV_CODEC_ID_H263P ||
1919 s->codec_id == AV_CODEC_ID_MPEG4)
1920 s->no_rounding ^= 1;
1922 if (s->pict_type != AV_PICTURE_TYPE_B) {
1923 s->time_base = s->last_time_base;
1924 s->last_non_b_time = s->time - s->pp_time;
1926 for (i = 0; i < context_count; i++) {
1927 PutBitContext *pb = &s->thread_context[i]->pb;
1928 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1930 s->vbv_ignore_qmax = 1;
1931 av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1935 av_assert0(s->avctx->rc_max_rate);
1938 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1939 ff_write_pass1_stats(s);
1941 for (i = 0; i < 4; i++) {
1942 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1943 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1945 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1946 s->current_picture_ptr->encoding_error,
1947 (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1950 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1951 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1952 s->misc_bits + s->i_tex_bits +
1954 flush_put_bits(&s->pb);
1955 s->frame_bits = put_bits_count(&s->pb);
1957 stuffing_count = ff_vbv_update(s, s->frame_bits);
1958 s->stuffing_bits = 8*stuffing_count;
1959 if (stuffing_count) {
1960 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1961 stuffing_count + 50) {
1962 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1966 switch (s->codec_id) {
1967 case AV_CODEC_ID_MPEG1VIDEO:
1968 case AV_CODEC_ID_MPEG2VIDEO:
1969 while (stuffing_count--) {
1970 put_bits(&s->pb, 8, 0);
1973 case AV_CODEC_ID_MPEG4:
1974 put_bits(&s->pb, 16, 0);
1975 put_bits(&s->pb, 16, 0x1C3);
1976 stuffing_count -= 4;
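/* MPEG-4 padding: a four-byte start code (0x000001C3, which seems to be
 * the stuffing start code) accounts for 4 of the stuffing bytes, the
 * remainder is written as 0xFF filler below. */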
1977 while (stuffing_count--) {
1978 put_bits(&s->pb, 8, 0xFF);
1982 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1984 flush_put_bits(&s->pb);
1985 s->frame_bits = put_bits_count(&s->pb);
1988 /* update MPEG-1/2 vbv_delay for CBR */
1989 if (s->avctx->rc_max_rate &&
1990 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1991 s->out_format == FMT_MPEG1 &&
1992 90000LL * (avctx->rc_buffer_size - 1) <=
1993 s->avctx->rc_max_rate * 0xFFFFLL) {
1994 AVCPBProperties *props;
1997 int vbv_delay, min_delay;
1998 double inbits = s->avctx->rc_max_rate *
1999 av_q2d(s->avctx->time_base);
2000 int minbits = s->frame_bits - 8 *
2001 (s->vbv_delay_ptr - s->pb.buf - 1);
2002 double bits = s->rc_context.buffer_index + minbits - inbits;
2005 av_log(s->avctx, AV_LOG_ERROR,
2006 "Internal error, negative bits\n");
2008 assert(s->repeat_first_field == 0);
2010 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
2011 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
2012 s->avctx->rc_max_rate;
2014 vbv_delay = FFMAX(vbv_delay, min_delay);
2016 av_assert0(vbv_delay < 0xFFFF);
2018 s->vbv_delay_ptr[0] &= 0xF8;
2019 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2020 s->vbv_delay_ptr[1] = vbv_delay >> 5;
2021 s->vbv_delay_ptr[2] &= 0x07;
2022 s->vbv_delay_ptr[2] |= vbv_delay << 3;
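/* vbv_delay is a 16-bit field in 90 kHz units starting 5 bits into
 * *vbv_delay_ptr: 3 bits land in byte 0, 8 in byte 1 and 5 in byte 2,
 * which is what the masks and shifts above splice together.  The * 300
 * below converts the 90 kHz ticks to the 27 MHz clock used by
 * AVCPBProperties.vbv_delay. */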
2024 props = av_cpb_properties_alloc(&props_size);
2026 return AVERROR(ENOMEM);
2027 props->vbv_delay = vbv_delay * 300;
2029 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
2030 (uint8_t*)props, props_size);
2036 #if FF_API_VBV_DELAY
2037 FF_DISABLE_DEPRECATION_WARNINGS
2038 avctx->vbv_delay = vbv_delay * 300;
2039 FF_ENABLE_DEPRECATION_WARNINGS
2042 s->total_bits += s->frame_bits;
2043 #if FF_API_STAT_BITS
2044 FF_DISABLE_DEPRECATION_WARNINGS
2045 avctx->frame_bits = s->frame_bits;
2046 FF_ENABLE_DEPRECATION_WARNINGS
2050 pkt->pts = s->current_picture.f->pts;
2051 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2052 if (!s->current_picture.f->coded_picture_number)
2053 pkt->dts = pkt->pts - s->dts_delta;
2055 pkt->dts = s->reordered_pts;
2056 s->reordered_pts = pkt->pts;
2058 pkt->dts = pkt->pts;
2059 if (s->current_picture.f->key_frame)
2060 pkt->flags |= AV_PKT_FLAG_KEY;
2062 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2067 /* release non-reference frames */
2068 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2069 if (!s->picture[i].reference)
2070 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2073 av_assert1((s->frame_bits & 7) == 0);
2075 pkt->size = s->frame_bits / 8;
2076 *got_packet = !!pkt->size;
2080 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2081 int n, int threshold)
2083 static const char tab[64] = {
2084 3, 2, 2, 1, 1, 1, 1, 1,
2085 1, 1, 1, 1, 1, 1, 1, 1,
2086 1, 1, 1, 1, 1, 1, 1, 1,
2087 0, 0, 0, 0, 0, 0, 0, 0,
2088 0, 0, 0, 0, 0, 0, 0, 0,
2089 0, 0, 0, 0, 0, 0, 0, 0,
2090 0, 0, 0, 0, 0, 0, 0, 0,
2091 0, 0, 0, 0, 0, 0, 0, 0
2096 int16_t *block = s->block[n];
2097 const int last_index = s->block_last_index[n];
2100 if (threshold < 0) {
2102 threshold = -threshold;
2106 /* Is everything we could set to zero already zero? */
2107 if (last_index <= skip_dc - 1)
2110 for (i = 0; i <= last_index; i++) {
2111 const int j = s->intra_scantable.permutated[i];
2112 const int level = FFABS(block[j]);
2114 if (skip_dc && i == 0)
2118 } else if (level > 1) {
2124 if (score >= threshold)
2126 for (i = skip_dc; i <= last_index; i++) {
2127 const int j = s->intra_scantable.permutated[i];
2131 s->block_last_index[n] = 0;
2133 s->block_last_index[n] = -1;
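/*
 * Clip the quantized coefficients to the [min_qcoeff, max_qcoeff] range the
 * entropy coder can represent (the intra DC coefficient is left untouched),
 * and log a notice when clipping occurred and mb_decision is
 * FF_MB_DECISION_SIMPLE.
 */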
2136 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2140 const int maxlevel = s->max_qcoeff;
2141 const int minlevel = s->min_qcoeff;
2145 i = 1; // skip clipping of intra dc
2149 for (; i <= last_index; i++) {
2150 const int j = s->intra_scantable.permutated[i];
2151 int level = block[j];
2153 if (level > maxlevel) {
2156 } else if (level < minlevel) {
2164 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2165 av_log(s->avctx, AV_LOG_INFO,
2166 "warning, clipping %d dct coefficients to %d..%d\n",
2167 overflow, minlevel, maxlevel);
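/*
 * Compute a perceptual weight for every pixel of an 8x8 block from the
 * activity of its 3x3 neighbourhood (clipped at the block borders):
 * with sum = sum(v), sqr = sum(v*v) and count pixels in the neighbourhood,
 *   weight = 36 * sqrt(count*sqr - sum*sum) / count
 * i.e. 36 times the local standard deviation. Used by the quantizer noise
 * shaping below.
 */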
2170 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2174 for (y = 0; y < 8; y++) {
2175 for (x = 0; x < 8; x++) {
2181 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2182 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2183 int v = ptr[x2 + y2 * stride];
2189 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2194 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2195 int motion_x, int motion_y,
2196 int mb_block_height,
2200 int16_t weight[12][64];
2201 int16_t orig[12][64];
2202 const int mb_x = s->mb_x;
2203 const int mb_y = s->mb_y;
2206 int dct_offset = s->linesize * 8; // default for progressive frames
2207 int uv_dct_offset = s->uvlinesize * 8;
2208 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2209 ptrdiff_t wrap_y, wrap_c;
2211 for (i = 0; i < mb_block_count; i++)
2212 skip_dct[i] = s->skipdct;
2214 if (s->adaptive_quant) {
2215 const int last_qp = s->qscale;
2216 const int mb_xy = mb_x + mb_y * s->mb_stride;
2218 s->lambda = s->lambda_table[mb_xy];
2221 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2222 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2223 s->dquant = s->qscale - last_qp;
2225 if (s->out_format == FMT_H263) {
2226 s->dquant = av_clip(s->dquant, -2, 2);
2228 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2230 if (s->pict_type == AV_PICTURE_TYPE_B) {
2231 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2234 if (s->mv_type == MV_TYPE_8X8)
2240 ff_set_qscale(s, last_qp + s->dquant);
2241 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2242 ff_set_qscale(s, s->qscale + s->dquant);
2244 wrap_y = s->linesize;
2245 wrap_c = s->uvlinesize;
2246 ptr_y = s->new_picture.f->data[0] +
2247 (mb_y * 16 * wrap_y) + mb_x * 16;
2248 ptr_cb = s->new_picture.f->data[1] +
2249 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2250 ptr_cr = s->new_picture.f->data[2] +
2251 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2253 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2254 uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2255 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2256 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2257 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2259 16, 16, mb_x * 16, mb_y * 16,
2260 s->width, s->height);
2262 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2264 mb_block_width, mb_block_height,
2265 mb_x * mb_block_width, mb_y * mb_block_height,
2267 ptr_cb = ebuf + 16 * wrap_y;
2268 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2270 mb_block_width, mb_block_height,
2271 mb_x * mb_block_width, mb_y * mb_block_height,
2273 ptr_cr = ebuf + 16 * wrap_y + 16;
2277 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2278 int progressive_score, interlaced_score;
2280 s->interlaced_dct = 0;
2281 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2282 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2283 NULL, wrap_y, 8) - 400;
2285 if (progressive_score > 0) {
2286 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2287 NULL, wrap_y * 2, 8) +
2288 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2289 NULL, wrap_y * 2, 8);
2290 if (progressive_score > interlaced_score) {
2291 s->interlaced_dct = 1;
2293 dct_offset = wrap_y;
2294 uv_dct_offset = wrap_c;
2296 if (s->chroma_format == CHROMA_422 ||
2297 s->chroma_format == CHROMA_444)
2303 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2304 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2305 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2306 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2308 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2312 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2313 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2314 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2315 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2316 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2317 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2318 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2319 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2320 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2321 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2322 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2323 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2327 op_pixels_func (*op_pix)[4];
2328 qpel_mc_func (*op_qpix)[16];
2329 uint8_t *dest_y, *dest_cb, *dest_cr;
2331 dest_y = s->dest[0];
2332 dest_cb = s->dest[1];
2333 dest_cr = s->dest[2];
2335 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2336 op_pix = s->hdsp.put_pixels_tab;
2337 op_qpix = s->qdsp.put_qpel_pixels_tab;
2339 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2340 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2343 if (s->mv_dir & MV_DIR_FORWARD) {
2344 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2345 s->last_picture.f->data,
2347 op_pix = s->hdsp.avg_pixels_tab;
2348 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2350 if (s->mv_dir & MV_DIR_BACKWARD) {
2351 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2352 s->next_picture.f->data,
2356 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2357 int progressive_score, interlaced_score;
2359 s->interlaced_dct = 0;
2360 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2361 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2365 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2366 progressive_score -= 400;
2368 if (progressive_score > 0) {
2369 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2371 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2375 if (progressive_score > interlaced_score) {
2376 s->interlaced_dct = 1;
2378 dct_offset = wrap_y;
2379 uv_dct_offset = wrap_c;
2381 if (s->chroma_format == CHROMA_422)
2387 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2388 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2389 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2390 dest_y + dct_offset, wrap_y);
2391 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2392 dest_y + dct_offset + 8, wrap_y);
2394 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2398 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2399 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2400 if (!s->chroma_y_shift) { /* 422 */
2401 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2402 dest_cb + uv_dct_offset, wrap_c);
2403 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2404 dest_cr + uv_dct_offset, wrap_c);
2407 /* pre quantization */
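        /* If the motion-compensated variance of this MB is already small,
         * cheaply mark 8x8 blocks whose SAD against the prediction is below
         * 20*qscale so their DCT/quantization can be skipped. */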
2408 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2409 2 * s->qscale * s->qscale) {
2411 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2413 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2415 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2416 wrap_y, 8) < 20 * s->qscale)
2418 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2419 wrap_y, 8) < 20 * s->qscale)
2421 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2423 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2425 if (!s->chroma_y_shift) { /* 422 */
2426 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2427 dest_cb + uv_dct_offset,
2428 wrap_c, 8) < 20 * s->qscale)
2430 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2431 dest_cr + uv_dct_offset,
2432 wrap_c, 8) < 20 * s->qscale)
2438 if (s->quantizer_noise_shaping) {
2440 get_visual_weight(weight[0], ptr_y , wrap_y);
2442 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2444 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2446 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2448 get_visual_weight(weight[4], ptr_cb , wrap_c);
2450 get_visual_weight(weight[5], ptr_cr , wrap_c);
2451 if (!s->chroma_y_shift) { /* 422 */
2453 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2456 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2459 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2462 /* DCT & quantize */
2463 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2465 for (i = 0; i < mb_block_count; i++) {
2468 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2469 // FIXME: we could decide to change the quantizer instead of clipping
2471 // JS: I don't think that would be a good idea, it could lower
2472 //     quality instead of improving it. Just INTRADC clipping
2473 //     deserves changes in the quantizer.
2475 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2477 s->block_last_index[i] = -1;
2479 if (s->quantizer_noise_shaping) {
2480 for (i = 0; i < mb_block_count; i++) {
2482 s->block_last_index[i] =
2483 dct_quantize_refine(s, s->block[i], weight[i],
2484 orig[i], i, s->qscale);
2489 if (s->luma_elim_threshold && !s->mb_intra)
2490 for (i = 0; i < 4; i++)
2491 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2492 if (s->chroma_elim_threshold && !s->mb_intra)
2493 for (i = 4; i < mb_block_count; i++)
2494 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2496 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2497 for (i = 0; i < mb_block_count; i++) {
2498 if (s->block_last_index[i] == -1)
2499 s->coded_score[i] = INT_MAX / 256;
2504 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2505 s->block_last_index[4] =
2506 s->block_last_index[5] = 0;
2508 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2509 if (!s->chroma_y_shift) { /* 422 / 444 */
2510 for (i=6; i<12; i++) {
2511 s->block_last_index[i] = 0;
2512 s->block[i][0] = s->block[4][0];
2517 // FIXME: the non-C quantize code returns an incorrect block_last_index
2518 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2519 for (i = 0; i < mb_block_count; i++) {
2521 if (s->block_last_index[i] > 0) {
2522 for (j = 63; j > 0; j--) {
2523 if (s->block[i][s->intra_scantable.permutated[j]])
2526 s->block_last_index[i] = j;
2531 /* huffman encode */
2532 switch(s->codec_id){ //FIXME a function pointer could be slightly faster
2533 case AV_CODEC_ID_MPEG1VIDEO:
2534 case AV_CODEC_ID_MPEG2VIDEO:
2535 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2536 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2538 case AV_CODEC_ID_MPEG4:
2539 if (CONFIG_MPEG4_ENCODER)
2540 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2542 case AV_CODEC_ID_MSMPEG4V2:
2543 case AV_CODEC_ID_MSMPEG4V3:
2544 case AV_CODEC_ID_WMV1:
2545 if (CONFIG_MSMPEG4_ENCODER)
2546 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2548 case AV_CODEC_ID_WMV2:
2549 if (CONFIG_WMV2_ENCODER)
2550 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2552 case AV_CODEC_ID_H261:
2553 if (CONFIG_H261_ENCODER)
2554 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2556 case AV_CODEC_ID_H263:
2557 case AV_CODEC_ID_H263P:
2558 case AV_CODEC_ID_FLV1:
2559 case AV_CODEC_ID_RV10:
2560 case AV_CODEC_ID_RV20:
2561 if (CONFIG_H263_ENCODER)
2562 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2564 case AV_CODEC_ID_MJPEG:
2565 case AV_CODEC_ID_AMV:
2566 if (CONFIG_MJPEG_ENCODER)
2567 ff_mjpeg_encode_mb(s, s->block);
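/*
 * Dispatch on the chroma format: a 4:2:0 macroblock carries 6 8x8 blocks
 * (4 luma + 2 chroma), 4:2:2 carries 8 and 4:4:4 carries 12, which is what
 * the trailing (chroma block height, chroma block width, block count)
 * arguments of encode_mb_internal() describe.
 */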
2574 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2576 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2577 else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2578 else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
2581 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2584 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2587 d->mb_skip_run= s->mb_skip_run;
2589 d->last_dc[i] = s->last_dc[i];
2592 d->mv_bits= s->mv_bits;
2593 d->i_tex_bits= s->i_tex_bits;
2594 d->p_tex_bits= s->p_tex_bits;
2595 d->i_count= s->i_count;
2596 d->f_count= s->f_count;
2597 d->b_count= s->b_count;
2598 d->skip_count= s->skip_count;
2599 d->misc_bits= s->misc_bits;
2603 d->qscale= s->qscale;
2604 d->dquant= s->dquant;
2606 d->esc3_level_length= s->esc3_level_length;
2609 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2612 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2613 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2616 d->mb_skip_run= s->mb_skip_run;
2618 d->last_dc[i] = s->last_dc[i];
2621 d->mv_bits= s->mv_bits;
2622 d->i_tex_bits= s->i_tex_bits;
2623 d->p_tex_bits= s->p_tex_bits;
2624 d->i_count= s->i_count;
2625 d->f_count= s->f_count;
2626 d->b_count= s->b_count;
2627 d->skip_count= s->skip_count;
2628 d->misc_bits= s->misc_bits;
2630 d->mb_intra= s->mb_intra;
2631 d->mb_skipped= s->mb_skipped;
2632 d->mv_type= s->mv_type;
2633 d->mv_dir= s->mv_dir;
2635 if(s->data_partitioning){
2637 d->tex_pb= s->tex_pb;
2641 d->block_last_index[i]= s->block_last_index[i];
2642 d->interlaced_dct= s->interlaced_dct;
2643 d->qscale= s->qscale;
2645 d->esc3_level_length= s->esc3_level_length;
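/*
 * Try encoding the current macroblock with one candidate type: restore the
 * saved context, encode into one of the two ping-pong bit buffers, and score
 * the result either by bit count alone or, for FF_MB_DECISION_RD, by
 * lambda2*bits + (SSE << FF_LAMBDA_SHIFT) after reconstructing into a
 * scratch buffer. If the score beats *dmin, the state is copied into *best
 * and the alternate bit buffer is selected for the next attempt.
 */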
2648 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2649 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2650 int *dmin, int *next_block, int motion_x, int motion_y)
2653 uint8_t *dest_backup[3];
2655 copy_context_before_encode(s, backup, type);
2657 s->block= s->blocks[*next_block];
2658 s->pb= pb[*next_block];
2659 if(s->data_partitioning){
2660 s->pb2 = pb2 [*next_block];
2661 s->tex_pb= tex_pb[*next_block];
2665 memcpy(dest_backup, s->dest, sizeof(s->dest));
2666 s->dest[0] = s->sc.rd_scratchpad;
2667 s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2668 s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2669 av_assert0(s->linesize >= 32); //FIXME
2672 encode_mb(s, motion_x, motion_y);
2674 score= put_bits_count(&s->pb);
2675 if(s->data_partitioning){
2676 score+= put_bits_count(&s->pb2);
2677 score+= put_bits_count(&s->tex_pb);
2680 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2681 ff_mpv_reconstruct_mb(s, s->block);
2683 score *= s->lambda2;
2684 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2688 memcpy(s->dest, dest_backup, sizeof(s->dest));
2695 copy_context_after_encode(best, s, type);
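/*
 * Sum of squared differences between two pixel blocks of size w x h.
 * The optimized 16x16 and 8x8 mecc.sse functions are used when they fit;
 * other sizes (border macroblocks) fall back to a scalar loop over
 * ff_square_tab.
 */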
2699 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2700 const uint32_t *sq = ff_square_tab + 256;
2705 return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2706 else if(w==8 && h==8)
2707 return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2711 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2720 static int sse_mb(MpegEncContext *s){
2724 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2725 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2728 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2729 return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2730 s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2731 s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2733 return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2734 s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2735 s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2738 return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2739 +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2740 +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2743 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2744 MpegEncContext *s= *(void**)arg;
2748 s->me.dia_size= s->avctx->pre_dia_size;
2749 s->first_slice_line=1;
2750 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2751 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2752 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2754 s->first_slice_line=0;
2762 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2763 MpegEncContext *s= *(void**)arg;
2765 ff_check_alignment();
2767 s->me.dia_size= s->avctx->dia_size;
2768 s->first_slice_line=1;
2769 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2770 s->mb_x=0; //for block init below
2771 ff_init_block_index(s);
2772 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2773 s->block_index[0]+=2;
2774 s->block_index[1]+=2;
2775 s->block_index[2]+=2;
2776 s->block_index[3]+=2;
2778 /* compute motion vector & mb_type and store in context */
2779 if(s->pict_type==AV_PICTURE_TYPE_B)
2780 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2782 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2784 s->first_slice_line=0;
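/*
 * Compute per-macroblock spatial statistics on the luma plane:
 * with sum = sum(x) and norm = sum(x*x) over the 256 pixels of the MB,
 *   mb_mean = (sum + 128) >> 8
 *   mb_var ~= (norm - sum*sum/256 + 500 + 128) >> 8
 * i.e. the mean and the (biased, rounded) per-pixel variance, accumulated
 * into me.mb_var_sum_temp for rate control.
 */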
2789 static int mb_var_thread(AVCodecContext *c, void *arg){
2790 MpegEncContext *s= *(void**)arg;
2793 ff_check_alignment();
2795 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2796 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2799 uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2801 int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2803 varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2804 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2806 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2807 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2808 s->me.mb_var_sum_temp += varc;
2814 static void write_slice_end(MpegEncContext *s){
2815 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2816 if(s->partitioned_frame){
2817 ff_mpeg4_merge_partitions(s);
2820 ff_mpeg4_stuffing(&s->pb);
2821 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2822 ff_mjpeg_encode_stuffing(s);
2825 avpriv_align_put_bits(&s->pb);
2826 flush_put_bits(&s->pb);
2828 if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2829 s->misc_bits+= get_bits_diff(s);
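/*
 * Append one 12-byte record to the AV_PKT_DATA_H263_MB_INFO side data:
 * bit offset of the MB in the stream (le32), qscale, GOB number,
 * macroblock address within the GOB (le16), and the four motion-vector
 * prediction bytes hmv1/vmv1/hmv2/vmv2 (the second pair is always 0 since
 * 4MV is not implemented).
 */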
2832 static void write_mb_info(MpegEncContext *s)
2834 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2835 int offset = put_bits_count(&s->pb);
2836 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2837 int gobn = s->mb_y / s->gob_index;
2839 if (CONFIG_H263_ENCODER)
2840 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2841 bytestream_put_le32(&ptr, offset);
2842 bytestream_put_byte(&ptr, s->qscale);
2843 bytestream_put_byte(&ptr, gobn);
2844 bytestream_put_le16(&ptr, mba);
2845 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2846 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2847 /* 4MV not implemented */
2848 bytestream_put_byte(&ptr, 0); /* hmv2 */
2849 bytestream_put_byte(&ptr, 0); /* vmv2 */
2852 static void update_mb_info(MpegEncContext *s, int startcode)
2856 if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2857 s->mb_info_size += 12;
2858 s->prev_mb_info = s->last_mb_info;
2861 s->prev_mb_info = put_bits_count(&s->pb)/8;
2862 /* This might have incremented mb_info_size above, and we return without
2863 * actually writing any info into that slot yet. But in that case,
2864 * this function will be called again after the start code has been
2865 * written, and the mb info will be written then. */
2869 s->last_mb_info = put_bits_count(&s->pb)/8;
2870 if (!s->mb_info_size)
2871 s->mb_info_size += 12;
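/*
 * Make sure at least `threshold` bytes are left in the bitstream buffer.
 * When the encoder owns the buffer (single slice context writing into
 * avctx->internal->byte_buffer) it is grown by `size_increase`, the bits
 * already written are copied over, and the PutBitContext plus the
 * ptr_lastgob and vbv_delay_ptr pointers are rebased onto the new
 * allocation.
 */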
2875 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2877 if ( s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2878 && s->slice_context_count == 1
2879 && s->pb.buf == s->avctx->internal->byte_buffer) {
2880 int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2881 int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2883 uint8_t *new_buffer = NULL;
2884 int new_buffer_size = 0;
2886 if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2887 av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2888 return AVERROR(ENOMEM);
2893 av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2894 s->avctx->internal->byte_buffer_size + size_increase);
2896 return AVERROR(ENOMEM);
2898 memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2899 av_free(s->avctx->internal->byte_buffer);
2900 s->avctx->internal->byte_buffer = new_buffer;
2901 s->avctx->internal->byte_buffer_size = new_buffer_size;
2902 rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2903 s->ptr_lastgob = s->pb.buf + lastgob_pos;
2904 s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2906 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2907 return AVERROR(EINVAL);
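/*
 * Encode the macroblock rows assigned to this slice context. For every MB
 * it may emit a resync/GOB header, then either encodes the single possible
 * MB type directly or, when several candidate types (or QP_RD) are allowed,
 * evaluates each candidate with encode_mb_hq() and keeps the cheapest one
 * in the rate-distortion sense.
 */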
2911 static int encode_thread(AVCodecContext *c, void *arg){
2912 MpegEncContext *s= *(void**)arg;
2914 int chr_h= 16>>s->chroma_y_shift;
2916 MpegEncContext best_s = { 0 }, backup_s;
2917 uint8_t bit_buf[2][MAX_MB_BYTES];
2918 uint8_t bit_buf2[2][MAX_MB_BYTES];
2919 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2920 PutBitContext pb[2], pb2[2], tex_pb[2];
2922 ff_check_alignment();
2925 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2926 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2927 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2930 s->last_bits= put_bits_count(&s->pb);
2941 /* init last dc values */
2942 /* note: quant matrix value (8) is implied here */
2943 s->last_dc[i] = 128 << s->intra_dc_precision;
2945 s->current_picture.encoding_error[i] = 0;
2947 if(s->codec_id==AV_CODEC_ID_AMV){
2948 s->last_dc[0] = 128*8/13;
2949 s->last_dc[1] = 128*8/14;
2950 s->last_dc[2] = 128*8/14;
2953 memset(s->last_mv, 0, sizeof(s->last_mv));
2957 switch(s->codec_id){
2958 case AV_CODEC_ID_H263:
2959 case AV_CODEC_ID_H263P:
2960 case AV_CODEC_ID_FLV1:
2961 if (CONFIG_H263_ENCODER)
2962 s->gob_index = H263_GOB_HEIGHT(s->height);
2964 case AV_CODEC_ID_MPEG4:
2965 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2966 ff_mpeg4_init_partitions(s);
2972 s->first_slice_line = 1;
2973 s->ptr_lastgob = s->pb.buf;
2974 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2978 ff_set_qscale(s, s->qscale);
2979 ff_init_block_index(s);
2981 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2982 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2983 int mb_type= s->mb_type[xy];
2987 int size_increase = s->avctx->internal->byte_buffer_size/4
2988 + s->mb_width*MAX_MB_BYTES;
2990 ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
2991 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2992 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2995 if(s->data_partitioning){
2996 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
2997 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2998 av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3004 s->mb_y = mb_y; // moved into loop, can get changed by H.261
3005 ff_update_block_index(s);
3007 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3008 ff_h261_reorder_mb_index(s);
3009 xy= s->mb_y*s->mb_stride + s->mb_x;
3010 mb_type= s->mb_type[xy];
3013 /* write gob / video packet header */
3015 int current_packet_size, is_gob_start;
3017 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
3019 is_gob_start = s->rtp_payload_size &&
3020 current_packet_size >= s->rtp_payload_size &&
3023 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3025 switch(s->codec_id){
3026 case AV_CODEC_ID_H263:
3027 case AV_CODEC_ID_H263P:
3028 if(!s->h263_slice_structured)
3029 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3031 case AV_CODEC_ID_MPEG2VIDEO:
3032 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3033 case AV_CODEC_ID_MPEG1VIDEO:
3034 if(s->mb_skip_run) is_gob_start=0;
3036 case AV_CODEC_ID_MJPEG:
3037 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3042 if(s->start_mb_y != mb_y || mb_x!=0){
3045 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3046 ff_mpeg4_init_partitions(s);
3050 av_assert2((put_bits_count(&s->pb)&7) == 0);
3051 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3053 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3054 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3055 int d = 100 / s->error_rate;
3057 current_packet_size=0;
3058 s->pb.buf_ptr= s->ptr_lastgob;
3059 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3063 #if FF_API_RTP_CALLBACK
3064 FF_DISABLE_DEPRECATION_WARNINGS
3065 if (s->avctx->rtp_callback){
3066 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3067 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3069 FF_ENABLE_DEPRECATION_WARNINGS
3071 update_mb_info(s, 1);
3073 switch(s->codec_id){
3074 case AV_CODEC_ID_MPEG4:
3075 if (CONFIG_MPEG4_ENCODER) {
3076 ff_mpeg4_encode_video_packet_header(s);
3077 ff_mpeg4_clean_buffers(s);
3080 case AV_CODEC_ID_MPEG1VIDEO:
3081 case AV_CODEC_ID_MPEG2VIDEO:
3082 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3083 ff_mpeg1_encode_slice_header(s);
3084 ff_mpeg1_clean_buffers(s);
3087 case AV_CODEC_ID_H263:
3088 case AV_CODEC_ID_H263P:
3089 if (CONFIG_H263_ENCODER)
3090 ff_h263_encode_gob_header(s, mb_y);
3094 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3095 int bits= put_bits_count(&s->pb);
3096 s->misc_bits+= bits - s->last_bits;
3100 s->ptr_lastgob += current_packet_size;
3101 s->first_slice_line=1;
3102 s->resync_mb_x=mb_x;
3103 s->resync_mb_y=mb_y;
3107 if( (s->resync_mb_x == s->mb_x)
3108 && s->resync_mb_y+1 == s->mb_y){
3109 s->first_slice_line=0;
3113 s->dquant=0; //only for QP_RD
3115 update_mb_info(s, 0);
3117 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3119 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3121 copy_context_before_encode(&backup_s, s, -1);
3123 best_s.data_partitioning= s->data_partitioning;
3124 best_s.partitioned_frame= s->partitioned_frame;
3125 if(s->data_partitioning){
3126 backup_s.pb2= s->pb2;
3127 backup_s.tex_pb= s->tex_pb;
3130 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3131 s->mv_dir = MV_DIR_FORWARD;
3132 s->mv_type = MV_TYPE_16X16;
3134 s->mv[0][0][0] = s->p_mv_table[xy][0];
3135 s->mv[0][0][1] = s->p_mv_table[xy][1];
3136 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3137 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3139 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3140 s->mv_dir = MV_DIR_FORWARD;
3141 s->mv_type = MV_TYPE_FIELD;
3144 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3145 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3146 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3148 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3149 &dmin, &next_block, 0, 0);
3151 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3152 s->mv_dir = MV_DIR_FORWARD;
3153 s->mv_type = MV_TYPE_16X16;
3157 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3158 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3160 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3161 s->mv_dir = MV_DIR_FORWARD;
3162 s->mv_type = MV_TYPE_8X8;
3165 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3166 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3168 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3169 &dmin, &next_block, 0, 0);
3171 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3172 s->mv_dir = MV_DIR_FORWARD;
3173 s->mv_type = MV_TYPE_16X16;
3175 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3176 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3177 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3178 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3180 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3181 s->mv_dir = MV_DIR_BACKWARD;
3182 s->mv_type = MV_TYPE_16X16;
3184 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3185 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3186 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3187 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3189 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3190 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3191 s->mv_type = MV_TYPE_16X16;
3193 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3194 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3195 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3196 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3197 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3198 &dmin, &next_block, 0, 0);
3200 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3201 s->mv_dir = MV_DIR_FORWARD;
3202 s->mv_type = MV_TYPE_FIELD;
3205 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3206 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3207 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3209 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3210 &dmin, &next_block, 0, 0);
3212 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3213 s->mv_dir = MV_DIR_BACKWARD;
3214 s->mv_type = MV_TYPE_FIELD;
3217 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3218 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3219 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3221 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3222 &dmin, &next_block, 0, 0);
3224 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3225 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3226 s->mv_type = MV_TYPE_FIELD;
3228 for(dir=0; dir<2; dir++){
3230 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3231 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3232 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3235 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3236 &dmin, &next_block, 0, 0);
3238 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3240 s->mv_type = MV_TYPE_16X16;
3244 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3245 &dmin, &next_block, 0, 0);
3246 if(s->h263_pred || s->h263_aic){
3248 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3250 ff_clean_intra_table_entries(s); //old mode?
3254 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3255 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3256 const int last_qp= backup_s.qscale;
3259 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3260 static const int dquant_tab[4]={-1,1,-2,2};
3261 int storecoefs = s->mb_intra && s->dc_val[0];
3263 av_assert2(backup_s.dquant == 0);
3266 s->mv_dir= best_s.mv_dir;
3267 s->mv_type = MV_TYPE_16X16;
3268 s->mb_intra= best_s.mb_intra;
3269 s->mv[0][0][0] = best_s.mv[0][0][0];
3270 s->mv[0][0][1] = best_s.mv[0][0][1];
3271 s->mv[1][0][0] = best_s.mv[1][0][0];
3272 s->mv[1][0][1] = best_s.mv[1][0][1];
3274 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3275 for(; qpi<4; qpi++){
3276 int dquant= dquant_tab[qpi];
3277 qp= last_qp + dquant;
3278 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3280 backup_s.dquant= dquant;
3283 dc[i]= s->dc_val[0][ s->block_index[i] ];
3284 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3288 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3289 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3290 if(best_s.qscale != qp){
3293 s->dc_val[0][ s->block_index[i] ]= dc[i];
3294 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3301 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3302 int mx= s->b_direct_mv_table[xy][0];
3303 int my= s->b_direct_mv_table[xy][1];
3305 backup_s.dquant = 0;
3306 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3308 ff_mpeg4_set_direct_mv(s, mx, my);
3309 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3310 &dmin, &next_block, mx, my);
3312 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3313 backup_s.dquant = 0;
3314 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3316 ff_mpeg4_set_direct_mv(s, 0, 0);
3317 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3318 &dmin, &next_block, 0, 0);
3320 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3323 coded |= s->block_last_index[i];
3326 memcpy(s->mv, best_s.mv, sizeof(s->mv));
3327 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3328 mx=my=0; //FIXME find the one we actually used
3329 ff_mpeg4_set_direct_mv(s, mx, my);
3330 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3338 s->mv_dir= best_s.mv_dir;
3339 s->mv_type = best_s.mv_type;
3341 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3342 s->mv[0][0][1] = best_s.mv[0][0][1];
3343 s->mv[1][0][0] = best_s.mv[1][0][0];
3344 s->mv[1][0][1] = best_s.mv[1][0][1];*/
3347 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3348 &dmin, &next_block, mx, my);
3353 s->current_picture.qscale_table[xy] = best_s.qscale;
3355 copy_context_after_encode(s, &best_s, -1);
3357 pb_bits_count= put_bits_count(&s->pb);
3358 flush_put_bits(&s->pb);
3359 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3362 if(s->data_partitioning){
3363 pb2_bits_count= put_bits_count(&s->pb2);
3364 flush_put_bits(&s->pb2);
3365 avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3366 s->pb2= backup_s.pb2;
3368 tex_pb_bits_count= put_bits_count(&s->tex_pb);
3369 flush_put_bits(&s->tex_pb);
3370 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3371 s->tex_pb= backup_s.tex_pb;
3373 s->last_bits= put_bits_count(&s->pb);
3375 if (CONFIG_H263_ENCODER &&
3376 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3377 ff_h263_update_motion_val(s);
3379 if(next_block==0){ //FIXME 16 vs linesize16
3380 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3381 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3382 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3385 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3386 ff_mpv_reconstruct_mb(s, s->block);
3388 int motion_x = 0, motion_y = 0;
3389 s->mv_type=MV_TYPE_16X16;
3390 // only one MB-Type possible
3393 case CANDIDATE_MB_TYPE_INTRA:
3396 motion_x= s->mv[0][0][0] = 0;
3397 motion_y= s->mv[0][0][1] = 0;
3399 case CANDIDATE_MB_TYPE_INTER:
3400 s->mv_dir = MV_DIR_FORWARD;
3402 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3403 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3405 case CANDIDATE_MB_TYPE_INTER_I:
3406 s->mv_dir = MV_DIR_FORWARD;
3407 s->mv_type = MV_TYPE_FIELD;
3410 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3411 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3412 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3415 case CANDIDATE_MB_TYPE_INTER4V:
3416 s->mv_dir = MV_DIR_FORWARD;
3417 s->mv_type = MV_TYPE_8X8;
3420 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3421 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3424 case CANDIDATE_MB_TYPE_DIRECT:
3425 if (CONFIG_MPEG4_ENCODER) {
3426 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3428 motion_x=s->b_direct_mv_table[xy][0];
3429 motion_y=s->b_direct_mv_table[xy][1];
3430 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3433 case CANDIDATE_MB_TYPE_DIRECT0:
3434 if (CONFIG_MPEG4_ENCODER) {
3435 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3437 ff_mpeg4_set_direct_mv(s, 0, 0);
3440 case CANDIDATE_MB_TYPE_BIDIR:
3441 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3443 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3444 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3445 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3446 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3448 case CANDIDATE_MB_TYPE_BACKWARD:
3449 s->mv_dir = MV_DIR_BACKWARD;
3451 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3452 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3454 case CANDIDATE_MB_TYPE_FORWARD:
3455 s->mv_dir = MV_DIR_FORWARD;
3457 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3458 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3460 case CANDIDATE_MB_TYPE_FORWARD_I:
3461 s->mv_dir = MV_DIR_FORWARD;
3462 s->mv_type = MV_TYPE_FIELD;
3465 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3466 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3467 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3470 case CANDIDATE_MB_TYPE_BACKWARD_I:
3471 s->mv_dir = MV_DIR_BACKWARD;
3472 s->mv_type = MV_TYPE_FIELD;
3475 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3476 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3477 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3480 case CANDIDATE_MB_TYPE_BIDIR_I:
3481 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3482 s->mv_type = MV_TYPE_FIELD;
3484 for(dir=0; dir<2; dir++){
3486 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3487 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3488 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3493 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3496 encode_mb(s, motion_x, motion_y);
3498 // RAL: Update last macroblock type
3499 s->last_mv_dir = s->mv_dir;
3501 if (CONFIG_H263_ENCODER &&
3502 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3503 ff_h263_update_motion_val(s);
3505 ff_mpv_reconstruct_mb(s, s->block);
3508 /* clean the MV table in IPS frames for direct mode in B-frames */
3509 if(s->mb_intra /* && I,P,S_TYPE */){
3510 s->p_mv_table[xy][0]=0;
3511 s->p_mv_table[xy][1]=0;
3514 if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3518 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3519 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3521 s->current_picture.encoding_error[0] += sse(
3522 s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3523 s->dest[0], w, h, s->linesize);
3524 s->current_picture.encoding_error[1] += sse(
3525 s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3526 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3527 s->current_picture.encoding_error[2] += sse(
3528 s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3529 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3532 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3533 ff_h263_loop_filter(s);
3535 ff_dlog(s->avctx, "MB %d %d bits\n",
3536 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3540 // not beautiful, but this must be written before flushing, so it has to be here
3541 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3542 ff_msmpeg4_encode_ext_header(s);
3546 #if FF_API_RTP_CALLBACK
3547 FF_DISABLE_DEPRECATION_WARNINGS
3548 /* Send the last GOB if RTP */
3549 if (s->avctx->rtp_callback) {
3550 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3551 int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3552 /* Call the RTP callback to send the last GOB */
3554 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3556 FF_ENABLE_DEPRECATION_WARNINGS
3562 #define MERGE(field) dst->field += src->field; src->field=0
3563 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3564 MERGE(me.scene_change_score);
3565 MERGE(me.mc_mb_var_sum_temp);
3566 MERGE(me.mb_var_sum_temp);
3569 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3572 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3573 MERGE(dct_count[1]);
3582 MERGE(er.error_count);
3583 MERGE(padding_bug_score);
3584 MERGE(current_picture.encoding_error[0]);
3585 MERGE(current_picture.encoding_error[1]);
3586 MERGE(current_picture.encoding_error[2]);
3588 if (dst->noise_reduction){
3589 for(i=0; i<64; i++){
3590 MERGE(dct_error_sum[0][i]);
3591 MERGE(dct_error_sum[1][i]);
3595 assert(put_bits_count(&src->pb) % 8 ==0);
3596 assert(put_bits_count(&dst->pb) % 8 ==0);
3597 avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3598 flush_put_bits(&dst->pb);
3601 static int estimate_qp(MpegEncContext *s, int dry_run){
3602 if (s->next_lambda){
3603 s->current_picture_ptr->f->quality =
3604 s->current_picture.f->quality = s->next_lambda;
3605 if(!dry_run) s->next_lambda= 0;
3606 } else if (!s->fixed_qscale) {
3607 int quality = ff_rate_estimate_qscale(s, dry_run);
3608 s->current_picture_ptr->f->quality =
3609 s->current_picture.f->quality = quality;
3610 if (s->current_picture.f->quality < 0)
3614 if(s->adaptive_quant){
3615 switch(s->codec_id){
3616 case AV_CODEC_ID_MPEG4:
3617 if (CONFIG_MPEG4_ENCODER)
3618 ff_clean_mpeg4_qscales(s);
3620 case AV_CODEC_ID_H263:
3621 case AV_CODEC_ID_H263P:
3622 case AV_CODEC_ID_FLV1:
3623 if (CONFIG_H263_ENCODER)
3624 ff_clean_h263_qscales(s);
3627 ff_init_qscale_tab(s);
3630 s->lambda= s->lambda_table[0];
3633 s->lambda = s->current_picture.f->quality;
3638 /* must be called before writing the header */
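/* pp_time is the display-time distance between the two reference frames
 * surrounding the current B-frames; pb_time is the distance from the past
 * reference to the current B-frame. Both are later used for B-frame motion
 * vector scaling. */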
3639 static void set_frame_distances(MpegEncContext * s){
3640 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3641 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3643 if(s->pict_type==AV_PICTURE_TYPE_B){
3644 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3645 assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3647 s->pp_time= s->time - s->last_non_b_time;
3648 s->last_non_b_time= s->time;
3649 assert(s->picture_number==0 || s->pp_time > 0);
3653 static int encode_picture(MpegEncContext *s, int picture_number)
3657 int context_count = s->slice_context_count;
3659 s->picture_number = picture_number;
3661 /* Reset the average MB variance */
3662 s->me.mb_var_sum_temp =
3663 s->me.mc_mb_var_sum_temp = 0;
3665 /* we need to initialize some time vars before we can encode B-frames */
3666 // RAL: Condition added for MPEG1VIDEO
3667 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3668 set_frame_distances(s);
3669 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3670 ff_set_mpeg4_time(s);
3672 s->me.scene_change_score=0;
3674 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3676 if(s->pict_type==AV_PICTURE_TYPE_I){
3677 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3678 else s->no_rounding=0;
3679 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3680 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3681 s->no_rounding ^= 1;
3684 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3685 if (estimate_qp(s,1) < 0)
3687 ff_get_2pass_fcode(s);
3688 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3689 if(s->pict_type==AV_PICTURE_TYPE_B)
3690 s->lambda= s->last_lambda_for[s->pict_type];
3692 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3696 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3697 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3698 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3699 s->q_chroma_intra_matrix = s->q_intra_matrix;
3700 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3703 s->mb_intra=0; //for the rate distortion & bit compare functions
3704 for(i=1; i<context_count; i++){
3705 ret = ff_update_duplicate_context(s->thread_context[i], s);
3713 /* Estimate motion for every MB */
3714 if(s->pict_type != AV_PICTURE_TYPE_I){
3715 s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3716 s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3717 if (s->pict_type != AV_PICTURE_TYPE_B) {
3718 if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3720 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3724 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3725 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3727 for(i=0; i<s->mb_stride*s->mb_height; i++)
3728 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3730 if(!s->fixed_qscale){
3731 /* finding spatial complexity for I-frame rate control */
3732 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3735 for(i=1; i<context_count; i++){
3736 merge_context_after_me(s, s->thread_context[i]);
3738 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3739 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
3742 if (s->me.scene_change_score > s->scenechange_threshold &&
3743 s->pict_type == AV_PICTURE_TYPE_P) {
3744 s->pict_type= AV_PICTURE_TYPE_I;
3745 for(i=0; i<s->mb_stride*s->mb_height; i++)
3746 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3747 if(s->msmpeg4_version >= 3)
3749 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3750 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
3754 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3755 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3757 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3759 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3760 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3761 s->f_code= FFMAX3(s->f_code, a, b);
3764 ff_fix_long_p_mvs(s);
3765 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3766 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3770 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3771 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
3776 if(s->pict_type==AV_PICTURE_TYPE_B){
3779 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3780 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3781 s->f_code = FFMAX(a, b);
3783 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3784 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3785 s->b_code = FFMAX(a, b);
3787 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3788 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3789 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3790 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3791 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3793 for(dir=0; dir<2; dir++){
3796 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3797 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3798 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3799 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3807 if (estimate_qp(s, 0) < 0)
3810 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3811 s->pict_type == AV_PICTURE_TYPE_I &&
3812 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3813 s->qscale= 3; //reduce clipping problems
3815 if (s->out_format == FMT_MJPEG) {
3816 const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3817 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3819 if (s->avctx->intra_matrix) {
3821 luma_matrix = s->avctx->intra_matrix;
3823 if (s->avctx->chroma_intra_matrix)
3824 chroma_matrix = s->avctx->chroma_intra_matrix;
3826 /* for mjpeg, we do include qscale in the matrix */
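        /* each entry becomes clip_uint8(m[i] * qscale / 8), so with
         * qscale == 8 the source matrix passes through unchanged; the scaled
         * tables are what later get written into the JPEG headers. */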
3828 int j = s->idsp.idct_permutation[i];
3830 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3831 s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3833 s->y_dc_scale_table=
3834 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3835 s->chroma_intra_matrix[0] =
3836 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3837 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3838 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3839 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3840 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3843 if(s->codec_id == AV_CODEC_ID_AMV){
3844 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3845 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3847 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3849 s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3850 s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3852 s->y_dc_scale_table= y;
3853 s->c_dc_scale_table= c;
3854 s->intra_matrix[0] = 13;
3855 s->chroma_intra_matrix[0] = 14;
3856 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3857 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3858 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3859 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3863 //FIXME var duplication
3864 s->current_picture_ptr->f->key_frame =
3865 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3866 s->current_picture_ptr->f->pict_type =
3867 s->current_picture.f->pict_type = s->pict_type;
3869 if (s->current_picture.f->key_frame)
3870 s->picture_in_gop_number=0;
3872 s->mb_x = s->mb_y = 0;
3873 s->last_bits= put_bits_count(&s->pb);
3874 switch(s->out_format) {
3876 if (CONFIG_MJPEG_ENCODER && s->huffman != HUFFMAN_TABLE_OPTIMAL)
3877 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3878 s->pred, s->intra_matrix, s->chroma_intra_matrix);
3881 if (CONFIG_H261_ENCODER)
3882 ff_h261_encode_picture_header(s, picture_number);
3885 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3886 ff_wmv2_encode_picture_header(s, picture_number);
3887 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3888 ff_msmpeg4_encode_picture_header(s, picture_number);
3889 else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3890 ret = ff_mpeg4_encode_picture_header(s, picture_number);
3893 } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3894 ret = ff_rv10_encode_picture_header(s, picture_number);
3898 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3899 ff_rv20_encode_picture_header(s, picture_number);
3900 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3901 ff_flv_encode_picture_header(s, picture_number);
3902 else if (CONFIG_H263_ENCODER)
3903 ff_h263_encode_picture_header(s, picture_number);
3906 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3907 ff_mpeg1_encode_picture_header(s, picture_number);
3912 bits= put_bits_count(&s->pb);
3913 s->header_bits= bits - s->last_bits;
3915 for(i=1; i<context_count; i++){
3916 update_duplicate_context_after_me(s->thread_context[i], s);
3918 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3919 for(i=1; i<context_count; i++){
3920 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3921 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
3922 merge_context_after_encode(s, s->thread_context[i]);
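/*
 * DCT-domain noise reduction: accumulate the magnitude of every coefficient
 * in dct_error_sum (per intra/inter class) and shrink the coefficient
 * towards zero by dct_offset, never letting it cross zero. The offsets
 * themselves are maintained elsewhere from these running statistics.
 */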
3928 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3929 const int intra= s->mb_intra;
3932 s->dct_count[intra]++;
3934 for(i=0; i<64; i++){
3935 int level= block[i];
3939 s->dct_error_sum[intra][i] += level;
3940 level -= s->dct_offset[intra][i];
3941 if(level<0) level=0;
3943 s->dct_error_sum[intra][i] -= level;
3944 level += s->dct_offset[intra][i];
3945 if(level>0) level=0;
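/*
 * Trellis (rate-distortion optimized) quantization: after the forward DCT,
 * each coefficient gets up to two candidate levels (the rounded level and
 * the one below it), plus the option of being zeroed. A Viterbi-style
 * search over run lengths -- score_tab[] holds the best cost up to each
 * position and survivor[] the states still worth extending -- minimizes
 * distortion + lambda * estimated_bits, and the winning run/level chain is
 * finally written back through the scan tables.
 */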
3952 static int dct_quantize_trellis_c(MpegEncContext *s,
3953 int16_t *block, int n,
3954 int qscale, int *overflow){
3956 const uint16_t *matrix;
3957 const uint8_t *scantable;
3958 const uint8_t *perm_scantable;
3960 unsigned int threshold1, threshold2;
3972 int coeff_count[64];
3973 int qmul, qadd, start_i, last_non_zero, i, dc;
3974 const int esc_length= s->ac_esc_length;
3976 uint8_t * last_length;
3977 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3980 s->fdsp.fdct(block);
3982 if(s->dct_error_sum)
3983 s->denoise_dct(s, block);
3985 qadd= ((qscale-1)|1)*8;
3987 if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3988 else mpeg2_qscale = qscale << 1;
3992 scantable= s->intra_scantable.scantable;
3993 perm_scantable= s->intra_scantable.permutated;
4001 /* For AIC we skip quant/dequant of INTRADC */
4006 /* note: block[0] is assumed to be positive */
4007 block[0] = (block[0] + (q >> 1)) / q;
4010 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4011 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4012 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4013 bias= 1<<(QMAT_SHIFT-1);
4015 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4016 length = s->intra_chroma_ac_vlc_length;
4017 last_length= s->intra_chroma_ac_vlc_last_length;
4019 length = s->intra_ac_vlc_length;
4020 last_length= s->intra_ac_vlc_last_length;
4023 scantable= s->inter_scantable.scantable;
4024 perm_scantable= s->inter_scantable.permutated;
4027 qmat = s->q_inter_matrix[qscale];
4028 matrix = s->inter_matrix;
4029 length = s->inter_ac_vlc_length;
4030 last_length= s->inter_ac_vlc_last_length;
4034 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4035 threshold2= (threshold1<<1);
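    /* The unsigned compare below tests level > threshold1 or
     * level < -threshold1 in a single branch: adding threshold1 maps the
     * "keep" range onto [0, 2*threshold1], and anything outside it
     * (including negative values, which wrap to huge unsigned numbers)
     * exceeds threshold2. */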
4037 for(i=63; i>=start_i; i--) {
4038 const int j = scantable[i];
4039 int level = block[j] * qmat[j];
4041 if(((unsigned)(level+threshold1))>threshold2){
4047 for(i=start_i; i<=last_non_zero; i++) {
4048 const int j = scantable[i];
4049 int level = block[j] * qmat[j];
4051 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4052 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4053 if(((unsigned)(level+threshold1))>threshold2){
4055 level= (bias + level)>>QMAT_SHIFT;
4057 coeff[1][i]= level-1;
4058 // coeff[2][k]= level-2;
4060 level= (bias - level)>>QMAT_SHIFT;
4061 coeff[0][i]= -level;
4062 coeff[1][i]= -level+1;
4063 // coeff[2][k]= -level+2;
4065 coeff_count[i]= FFMIN(level, 2);
4066 av_assert2(coeff_count[i]);
4069 coeff[0][i]= (level>>31)|1;
4074 *overflow= s->max_qcoeff < max; //overflow might have happened
4076 if(last_non_zero < start_i){
4077 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4078 return last_non_zero;
    score_tab[start_i]= 0;
    survivor[0]= start_i;

    for(i=start_i; i<=last_non_zero; i++){
        int level_index, j, zero_distortion;
        int dct_coeff= FFABS(block[ scantable[i] ]);
        int best_score=256*256*256*120;

        if (s->fdsp.fdct == ff_fdct_ifast)
            dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
        zero_distortion= dct_coeff*dct_coeff;

        for(level_index=0; level_index < coeff_count[i]; level_index++){
            int level= coeff[level_index][i];
            const int alevel= FFABS(level);

            if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
                unquant_coeff= alevel*qmul + qadd;
            } else if(s->out_format == FMT_MJPEG) {
                j = s->idsp.idct_permutation[scantable[i]];
                unquant_coeff = alevel * matrix[j] * 8;
            }else{
                j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
                    unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
                    unquant_coeff = (unquant_coeff - 1) | 1;
                    unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
                    unquant_coeff = (unquant_coeff - 1) | 1;

            distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
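            /* Distortion is measured relative to quantizing the coefficient to
             * zero (zero_distortion), so the trellis scores below accumulate
             * only the extra cost of keeping a coefficient plus its bit cost. */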
            if((level&(~127)) == 0){
                for(j=survivor_count-1; j>=0; j--){
                    int run= i - survivor[j];
                    int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
                    score += score_tab[i-run];

                    if(score < best_score){
                        level_tab[i+1]= level-64;

                if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
                    for(j=survivor_count-1; j>=0; j--){
                        int run= i - survivor[j];
                        int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
                        score += score_tab[i-run];
                        if(score < last_score){
                            last_level= level-64;

                distortion += esc_length*lambda;
                for(j=survivor_count-1; j>=0; j--){
                    int run= i - survivor[j];
                    int score= distortion + score_tab[i-run];

                    if(score < best_score){
                        level_tab[i+1]= level-64;

                if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
                    for(j=survivor_count-1; j>=0; j--){
                        int run= i - survivor[j];
                        int score= distortion + score_tab[i-run];
                        if(score < last_score){
                            last_level= level-64;

        score_tab[i+1]= best_score;

        // Note: there is a vlc code in MPEG-4 which is 1 bit shorter than another one with a shorter run and the same level
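        // The loops below drop trailing survivors whose accumulated score is
        // already worse than the best path ending at i+1; the lambda-sized
        // safety margin used once last_non_zero exceeds 27 presumably guards
        // against the VLC anomaly noted above.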
        if(last_non_zero <= 27){
            for(; survivor_count; survivor_count--){
                if(score_tab[ survivor[survivor_count-1] ] <= best_score)

            for(; survivor_count; survivor_count--){
                if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)

        survivor[ survivor_count++ ]= i+1;

    if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
        last_score= 256*256*256*120;
        for(i= survivor[0]; i<=last_non_zero + 1; i++){
            int score= score_tab[i];
                score += lambda * 2; // FIXME more exact?

            if(score < last_score){
                last_level= level_tab[i];
                last_run= run_tab[i];

    s->coded_score[n] = last_score;

    dc= FFABS(block[0]);
    last_non_zero= last_i - 1;
    memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));

    if(last_non_zero < start_i)
        return last_non_zero;
    if(last_non_zero == 0 && start_i == 0){
        int best_score= dc * dc;

        for(i=0; i<coeff_count[0]; i++){
            int level= coeff[i][0];
            int alevel= FFABS(level);
            int unquant_coeff, score, distortion;

            if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
                unquant_coeff= (alevel*qmul + qadd)>>3;
                unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
                unquant_coeff = (unquant_coeff - 1) | 1;
            unquant_coeff = (unquant_coeff + 4) >> 3;
            unquant_coeff<<= 3 + 3;

            distortion= (unquant_coeff - dc) * (unquant_coeff - dc);

            if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
            else                    score= distortion + esc_length*lambda;

            if(score < best_score){
                best_level= level - 64;

        block[0]= best_level;
        s->coded_score[n] = best_score - dc*dc;
        if(best_level == 0) return -1;
        else                return last_non_zero;

    av_assert2(last_level);

    block[ perm_scantable[last_non_zero] ]= last_level;

    for(; i>start_i; i -= run_tab[i] + 1){
        block[ perm_scantable[i-1] ]= level_tab[i];

    return last_non_zero;
//#define REFINE_STATS 1
static int16_t basis[64][64];
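/* basis[][] holds, in fixed point with BASIS_SHIFT fractional bits and indexed
 * through the IDCT permutation, the 8x8 spatial-domain pattern contributed by
 * each DCT coefficient; the sqrt(0.5) factors are the usual DCT normalization
 * of the first row and column. try_8x8basis()/add_8x8basis() use these patterns
 * to evaluate and apply single-coefficient changes directly in the spatial
 * domain. */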
static void build_basis(uint8_t *perm){
            double s= 0.25*(1<<BASIS_SHIFT);
            int perm_index= perm[index];
            if(i==0) s*= sqrt(0.5);
            if(j==0) s*= sqrt(0.5);
            basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
                               int16_t *block, int16_t *weight, int16_t *orig,
                               int n, int qscale){
    LOCAL_ALIGNED_16(int16_t, d1, [64]);
    const uint8_t *scantable;
    const uint8_t *perm_scantable;
//    unsigned int threshold1, threshold2;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    uint8_t * last_length;
    int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
static int after_last=0;
static int to_zero=0;
static int from_zero=0;
static int messed_sign=0;

    if(basis[0][0] == 0)
        build_basis(s->idsp.idct_permutation);

        scantable= s->intra_scantable.scantable;
        perm_scantable= s->intra_scantable.permutated;
            /* For AIC we skip quant/dequant of INTRADC */
        q <<= RECON_SHIFT-3;
        /* note: block[0] is assumed to be positive */
//        block[0] = (block[0] + (q >> 1)) / q;
//        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
//            bias= 1<<(QMAT_SHIFT-1);
        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }

        scantable= s->inter_scantable.scantable;
        perm_scantable= s->inter_scantable.permutated;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;

    last_non_zero = s->block_last_index[n];

    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
STOP_TIMER("memset rem[]")}

    for(i=0; i<64; i++){
        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63

//        w=weight[i] = (63*qns + (w/2)) / w;

        av_assert2(w<(1<<6));
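        /* The mapping above squeezes |weight[i]| into the 16..63 range noted in
         * the comment (and asserted to fit in 6 bits): a zero input maps to 63
         * while large inputs fall toward the bottom of the range, so
         * (presumably) coefficients with a larger accumulated DCT error count
         * for less in the distortion measure. */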
    lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);

    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];

            if(level<0) coeff= qmul*level - qadd;
            else        coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;

            s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);

if(last_non_zero>0){
STOP_TIMER("init rem[]")

        int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
        int run2, best_unquant_change=0, analyze_gradient;

        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
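        /* Greedy refinement: each pass evaluates, via try_8x8basis(), the
         * rate/distortion effect of changing the DC (for intra blocks) and each
         * AC coefficient by +-1, applies the single best change, and the
         * enclosing loop repeats until no change improves best_score. */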
        if(analyze_gradient){
            for(i=0; i<64; i++){
                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
STOP_TIMER("rem*w*w")}

            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->mb_intra);

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                if(new_coeff >= 2048 || new_coeff < 0)

                score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
                                                  new_coeff - old_coeff);
                if(score<best_score){
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;

        run2= run_tab[rle_index++];

        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)

                if(level<0) old_coeff= qmul*level - qadd;
                else        old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++]; //FIXME ! maybe after last
                av_assert2(run2>=0 || i >= last_non_zero );

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))

                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else            new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                    //FIXME check for overflow

                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         - length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score +=   last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         - last_length[UNI_AC_ENC_INDEX(run, level+64)];

                        av_assert2(FFABS(new_level)==1);

                        if(analyze_gradient){
                            int g= d1[ scantable[i] ];
                            if(g && (g^new_level) >= 0)

                        if(i < last_non_zero){
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            if(next_level&(~127))

                            if(next_i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                                score +=   length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                         - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))

                        if(next_i < last_non_zero)
                            score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                            score +=   last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                     - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
                                                   unquant_change);
                if(score<best_score){
                    best_change= change;
                    best_unquant_change= unquant_change;

                prev_level= level + 64;
                if(prev_level&(~127))

STOP_TIMER("iterative step")}

            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
if(block[j] - best_change){
if(FFABS(block[j]) > FFABS(block[j] - best_change)){
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
if(256*256*256*64 % count == 0){
av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);

            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                    run_tab[rle_index++]=run;

            s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
if(last_non_zero>0){
STOP_TIMER("iterative search")

    return last_non_zero;
}
/**
 * Permute an 8x8 block according to permutation.
 * @param block the block which will be permuted according to
 *              the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non-zero coefficient in scantable order, used to
 *             speed the permutation up
 * @param scantable the used scantable, this is only used to speed the
 *                  permutation up, the block is not (inverse) permuted
 *                  to scantable order!
 */
void ff_block_permute(int16_t *block, uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int i;
    int16_t temp[64];

    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)

    for (i = 0; i <= last; i++) {
        const int j = scantable[i];
        temp[j] = block[j];
        block[j] = 0;
    }

    for (i = 0; i <= last; i++) {
        const int j = scantable[i];
        const int perm_j = permutation[j];
        block[perm_j] = temp[j];
    }
}
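/* Illustrative use, mirroring the call in ff_dct_quantize_c() below:
 *     ff_block_permute(block, s->idsp.idct_permutation, scantable, last_non_zero);
 */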
int ff_dct_quantize_c(MpegEncContext *s,
                      int16_t *block, int n,
                      int qscale, int *overflow)
{
    int i, j, level, last_non_zero, q, start_i;
    const uint8_t *scantable;
    unsigned int threshold1, threshold2;

    s->fdsp.fdct(block);

    if(s->dct_error_sum)
        s->denoise_dct(s, block);

        scantable= s->intra_scantable.scantable;
            /* For AIC we skip quant/dequant of INTRADC */

        /* note: block[0] is assumed to be positive */
        block[0] = (block[0] + (q >> 1)) / q;

        qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
        bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));

        scantable= s->inter_scantable.scantable;
        qmat = s->q_inter_matrix[qscale];
        bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));

    threshold1= (1<<QMAT_SHIFT) - bias - 1;
    threshold2= (threshold1<<1);
    for(i=63;i>=start_i;i--) {
        level = block[j] * qmat[j];

        if(((unsigned)(level+threshold1))>threshold2){

    for(i=start_i; i<=last_non_zero; i++) {
        level = block[j] * qmat[j];

//        if(   bias+level >= (1<<QMAT_SHIFT)
//           || bias-level >= (1<<QMAT_SHIFT)){
        if(((unsigned)(level+threshold1))>threshold2){
                level= (bias + level)>>QMAT_SHIFT;
                level= (bias - level)>>QMAT_SHIFT;

    *overflow= s->max_qcoeff < max; //overflow might have happened

    /* We need this permutation so that the coefficients match the IDCT's input
     * order; only the nonzero elements are permuted. */
    if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
        ff_block_permute(block, s->idsp.idct_permutation,
                         scantable, last_non_zero);

    return last_non_zero;
}
#define OFFSET(x) offsetof(MpegEncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption h263_options[] = {
    { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },

static const AVClass h263_class = {
    .class_name = "H.263 encoder",
    .item_name  = av_default_item_name,
    .option     = h263_options,
    .version    = LIBAVUTIL_VERSION_INT,

AVCodec ff_h263_encoder = {
    .long_name      = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H263,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = ff_mpv_encode_init,
    .encode2        = ff_mpv_encode_picture,
    .close          = ff_mpv_encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
    .priv_class     = &h263_class,

static const AVOption h263p_options[] = {
    { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},

static const AVClass h263p_class = {
    .class_name = "H.263p encoder",
    .item_name  = av_default_item_name,
    .option     = h263p_options,
    .version    = LIBAVUTIL_VERSION_INT,

AVCodec ff_h263p_encoder = {
    .long_name      = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H263P,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = ff_mpv_encode_init,
    .encode2        = ff_mpv_encode_picture,
    .close          = ff_mpv_encode_end,
    .capabilities   = AV_CODEC_CAP_SLICE_THREADS,
    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
    .priv_class     = &h263p_class,

static const AVClass msmpeg4v2_class = {
    .class_name = "msmpeg4v2 encoder",
    .item_name  = av_default_item_name,
    .option     = ff_mpv_generic_options,
    .version    = LIBAVUTIL_VERSION_INT,

AVCodec ff_msmpeg4v2_encoder = {
    .name           = "msmpeg4v2",
    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MSMPEG4V2,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = ff_mpv_encode_init,
    .encode2        = ff_mpv_encode_picture,
    .close          = ff_mpv_encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
    .priv_class     = &msmpeg4v2_class,

static const AVClass msmpeg4v3_class = {
    .class_name = "msmpeg4v3 encoder",
    .item_name  = av_default_item_name,
    .option     = ff_mpv_generic_options,
    .version    = LIBAVUTIL_VERSION_INT,

AVCodec ff_msmpeg4v3_encoder = {
    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MSMPEG4V3,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = ff_mpv_encode_init,
    .encode2        = ff_mpv_encode_picture,
    .close          = ff_mpv_encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
    .priv_class     = &msmpeg4v3_class,

static const AVClass wmv1_class = {
    .class_name = "wmv1 encoder",
    .item_name  = av_default_item_name,
    .option     = ff_mpv_generic_options,
    .version    = LIBAVUTIL_VERSION_INT,

AVCodec ff_wmv1_encoder = {
    .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_WMV1,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = ff_mpv_encode_init,
    .encode2        = ff_mpv_encode_picture,
    .close          = ff_mpv_encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
    .priv_class     = &wmv1_class,