2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/pixdesc.h"
40 #include "libavutil/opt.h"
41 #include "libavutil/timer.h"
46 #include "mpegvideo.h"
47 #include "mpegvideodata.h"
51 #include "mjpegenc_common.h"
53 #include "mpegutils.h"
56 #include "pixblockdsp.h"
60 #include "aandcttab.h"
62 #include "mpeg4video.h"
64 #include "bytestream.h"
/* Fixed-point precision of the quantizer bias values used below:
 * biases are expressed in units of 1/(1 << QUANT_BIAS_SHIFT). */
71 #define QUANT_BIAS_SHIFT 8
/* Shift used for the 16-bit (SIMD) quantization matrices (qmat16). */
73 #define QMAT_SHIFT_MMX 16
/* Forward declarations for encoder internals defined later in this file.
 * NOTE(review): this excerpt has gaps; some referenced definitions are
 * not visible in this chunk. */
76 static int encode_picture(MpegEncContext *s, int picture_number);
77 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
78 static int sse_mb(MpegEncContext *s);
79 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
80 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
/* Shared default motion-vector penalty / f_code tables, filled in by
 * mpv_encode_defaults() and pointed to from each encoder context. */
82 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
83 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
85 const AVOption ff_mpv_generic_options[] = {
/**
 * Precompute reciprocal quantization tables for every qscale in
 * [qmin, qmax] from the given 8-bit quant matrix.
 *
 * @param qmat         per-qscale 32-bit reciprocal matrices (for the C
 *                     quantizer)
 * @param qmat16       per-qscale {reciprocal, bias} 16-bit matrices used by
 *                     the SIMD quantizer path
 * @param quant_matrix source quantization matrix (zigzag/permuted order is
 *                     handled via idct_permutation below)
 * @param bias         rounding bias in 1/(1<<QUANT_BIAS_SHIFT) units
 * @param intra        if nonzero, skip the DC coefficient (i starts at 1)
 *                     in the overflow scan
 *
 * NOTE(review): this excerpt is missing intermediate lines of the original
 * function (closing braces, the shift/max bookkeeping); comments below only
 * describe what is visible.
 */
90 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
91 uint16_t (*qmat16)[2][64],
92 const uint16_t *quant_matrix,
93 int bias, int qmin, int qmax, int intra)
95 FDCTDSPContext *fdsp = &s->fdsp;
99 for (qscale = qmin; qscale <= qmax; qscale++) {
/* Map the qscale index to its actual doubled value: MPEG-2 non-linear
 * table when q_scale_type is set, otherwise linear (qscale * 2). */
103 if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
104 else qscale2 = qscale << 1;
/* Branch on which forward-DCT implementation is in use, since each
 * scales its output differently and needs a matching reciprocal. */
106 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
108 fdsp->fdct == ff_faandct ||
109 #endif /* CONFIG_FAANDCT */
110 fdsp->fdct == ff_jpeg_fdct_islow_10) {
111 for (i = 0; i < 64; i++) {
/* Store coefficients in IDCT-permuted order so the quantizer can
 * index them linearly. */
112 const int j = s->idsp.idct_permutation[i];
113 int64_t den = (int64_t) qscale2 * quant_matrix[j];
114 /* 16 <= qscale * quant_matrix[i] <= 7905
115 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
116 * 19952 <= x <= 249205026
117 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
118 * 3444240 >= (1 << 36) / (x) >= 275 */
120 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
/* ifast FDCT leaves AAN scaling in its output, so fold the
 * ff_aanscales factor into the reciprocal. */
122 } else if (fdsp->fdct == ff_fdct_ifast) {
123 for (i = 0; i < 64; i++) {
124 const int j = s->idsp.idct_permutation[i];
125 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
126 /* 16 <= qscale * quant_matrix[i] <= 7905
127 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
128 * 19952 <= x <= 249205026
129 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
130 * 3444240 >= (1 << 36) / (x) >= 275 */
132 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
/* Generic (unscaled) FDCT path: also fill the 16-bit SIMD tables. */
135 for (i = 0; i < 64; i++) {
136 const int j = s->idsp.idct_permutation[i];
137 int64_t den = (int64_t) qscale2 * quant_matrix[j];
138 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
139 * Assume x = qscale * quant_matrix[i]
141 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
142 * so 32768 >= (1 << 19) / (x) >= 67 */
143 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
144 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
145 // (qscale * quant_matrix[i]);
146 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
/* Clamp to keep the reciprocal strictly inside the 16-bit range the
 * SIMD quantizer expects (0 and 32768 are both unusable). */
148 if (qmat16[qscale][0][i] == 0 ||
149 qmat16[qscale][0][i] == 128 * 256)
150 qmat16[qscale][0][i] = 128 * 256 - 1;
/* Second plane holds the per-coefficient rounding bias, rescaled
 * from QUANT_BIAS_SHIFT units to match the 16-bit reciprocal. */
151 qmat16[qscale][1][i] =
152 ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
153 qmat16[qscale][0][i]);
/* Overflow scan: for intra matrices the DC coeff (i == 0) is skipped. */
157 for (i = intra; i < 64; i++) {
159 if (fdsp->fdct == ff_fdct_ifast) {
160 max = (8191LL * ff_aanscales[i]) >> 14;
/* Reduce the working shift until max * reciprocal fits in an int
 * (loop body with shift-- not visible in this excerpt). */
162 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
168 av_log(NULL, AV_LOG_INFO,
169 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/* Derive s->qscale (and s->lambda2) from the current rate-control lambda.
 * NOTE(review): excerpt is gapped — the body of the non-linear search loop
 * (best index tracking and final assignment) is partly missing. */
174 static inline void update_qscale(MpegEncContext *s)
/* The "&& 0" disables this non-linear-qscale search path entirely;
 * it is dead code kept for reference. */
176 if (s->q_scale_type == 1 && 0) {
178 int bestdiff=INT_MAX;
/* Find the non-linear qscale whose lambda-equivalent is closest to the
 * current lambda, honoring qmin/qmax (qmax is ignored while the VBV
 * emergency flag vbv_ignore_qmax is set). */
181 for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
182 int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
183 if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
184 (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
186 if (diff < bestdiff) {
/* Linear path: qscale ~= lambda * 139 / 2^(FF_LAMBDA_SHIFT+7), i.e. the
 * usual lambda->QP mapping with rounding. */
193 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
194 (FF_LAMBDA_SHIFT + 7);
195 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
/* Keep lambda2 (lambda squared in lambda units) in sync with lambda. */
198 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/* Write a 64-entry quantization matrix to the bitstream in zigzag order,
 * 8 bits per coefficient.
 * NOTE(review): the original also writes a presence flag and handles a
 * NULL matrix; those lines are missing from this excerpt. */
202 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
208 for (i = 0; i < 64; i++) {
209 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
216 * init s->current_picture.qscale_table from s->lambda_table
218 void ff_init_qscale_tab(MpegEncContext *s)
220 int8_t * const qscale_table = s->current_picture.qscale_table;
/* Convert each macroblock's lambda to a QP (same lambda*139 mapping as
 * update_qscale) and clamp to the configured [qmin, qmax] range. */
223 for (i = 0; i < s->mb_num; i++) {
224 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
225 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
/* mb_index2xy maps the linear MB index to the table's x/y layout. */
226 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/* Copy the fields that motion estimation / header encoding may have
 * changed from the master context into a slice-thread duplicate, so the
 * duplicates stay consistent after ME has run. */
231 static void update_duplicate_context_after_me(MpegEncContext *dst,
234 #define COPY(a) dst->a= src->a
236 COPY(current_picture);
242 COPY(picture_in_gop_number);
243 COPY(gop_picture_number);
244 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
245 COPY(progressive_frame); // FIXME don't set in encode_header
246 COPY(partitioned_frame); // FIXME don't set in encode_header
251 * Set the given MpegEncContext to defaults for encoding.
252 * the changed fields will not depend upon the prior state of the MpegEncContext.
254 static void mpv_encode_defaults(MpegEncContext *s)
257 ff_mpv_common_defaults(s);
/* Seed the shared default f_code table; the visible loop marks the
 * central +/-16 MV range (index MAX_MV is the zero-MV slot). */
259 for (i = -16; i < 16; i++) {
260 default_fcode_tab[i + MAX_MV] = 1;
/* Point this context at the file-scope default ME tables. */
262 s->me.mv_penalty = default_mv_penalty;
263 s->fcode_tab = default_fcode_tab;
/* Reset per-stream counters. */
265 s->input_picture_number = 0;
266 s->picture_in_gop_number = 0;
/* Select the DCT-quantization function pointers for this context:
 * arch-specific overrides first, then the C fallbacks, with the trellis
 * quantizer layered on top when requested. */
269 av_cold int ff_dct_encode_init(MpegEncContext *s)
272 ff_dct_encode_init_x86(s);
274 if (CONFIG_H263_ENCODER)
275 ff_h263dsp_init(&s->h263dsp);
/* Only install the C quantizer if no arch-specific one was set above. */
276 if (!s->dct_quantize)
277 s->dct_quantize = ff_dct_quantize_c;
279 s->denoise_dct = denoise_dct_c;
/* Remember the plain quantizer before possibly replacing dct_quantize
 * with the (slower) trellis version. */
280 s->fast_dct_quantize = s->dct_quantize;
281 if (s->avctx->trellis)
282 s->dct_quantize = dct_quantize_trellis_c;
287 /* init video encoder */
/**
 * Initialize an mpegvideo-family encoder: validate user options against the
 * selected codec's capabilities, configure the context, allocate quantization
 * and picture tables, and set up rate control.
 *
 * @return 0 on success, a negative AVERROR on invalid options or allocation
 *         failure (the fail: path tears down via ff_mpv_encode_end()).
 *
 * NOTE(review): this excerpt is missing many intermediate lines of the
 * original (closing braces, `break`s, some conditions); comments annotate
 * only what is visible.
 */
288 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
290 MpegEncContext *s = avctx->priv_data;
291 AVCPBProperties *cpb_props;
292 int i, ret, format_supported;
294 mpv_encode_defaults(s);
/* --- Pixel-format validation per codec --- */
296 switch (avctx->codec_id) {
297 case AV_CODEC_ID_MPEG2VIDEO:
298 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
299 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
300 av_log(avctx, AV_LOG_ERROR,
301 "only YUV420 and YUV422 are supported\n");
305 case AV_CODEC_ID_MJPEG:
306 case AV_CODEC_ID_AMV:
307 format_supported = 0;
308 /* JPEG color space */
309 if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
310 avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
311 avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
312 (avctx->color_range == AVCOL_RANGE_JPEG &&
313 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
314 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
315 avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
316 format_supported = 1;
317 /* MPEG color space */
318 else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
319 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
320 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
321 avctx->pix_fmt == AV_PIX_FMT_YUV444P))
322 format_supported = 1;
324 if (!format_supported) {
325 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
/* default: everything else is 4:2:0 only */
330 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
331 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
/* Derive internal chroma subsampling from the pixel format. */
336 switch (avctx->pix_fmt) {
337 case AV_PIX_FMT_YUVJ444P:
338 case AV_PIX_FMT_YUV444P:
339 s->chroma_format = CHROMA_444;
341 case AV_PIX_FMT_YUVJ422P:
342 case AV_PIX_FMT_YUV422P:
343 s->chroma_format = CHROMA_422;
345 case AV_PIX_FMT_YUVJ420P:
346 case AV_PIX_FMT_YUV420P:
348 s->chroma_format = CHROMA_420;
352 avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
/* Migrate deprecated public AVCodecContext options into private fields. */
354 #if FF_API_PRIVATE_OPT
355 FF_DISABLE_DEPRECATION_WARNINGS
356 if (avctx->rtp_payload_size)
357 s->rtp_payload_size = avctx->rtp_payload_size;
358 if (avctx->me_penalty_compensation)
359 s->me_penalty_compensation = avctx->me_penalty_compensation;
361 s->me_pre = avctx->pre_me;
362 FF_ENABLE_DEPRECATION_WARNINGS
/* --- Basic stream parameters --- */
365 s->bit_rate = avctx->bit_rate;
366 s->width = avctx->width;
367 s->height = avctx->height;
368 if (avctx->gop_size > 600 &&
369 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
370 av_log(avctx, AV_LOG_WARNING,
371 "keyframe interval too large!, reducing it from %d to %d\n",
372 avctx->gop_size, 600);
373 avctx->gop_size = 600;
375 s->gop_size = avctx->gop_size;
377 if (avctx->max_b_frames > MAX_B_FRAMES) {
378 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
379 "is %d.\n", MAX_B_FRAMES);
380 avctx->max_b_frames = MAX_B_FRAMES;
382 s->max_b_frames = avctx->max_b_frames;
383 s->codec_id = avctx->codec->id;
384 s->strict_std_compliance = avctx->strict_std_compliance;
385 s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
386 s->rtp_mode = !!s->rtp_payload_size;
387 s->intra_dc_precision = avctx->intra_dc_precision;
389 // workaround some differences between how applications specify dc precision
390 if (s->intra_dc_precision < 0) {
391 s->intra_dc_precision += 8;
392 } else if (s->intra_dc_precision >= 8)
393 s->intra_dc_precision -= 8;
395 if (s->intra_dc_precision < 0) {
396 av_log(avctx, AV_LOG_ERROR,
397 "intra dc precision must be positive, note some applications use"
398 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
399 return AVERROR(EINVAL);
402 if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))
/* Only MPEG-2 supports a raised DC precision (up to 3 = 11 bit). */
405 if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
406 av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
407 return AVERROR(EINVAL);
409 s->user_specified_pts = AV_NOPTS_VALUE;
411 if (s->gop_size <= 1) {
418 #if FF_API_MOTION_EST
419 FF_DISABLE_DEPRECATION_WARNINGS
420 s->me_method = avctx->me_method;
421 FF_ENABLE_DEPRECATION_WARNINGS
425 s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
428 FF_DISABLE_DEPRECATION_WARNINGS
429 if (avctx->border_masking != 0.0)
430 s->border_masking = avctx->border_masking;
431 FF_ENABLE_DEPRECATION_WARNINGS
/* Adaptive quantization is enabled whenever any masking option or QP-RD
 * is in use (tail of the condition not visible in this excerpt). */
434 s->adaptive_quant = (s->avctx->lumi_masking ||
435 s->avctx->dark_masking ||
436 s->avctx->temporal_cplx_masking ||
437 s->avctx->spatial_cplx_masking ||
438 s->avctx->p_masking ||
440 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
443 s->loop_filter = !!(s->avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
/* --- Rate control / VBV sanity checks ---
 * If a max rate but no buffer size was given, pick a codec-appropriate
 * VBV buffer size automatically. */
445 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
446 switch(avctx->codec_id) {
447 case AV_CODEC_ID_MPEG1VIDEO:
448 case AV_CODEC_ID_MPEG2VIDEO:
449 avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
451 case AV_CODEC_ID_MPEG4:
452 case AV_CODEC_ID_MSMPEG4V1:
453 case AV_CODEC_ID_MSMPEG4V2:
454 case AV_CODEC_ID_MSMPEG4V3:
/* Piecewise-linear interpolation of the MPEG-4 VBV buffer sizes
 * between the profile-level anchor points (in 16384-bit units). */
455 if (avctx->rc_max_rate >= 15000000) {
456 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
457 } else if(avctx->rc_max_rate >= 2000000) {
458 avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
459 } else if(avctx->rc_max_rate >= 384000) {
460 avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
462 avctx->rc_buffer_size = 40;
463 avctx->rc_buffer_size *= 16384;
466 if (avctx->rc_buffer_size) {
467 av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
/* max rate and buffer size must be specified together (or neither). */
471 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
472 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
476 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
477 av_log(avctx, AV_LOG_INFO,
478 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
481 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
482 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
486 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
487 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
/* max == avg but min != max means CBR is requested inconsistently. */
491 if (avctx->rc_max_rate &&
492 avctx->rc_max_rate == avctx->bit_rate &&
493 avctx->rc_max_rate != avctx->rc_min_rate) {
494 av_log(avctx, AV_LOG_INFO,
495 "impossible bitrate constraints, this will fail\n");
/* Buffer must hold at least one frame's worth of bits. */
498 if (avctx->rc_buffer_size &&
499 avctx->bit_rate * (int64_t)avctx->time_base.num >
500 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
501 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
/* Tolerance below one frame's bits makes rate control unstable; widen it. */
505 if (!s->fixed_qscale &&
506 avctx->bit_rate * av_q2d(avctx->time_base) >
507 avctx->bit_rate_tolerance) {
508 av_log(avctx, AV_LOG_WARNING,
509 "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
510 avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
/* CBR MPEG-1/2: warn when the vbv_delay field (16 bit, 90 kHz units)
 * cannot represent the configured buffer. */
513 if (s->avctx->rc_max_rate &&
514 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
515 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
516 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
517 90000LL * (avctx->rc_buffer_size - 1) >
518 s->avctx->rc_max_rate * 0xFFFFLL) {
519 av_log(avctx, AV_LOG_INFO,
520 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
521 "specified vbv buffer is too large for the given bitrate!\n");
/* --- Feature/codec compatibility checks --- */
524 if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
525 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
526 s->codec_id != AV_CODEC_ID_FLV1) {
527 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
531 if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
532 av_log(avctx, AV_LOG_ERROR,
533 "OBMC is only supported with simple mb decision\n");
537 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
538 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
542 if (s->max_b_frames &&
543 s->codec_id != AV_CODEC_ID_MPEG4 &&
544 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
545 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
546 av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
549 if (s->max_b_frames < 0) {
550 av_log(avctx, AV_LOG_ERROR,
551 "max b frames must be 0 or positive for mpegvideo based encoders\n");
/* These codecs store the pixel aspect ratio in 8 bits per component;
 * reduce an out-of-range SAR instead of failing. */
555 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
556 s->codec_id == AV_CODEC_ID_H263 ||
557 s->codec_id == AV_CODEC_ID_H263P) &&
558 (avctx->sample_aspect_ratio.num > 255 ||
559 avctx->sample_aspect_ratio.den > 255)) {
560 av_log(avctx, AV_LOG_WARNING,
561 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
562 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
563 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
564 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
/* --- Per-codec resolution limits --- */
567 if ((s->codec_id == AV_CODEC_ID_H263 ||
568 s->codec_id == AV_CODEC_ID_H263P) &&
569 (avctx->width > 2048 ||
570 avctx->height > 1152 )) {
571 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
574 if ((s->codec_id == AV_CODEC_ID_H263 ||
575 s->codec_id == AV_CODEC_ID_H263P) &&
576 ((avctx->width &3) ||
577 (avctx->height&3) )) {
578 av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
582 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
583 (avctx->width > 4095 ||
584 avctx->height > 4095 )) {
585 av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
589 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
590 (avctx->width > 16383 ||
591 avctx->height > 16383 )) {
592 av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
596 if (s->codec_id == AV_CODEC_ID_RV10 &&
598 avctx->height&15 )) {
599 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
600 return AVERROR(EINVAL);
603 if (s->codec_id == AV_CODEC_ID_RV20 &&
606 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
607 return AVERROR(EINVAL);
610 if ((s->codec_id == AV_CODEC_ID_WMV1 ||
611 s->codec_id == AV_CODEC_ID_WMV2) &&
613 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
617 if ((s->avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
618 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
619 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
623 #if FF_API_PRIVATE_OPT
624 FF_DISABLE_DEPRECATION_WARNINGS
625 if (avctx->mpeg_quant)
626 s->mpeg_quant = avctx->mpeg_quant;
627 FF_ENABLE_DEPRECATION_WARNINGS
630 // FIXME mpeg2 uses that too
631 if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
632 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
633 av_log(avctx, AV_LOG_ERROR,
634 "mpeg2 style quantization not supported by codec\n");
638 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
639 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
643 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
644 s->avctx->mb_decision != FF_MB_DECISION_RD) {
645 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
649 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
650 (s->codec_id == AV_CODEC_ID_AMV ||
651 s->codec_id == AV_CODEC_ID_MJPEG)) {
652 // Used to produce garbage with MJPEG.
653 av_log(avctx, AV_LOG_ERROR,
654 "QP RD is no longer compatible with MJPEG or AMV\n");
658 #if FF_API_PRIVATE_OPT
659 FF_DISABLE_DEPRECATION_WARNINGS
660 if (avctx->scenechange_threshold)
661 s->scenechange_threshold = avctx->scenechange_threshold;
662 FF_ENABLE_DEPRECATION_WARNINGS
665 if (s->scenechange_threshold < 1000000000 &&
666 (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
667 av_log(avctx, AV_LOG_ERROR,
668 "closed gop with scene change detection are not supported yet, "
669 "set threshold to 1000000000\n");
673 if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
674 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
675 s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
676 av_log(avctx, AV_LOG_ERROR,
677 "low delay forcing is only available for mpeg2, "
678 "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
681 if (s->max_b_frames != 0) {
682 av_log(avctx, AV_LOG_ERROR,
683 "B-frames cannot be used with low delay\n");
/* MPEG-2 non-linear quantizer currently only supports qmax <= 28. */
688 if (s->q_scale_type == 1) {
689 if (avctx->qmax > 28) {
690 av_log(avctx, AV_LOG_ERROR,
691 "non linear quant only supports qmax <= 28 currently\n");
696 if (avctx->slices > 1 &&
697 (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
698 av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
699 return AVERROR(EINVAL);
702 if (s->avctx->thread_count > 1 &&
703 s->codec_id != AV_CODEC_ID_MPEG4 &&
704 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
705 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
706 s->codec_id != AV_CODEC_ID_MJPEG &&
707 (s->codec_id != AV_CODEC_ID_H263P)) {
708 av_log(avctx, AV_LOG_ERROR,
709 "multi threaded encoding not supported by codec\n");
713 if (s->avctx->thread_count < 1) {
714 av_log(avctx, AV_LOG_ERROR,
715 "automatic thread number detection not supported by codec, "
720 if (!avctx->time_base.den || !avctx->time_base.num) {
721 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
725 #if FF_API_PRIVATE_OPT
726 FF_DISABLE_DEPRECATION_WARNINGS
727 if (avctx->b_frame_strategy)
728 s->b_frame_strategy = avctx->b_frame_strategy;
729 if (avctx->b_sensitivity != 40)
730 s->b_sensitivity = avctx->b_sensitivity;
731 FF_ENABLE_DEPRECATION_WARNINGS
734 if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
735 av_log(avctx, AV_LOG_INFO,
736 "notice: b_frame_strategy only affects the first pass\n");
737 s->b_frame_strategy = 0;
/* Normalize the time base by removing common factors. */
740 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
742 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
743 avctx->time_base.den /= i;
744 avctx->time_base.num /= i;
/* --- Quantizer bias defaults (QUANT_BIAS_SHIFT fixed point) --- */
748 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) {
749 // (a + x * 3 / 8) / x
750 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
751 s->inter_quant_bias = 0;
753 s->intra_quant_bias = 0;
755 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
758 if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
759 av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
760 return AVERROR(EINVAL);
763 #if FF_API_QUANT_BIAS
764 FF_DISABLE_DEPRECATION_WARNINGS
765 if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
766 s->intra_quant_bias = avctx->intra_quant_bias;
767 if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
768 s->inter_quant_bias = avctx->inter_quant_bias;
769 FF_ENABLE_DEPRECATION_WARNINGS
772 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
/* MPEG-4 stores the timebase denominator in a 16-bit field. */
774 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
775 s->avctx->time_base.den > (1 << 16) - 1) {
776 av_log(avctx, AV_LOG_ERROR,
777 "timebase %d/%d not supported by MPEG 4 standard, "
778 "the maximum admitted value for the timebase denominator "
779 "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
783 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
/* --- Per-codec output format / feature selection --- */
785 switch (avctx->codec->id) {
786 case AV_CODEC_ID_MPEG1VIDEO:
787 s->out_format = FMT_MPEG1;
788 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
789 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
791 case AV_CODEC_ID_MPEG2VIDEO:
792 s->out_format = FMT_MPEG1;
793 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
794 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
797 case AV_CODEC_ID_MJPEG:
798 case AV_CODEC_ID_AMV:
799 s->out_format = FMT_MJPEG;
800 s->intra_only = 1; /* force intra only for jpeg */
801 if (!CONFIG_MJPEG_ENCODER ||
802 ff_mjpeg_encode_init(s) < 0)
807 case AV_CODEC_ID_H261:
808 if (!CONFIG_H261_ENCODER)
810 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
811 av_log(avctx, AV_LOG_ERROR,
812 "The specified picture size of %dx%d is not valid for the "
813 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
814 s->width, s->height);
817 s->out_format = FMT_H261;
820 s->rtp_mode = 0; /* Sliced encoding not supported */
822 case AV_CODEC_ID_H263:
823 if (!CONFIG_H263_ENCODER)
825 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
826 s->width, s->height) == 8) {
827 av_log(avctx, AV_LOG_ERROR,
828 "The specified picture size of %dx%d is not valid for "
829 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
830 "352x288, 704x576, and 1408x1152. "
831 "Try H.263+.\n", s->width, s->height);
834 s->out_format = FMT_H263;
838 case AV_CODEC_ID_H263P:
839 s->out_format = FMT_H263;
842 s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
843 s->modified_quant = s->h263_aic;
844 s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
845 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
848 /* These are just to be sure */
852 case AV_CODEC_ID_FLV1:
853 s->out_format = FMT_H263;
854 s->h263_flv = 2; /* format = 1; 11-bit codes */
855 s->unrestricted_mv = 1;
856 s->rtp_mode = 0; /* don't allow GOB */
860 case AV_CODEC_ID_RV10:
861 s->out_format = FMT_H263;
865 case AV_CODEC_ID_RV20:
866 s->out_format = FMT_H263;
869 s->modified_quant = 1;
873 s->unrestricted_mv = 0;
875 case AV_CODEC_ID_MPEG4:
876 s->out_format = FMT_H263;
878 s->unrestricted_mv = 1;
879 s->low_delay = s->max_b_frames ? 0 : 1;
880 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
882 case AV_CODEC_ID_MSMPEG4V2:
883 s->out_format = FMT_H263;
885 s->unrestricted_mv = 1;
886 s->msmpeg4_version = 2;
890 case AV_CODEC_ID_MSMPEG4V3:
891 s->out_format = FMT_H263;
893 s->unrestricted_mv = 1;
894 s->msmpeg4_version = 3;
895 s->flipflop_rounding = 1;
899 case AV_CODEC_ID_WMV1:
900 s->out_format = FMT_H263;
902 s->unrestricted_mv = 1;
903 s->msmpeg4_version = 4;
904 s->flipflop_rounding = 1;
908 case AV_CODEC_ID_WMV2:
909 s->out_format = FMT_H263;
911 s->unrestricted_mv = 1;
912 s->msmpeg4_version = 5;
913 s->flipflop_rounding = 1;
921 #if FF_API_PRIVATE_OPT
922 FF_DISABLE_DEPRECATION_WARNINGS
923 if (avctx->noise_reduction)
924 s->noise_reduction = avctx->noise_reduction;
925 FF_ENABLE_DEPRECATION_WARNINGS
928 avctx->has_b_frames = !s->low_delay;
932 s->progressive_frame =
933 s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
934 AV_CODEC_FLAG_INTERLACED_ME) ||
/* --- Context + DSP initialization and table allocation --- */
939 if (ff_mpv_common_init(s) < 0)
942 ff_fdctdsp_init(&s->fdsp, avctx);
943 ff_me_cmp_init(&s->mecc, avctx);
944 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
945 ff_pixblockdsp_init(&s->pdsp, avctx);
946 ff_qpeldsp_init(&s->qdsp);
948 if (s->msmpeg4_version) {
949 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
950 2 * 2 * (MAX_LEVEL + 1) *
951 (MAX_RUN + 1) * 2 * sizeof(int), fail);
953 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
/* 64 coefficients x 32 qscale levels per matrix. */
955 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
956 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail);
957 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
958 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
959 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
960 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
961 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
962 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
963 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
964 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
967 if (s->noise_reduction) {
968 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
969 2 * 64 * sizeof(uint16_t), fail);
972 ff_dct_encode_init(s);
974 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
975 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
977 if (s->slice_context_count > 1) {
980 if (avctx->codec_id == AV_CODEC_ID_H263P)
981 s->h263_slice_structured = 1;
984 s->quant_precision = 5;
986 #if FF_API_PRIVATE_OPT
987 FF_DISABLE_DEPRECATION_WARNINGS
988 if (avctx->frame_skip_threshold)
989 s->frame_skip_threshold = avctx->frame_skip_threshold;
990 if (avctx->frame_skip_factor)
991 s->frame_skip_factor = avctx->frame_skip_factor;
992 if (avctx->frame_skip_exp)
993 s->frame_skip_exp = avctx->frame_skip_exp;
994 if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
995 s->frame_skip_cmp = avctx->frame_skip_cmp;
996 FF_ENABLE_DEPRECATION_WARNINGS
999 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, s->avctx->ildct_cmp);
1000 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
/* Codec-specific encoder init, keyed on the output format chosen above. */
1002 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
1003 ff_h261_encode_init(s);
1004 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
1005 ff_h263_encode_init(s);
1006 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
1007 if ((ret = ff_msmpeg4_encode_init(s)) < 0)
1009 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
1010 && s->out_format == FMT_MPEG1)
1011 ff_mpeg1_encode_init(s);
/* Fill the default quant matrices (in IDCT-permuted order), then let
 * user-supplied matrices override them. */
1014 for (i = 0; i < 64; i++) {
1015 int j = s->idsp.idct_permutation[i];
1016 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
1018 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
1019 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
1020 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1021 s->intra_matrix[j] =
1022 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1025 s->chroma_intra_matrix[j] =
1026 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1027 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1029 if (s->avctx->intra_matrix)
1030 s->intra_matrix[j] = s->avctx->intra_matrix[i];
1031 if (s->avctx->inter_matrix)
1032 s->inter_matrix[j] = s->avctx->inter_matrix[i];
1035 /* precompute matrix */
1036 /* for mjpeg, we do include qscale in the matrix */
1037 if (s->out_format != FMT_MJPEG) {
1038 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
1039 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1041 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
1042 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1046 #if FF_API_RC_STRATEGY
1047 FF_DISABLE_DEPRECATION_WARNINGS
1048 if (!s->rc_strategy)
1049 s->rc_strategy = s->avctx->rc_strategy;
1050 FF_ENABLE_DEPRECATION_WARNINGS
/* --- Rate-control setup (optional Xvid-style RC on 2nd pass) --- */
1053 if (ff_rate_control_init(s) < 0)
1056 #if FF_API_RC_STRATEGY
1057 av_assert0(MPV_RC_STRATEGY_XVID == FF_RC_STRATEGY_XVID);
1060 if ((s->avctx->flags & AV_CODEC_FLAG_PASS2) && s->rc_strategy == MPV_RC_STRATEGY_XVID) {
1062 ret = ff_xvid_rate_control_init(s);
1064 ret = AVERROR(ENOSYS);
1065 av_log(s->avctx, AV_LOG_ERROR,
1066 "Xvid ratecontrol requires libavcodec compiled with Xvid support.\n");
/* Migrate further deprecated public options into private fields. */
1072 #if FF_API_ERROR_RATE
1073 FF_DISABLE_DEPRECATION_WARNINGS
1074 if (avctx->error_rate)
1075 s->error_rate = avctx->error_rate;
1076 FF_ENABLE_DEPRECATION_WARNINGS;
1079 #if FF_API_NORMALIZE_AQP
1080 FF_DISABLE_DEPRECATION_WARNINGS
1081 if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP)
1082 s->mpv_flags |= FF_MPV_FLAG_NAQ;
1083 FF_ENABLE_DEPRECATION_WARNINGS;
1087 FF_DISABLE_DEPRECATION_WARNINGS
1088 if (avctx->flags & CODEC_FLAG_MV0)
1089 s->mpv_flags |= FF_MPV_FLAG_MV0;
1090 FF_ENABLE_DEPRECATION_WARNINGS
1094 FF_DISABLE_DEPRECATION_WARNINGS
1095 if (avctx->rc_qsquish != 0.0)
1096 s->rc_qsquish = avctx->rc_qsquish;
1097 if (avctx->rc_qmod_amp != 0.0)
1098 s->rc_qmod_amp = avctx->rc_qmod_amp;
1099 if (avctx->rc_qmod_freq)
1100 s->rc_qmod_freq = avctx->rc_qmod_freq;
1101 if (avctx->rc_buffer_aggressivity != 1.0)
1102 s->rc_buffer_aggressivity = avctx->rc_buffer_aggressivity;
1103 if (avctx->rc_initial_cplx != 0.0)
1104 s->rc_initial_cplx = avctx->rc_initial_cplx;
1106 s->lmin = avctx->lmin;
1108 s->lmax = avctx->lmax;
1111 av_freep(&s->rc_eq);
1112 s->rc_eq = av_strdup(avctx->rc_eq);
1114 return AVERROR(ENOMEM);
1116 FF_ENABLE_DEPRECATION_WARNINGS
1119 #if FF_API_PRIVATE_OPT
1120 FF_DISABLE_DEPRECATION_WARNINGS
1121 if (avctx->brd_scale)
1122 s->brd_scale = avctx->brd_scale;
1124 if (avctx->prediction_method)
1125 s->pred = avctx->prediction_method + 1;
1126 FF_ENABLE_DEPRECATION_WARNINGS
/* B-frame strategy 2 needs downscaled temp frames for lookahead. */
1129 if (s->b_frame_strategy == 2) {
1130 for (i = 0; i < s->max_b_frames + 2; i++) {
1131 s->tmp_frames[i] = av_frame_alloc();
1132 if (!s->tmp_frames[i])
1133 return AVERROR(ENOMEM);
1135 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1136 s->tmp_frames[i]->width = s->width >> s->brd_scale;
1137 s->tmp_frames[i]->height = s->height >> s->brd_scale;
1139 ret = av_frame_get_buffer(s->tmp_frames[i], 32);
/* Advertise the CPB properties via side data for muxers/players. */
1145 cpb_props = ff_add_cpb_side_data(avctx);
1147 return AVERROR(ENOMEM);
1148 cpb_props->max_bitrate = avctx->rc_max_rate;
1149 cpb_props->min_bitrate = avctx->rc_min_rate;
1150 cpb_props->avg_bitrate = avctx->bit_rate;
1151 cpb_props->buffer_size = avctx->rc_buffer_size;
/* fail: common cleanup path for the FF_ALLOCZ_OR_GOTO failures above. */
1155 ff_mpv_encode_end(avctx);
1156 return AVERROR_UNKNOWN;
/* Tear down the encoder: release rate control, shared context state,
 * per-codec resources, and all per-encoder allocations.
 * NOTE(review): this excerpt has elided lines (e.g. the opening brace and
 * the declaration of `i`); comments describe only what is visible. */
1159 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1161 MpegEncContext *s = avctx->priv_data;
1164 ff_rate_control_uninit(s);
/* Xvid-style two-pass rate control has its own uninit path. */
1166 if ((avctx->flags & AV_CODEC_FLAG_PASS2) && s->rc_strategy == MPV_RC_STRATEGY_XVID)
1167 ff_xvid_rate_control_uninit(s);
1170 ff_mpv_common_end(s);
1171 if (CONFIG_MJPEG_ENCODER &&
1172 s->out_format == FMT_MJPEG)
1173 ff_mjpeg_encode_close(s);
1175 av_freep(&avctx->extradata);
/* Free the scaled B-frame-strategy scratch frames (tmp_frames). */
1177 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1178 av_frame_free(&s->tmp_frames[i]);
1180 ff_free_picture_tables(&s->new_picture);
1181 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1183 av_freep(&s->avctx->stats_out);
1184 av_freep(&s->ac_stats);
/* The chroma matrices may alias the luma ones; only free them when they
 * are distinct allocations, then clear the (possibly dangling) pointers. */
1186 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1187 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1188 s->q_chroma_intra_matrix= NULL;
1189 s->q_chroma_intra_matrix16= NULL;
1190 av_freep(&s->q_intra_matrix);
1191 av_freep(&s->q_inter_matrix);
1192 av_freep(&s->q_intra_matrix16);
1193 av_freep(&s->q_inter_matrix16);
1194 av_freep(&s->input_picture);
1195 av_freep(&s->reordered_input_picture);
1196 av_freep(&s->dct_offset);
/* Sum of absolute errors of a 16x16 block against a constant reference
 * value `ref` (typically the block mean); used by get_intra_count().
 * NOTE(review): declarations of x/y/acc and the return are elided here. */
1201 static int get_sae(uint8_t *src, int ref, int stride)
1206 for (y = 0; y < 16; y++) {
1207 for (x = 0; x < 16; x++) {
1208 acc += FFABS(src[x + y * stride] - ref);
/* Count 16x16 macroblocks for which intra coding looks cheaper than inter:
 * a block is counted when its flatness (SAE vs. its own mean, + 500 bias)
 * is below the inter SAD against the reference frame.
 * NOTE(review): `w`, loop closers and the return are elided in this excerpt. */
1215 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1216 uint8_t *ref, int stride)
/* Clip height to a multiple of 16 so only whole macroblocks are scanned. */
1222 h = s->height & ~15;
1224 for (y = 0; y < h; y += 16) {
1225 for (x = 0; x < w; x += 16) {
1226 int offset = x + y * stride;
1227 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
/* pix_sum over 256 pixels, rounded: >>8 gives the block mean. */
1229 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1230 int sae = get_sae(src + offset, mean, stride);
1232 acc += sae + 500 < sad;
/* Thin wrapper around ff_alloc_picture() supplying this encoder's geometry
 * (strides, chroma shifts, output format); `shared` selects shared-buffer
 * semantics for the picture. Returns the ff_alloc_picture() result. */
1238 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1240 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1241 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1242 s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1243 &s->linesize, &s->uvlinesize);
/* Accept one user frame into the encoder's input queue: validate/guess its
 * pts, either reference the frame directly (when strides and alignment
 * match) or copy it into an internal Picture, then append it to
 * s->input_picture[] honoring the B-frame encoding delay.
 * NOTE(review): many lines are elided in this excerpt (pts read, `direct`
 * computation, else-branches, edge drawing arguments); comments describe
 * only what is visible here. */
1246 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1248 Picture *pic = NULL;
1250 int i, display_picture_number = 0, ret;
/* Frames are delayed by max_b_frames (or 1 without low_delay) before
 * encoding so reordering decisions can look ahead. */
1251 int encoding_delay = s->max_b_frames ? s->max_b_frames
1252 : (s->low_delay ? 0 : 1);
1253 int flush_offset = 1;
1258 display_picture_number = s->input_picture_number++;
/* pts sanity: must be strictly increasing versus the last user pts. */
1260 if (pts != AV_NOPTS_VALUE) {
1261 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1262 int64_t last = s->user_specified_pts;
1265 av_log(s->avctx, AV_LOG_ERROR,
1266 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1268 return AVERROR(EINVAL);
1271 if (!s->low_delay && display_picture_number == 1)
1272 s->dts_delta = pts - last;
1274 s->user_specified_pts = pts;
/* No pts supplied: extrapolate from the previous one, or fall back to
 * the display picture number. */
1276 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1277 s->user_specified_pts =
1278 pts = s->user_specified_pts + 1;
1279 av_log(s->avctx, AV_LOG_INFO,
1280 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1283 pts = display_picture_number;
/* Direct (zero-copy) use of the user buffer requires matching strides
 * and alignment; any mismatch below disqualifies it. */
1287 if (!pic_arg->buf[0] ||
1288 pic_arg->linesize[0] != s->linesize ||
1289 pic_arg->linesize[1] != s->uvlinesize ||
1290 pic_arg->linesize[2] != s->uvlinesize)
1292 if ((s->width & 15) || (s->height & 15))
1294 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1296 if (s->linesize & (STRIDE_ALIGN-1))
1299 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1300 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1302 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1306 pic = &s->picture[i];
1310 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1313 ret = alloc_picture(s, pic, direct);
/* If the user's planes already sit exactly at the INPLACE_OFFSET inside
 * our buffers, no copy is needed. */
1318 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1319 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1320 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1323 int h_chroma_shift, v_chroma_shift;
1324 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
/* Per-plane copy with stride conversion. */
1328 for (i = 0; i < 3; i++) {
1329 int src_stride = pic_arg->linesize[i];
1330 int dst_stride = i ? s->uvlinesize : s->linesize;
1331 int h_shift = i ? h_chroma_shift : 0;
1332 int v_shift = i ? v_chroma_shift : 0;
1333 int w = s->width >> h_shift;
1334 int h = s->height >> v_shift;
1335 uint8_t *src = pic_arg->data[i];
1336 uint8_t *dst = pic->f->data[i];
/* Interlaced MPEG-2 with large bottom padding is a special case. */
1339 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1340 && !s->progressive_sequence
1341 && FFALIGN(s->height, 32) - s->height > 16)
1344 if (!s->avctx->rc_buffer_size)
1345 dst += INPLACE_OFFSET;
1347 if (src_stride == dst_stride)
1348 memcpy(dst, src, src_stride * h);
1351 uint8_t *dst2 = dst;
1353 memcpy(dst2, src, w);
/* Pad edges when dimensions are not macroblock-aligned. */
1358 if ((s->width & 15) || (s->height & (vpad-1))) {
1359 s->mpvencdsp.draw_edges(dst, dst_stride,
1369 ret = av_frame_copy_props(pic->f, pic_arg);
1373 pic->f->display_picture_number = display_picture_number;
1374 pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1376 /* Flushing: When we have not received enough input frames,
1377 * ensure s->input_picture[0] contains the first picture */
1378 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1379 if (s->input_picture[flush_offset])
1382 if (flush_offset <= 1)
1385 encoding_delay = encoding_delay - flush_offset + 1;
1388 /* shift buffer entries */
1389 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1390 s->input_picture[i - flush_offset] = s->input_picture[i];
1392 s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether frame `p` may be skipped (coded as a repeat of `ref`):
 * accumulate a per-8x8-block difference score over all three planes using
 * frame_skip_cmp, shaped by frame_skip_exp, then compare it against
 * frame_skip_threshold and a lambda-scaled frame_skip_factor.
 * NOTE(review): declarations, loop closers and return statements are
 * elided in this excerpt. */
1397 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1401 int64_t score64 = 0;
1403 for (plane = 0; plane < 3; plane++) {
1404 const int stride = p->f->linesize[plane];
/* Luma covers 2x2 8-pixel blocks per MB dimension, chroma 1x1. */
1405 const int bw = plane ? 1 : 2;
1406 for (y = 0; y < s->mb_height * bw; y++) {
1407 for (x = 0; x < s->mb_width * bw; x++) {
/* Non-shared pictures keep their payload at a 16-byte inset. */
1408 int off = p->shared ? 0 : 16;
1409 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1410 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1411 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
/* frame_skip_exp selects the accumulation norm: max, L1, L2, L3, L4. */
1413 switch (FFABS(s->frame_skip_exp)) {
1414 case 0: score = FFMAX(score, v); break;
1415 case 1: score += FFABS(v); break;
1416 case 2: score64 += v * (int64_t)v; break;
1417 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1418 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
/* Negative exponent: normalize back via the corresponding root. */
1427 if (s->frame_skip_exp < 0)
1428 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1429 -1.0/s->frame_skip_exp);
1431 if (score64 < s->frame_skip_threshold)
1433 if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
/* Helper for estimate_best_b_count(): push one frame through the probe
 * encoder `c` with the send/receive API and (per the elided lines,
 * presumably) accumulate the produced packet size. EAGAIN/EOF from
 * receive are not treated as errors.
 * NOTE(review): the size accumulation and return are elided here. */
1438 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1440 AVPacket pkt = { 0 };
1444 av_init_packet(&pkt);
1446 ret = avcodec_send_frame(c, frame);
1451 ret = avcodec_receive_packet(c, &pkt);
1454 av_packet_unref(&pkt);
1455 } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1462 static int estimate_best_b_count(MpegEncContext *s)
1464 const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1465 const int scale = s->brd_scale;
1466 int width = s->width >> scale;
1467 int height = s->height >> scale;
1468 int i, j, out_size, p_lambda, b_lambda, lambda2;
1469 int64_t best_rd = INT64_MAX;
1470 int best_b_count = -1;
1473 av_assert0(scale >= 0 && scale <= 3);
1476 //s->next_picture_ptr->quality;
1477 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1478 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1479 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1480 if (!b_lambda) // FIXME we should do this somewhere else
1481 b_lambda = p_lambda;
1482 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1485 for (i = 0; i < s->max_b_frames + 2; i++) {
1486 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1487 s->next_picture_ptr;
1490 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1491 pre_input = *pre_input_ptr;
1492 memcpy(data, pre_input_ptr->f->data, sizeof(data));
1494 if (!pre_input.shared && i) {
1495 data[0] += INPLACE_OFFSET;
1496 data[1] += INPLACE_OFFSET;
1497 data[2] += INPLACE_OFFSET;
1500 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1501 s->tmp_frames[i]->linesize[0],
1503 pre_input.f->linesize[0],
1505 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1506 s->tmp_frames[i]->linesize[1],
1508 pre_input.f->linesize[1],
1509 width >> 1, height >> 1);
1510 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1511 s->tmp_frames[i]->linesize[2],
1513 pre_input.f->linesize[2],
1514 width >> 1, height >> 1);
1518 for (j = 0; j < s->max_b_frames + 1; j++) {
1522 if (!s->input_picture[j])
1525 c = avcodec_alloc_context3(NULL);
1527 return AVERROR(ENOMEM);
1531 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1532 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1533 c->mb_decision = s->avctx->mb_decision;
1534 c->me_cmp = s->avctx->me_cmp;
1535 c->mb_cmp = s->avctx->mb_cmp;
1536 c->me_sub_cmp = s->avctx->me_sub_cmp;
1537 c->pix_fmt = AV_PIX_FMT_YUV420P;
1538 c->time_base = s->avctx->time_base;
1539 c->max_b_frames = s->max_b_frames;
1541 ret = avcodec_open2(c, codec, NULL);
1545 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1546 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1548 out_size = encode_frame(c, s->tmp_frames[0]);
1554 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1556 for (i = 0; i < s->max_b_frames + 1; i++) {
1557 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1559 s->tmp_frames[i + 1]->pict_type = is_p ?
1560 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1561 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1563 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1569 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1572 /* get the delayed frames */
1573 out_size = encode_frame(c, NULL);
1578 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1580 rd += c->error[0] + c->error[1] + c->error[2];
1588 avcodec_free_context(&c);
1593 return best_b_count;
/* Choose the next picture to encode and its coding type: handles frame
 * skipping, forced I-frames, the three B-frame placement strategies,
 * GOP/closed-GOP boundaries, and finally moves the chosen picture into
 * s->reordered_input_picture[]/s->new_picture and sets current_picture_ptr.
 * NOTE(review): numerous lines (declarations, else-branches, closers,
 * error paths) are elided in this excerpt; comments cover only what is
 * visible. */
1596 static int select_input_picture(MpegEncContext *s)
/* Shift the reorder queue down by one slot. */
1600 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1601 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1602 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1604 /* set next picture type & ordering */
1605 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
/* Frame-skip decision: drop the input and feed 0 bits to VBV. */
1606 if (s->frame_skip_threshold || s->frame_skip_factor) {
1607 if (s->picture_in_gop_number < s->gop_size &&
1608 s->next_picture_ptr &&
1609 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1610 // FIXME check that the gop check above is +-1 correct
1611 av_frame_unref(s->input_picture[0]->f);
1613 ff_vbv_update(s, 0);
/* No reference available (or intra-only codec): force an I-frame. */
1619 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1620 !s->next_picture_ptr || s->intra_only) {
1621 s->reordered_input_picture[0] = s->input_picture[0];
1622 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1623 s->reordered_input_picture[0]->f->coded_picture_number =
1624 s->coded_picture_number++;
/* Two-pass: picture types come from the first-pass stats. */
1628 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1629 for (i = 0; i < s->max_b_frames + 1; i++) {
1630 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1632 if (pict_num >= s->rc_context.num_entries)
1634 if (!s->input_picture[i]) {
1635 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1639 s->input_picture[i]->f->pict_type =
1640 s->rc_context.entry[pict_num].new_pict_type;
/* Strategy 0: always use max_b_frames (clamped to available input). */
1644 if (s->b_frame_strategy == 0) {
1645 b_frames = s->max_b_frames;
1646 while (b_frames && !s->input_picture[b_frames])
/* Strategy 1: score candidates by intra-MB count vs. the previous
 * frame and stop where the scene changes too much. */
1648 } else if (s->b_frame_strategy == 1) {
1649 for (i = 1; i < s->max_b_frames + 1; i++) {
1650 if (s->input_picture[i] &&
1651 s->input_picture[i]->b_frame_score == 0) {
1652 s->input_picture[i]->b_frame_score =
1654 s->input_picture[i ]->f->data[0],
1655 s->input_picture[i - 1]->f->data[0],
1659 for (i = 0; i < s->max_b_frames + 1; i++) {
1660 if (!s->input_picture[i] ||
1661 s->input_picture[i]->b_frame_score - 1 >
1662 s->mb_num / s->b_sensitivity)
1666 b_frames = FFMAX(0, i - 1);
/* Reset the cached scores for the consumed pictures. */
1669 for (i = 0; i < b_frames + 1; i++) {
1670 s->input_picture[i]->b_frame_score = 0;
/* Strategy 2: full rate-distortion search. */
1672 } else if (s->b_frame_strategy == 2) {
1673 b_frames = estimate_best_b_count(s);
/* Respect user-forced picture types inside the candidate run. */
1680 for (i = b_frames - 1; i >= 0; i--) {
1681 int type = s->input_picture[i]->f->pict_type;
1682 if (type && type != AV_PICTURE_TYPE_B)
1685 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1686 b_frames == s->max_b_frames) {
1687 av_log(s->avctx, AV_LOG_ERROR,
1688 "warning, too many B-frames in a row\n");
/* GOP boundary handling: trim B-run for strict GOP, force an I at the
 * boundary for closed GOP. */
1691 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1692 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1693 s->gop_size > s->picture_in_gop_number) {
1694 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1696 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1698 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1702 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1703 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
/* Emit the anchor frame first, then the B-frames behind it. */
1706 s->reordered_input_picture[0] = s->input_picture[b_frames];
1707 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1708 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1709 s->reordered_input_picture[0]->f->coded_picture_number =
1710 s->coded_picture_number++;
1711 for (i = 0; i < b_frames; i++) {
1712 s->reordered_input_picture[i + 1] = s->input_picture[i];
1713 s->reordered_input_picture[i + 1]->f->pict_type =
1715 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1716 s->coded_picture_number++;
1721 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1723 if (s->reordered_input_picture[0]) {
/* reference=3 marks forward+backward reference use for non-B frames. */
1724 s->reordered_input_picture[0]->reference =
1725 s->reordered_input_picture[0]->f->pict_type !=
1726 AV_PICTURE_TYPE_B ? 3 : 0;
1728 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1731 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1732 // input is a shared pix, so we can't modify it -> allocate a new
1733 // one & ensure that the shared one is reuseable
1736 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1739 pic = &s->picture[i];
1741 pic->reference = s->reordered_input_picture[0]->reference;
1742 if (alloc_picture(s, pic, 0) < 0) {
1746 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1750 /* mark us unused / free shared pic */
1751 av_frame_unref(s->reordered_input_picture[0]->f);
1752 s->reordered_input_picture[0]->shared = 0;
1754 s->current_picture_ptr = pic;
1756 // input is not a shared pix -> reuse buffer for current_pix
1757 s->current_picture_ptr = s->reordered_input_picture[0];
1758 for (i = 0; i < 4; i++) {
1759 s->new_picture.f->data[i] += INPLACE_OFFSET;
1762 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1763 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1764 s->current_picture_ptr)) < 0)
1767 s->picture_number = s->new_picture.f->display_picture_number;
/* Post-encode bookkeeping: pad the reconstructed reference frame's edges
 * (for unrestricted MV prediction), remember last picture type/lambda,
 * and mirror stats into the deprecated coded_frame/error fields.
 * NOTE(review): the reference-frame condition at 1776 and some closers
 * are elided in this excerpt. */
1772 static void frame_end(MpegEncContext *s)
1774 if (s->unrestricted_mv &&
1775 s->current_picture.reference &&
1777 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1778 int hshift = desc->log2_chroma_w;
1779 int vshift = desc->log2_chroma_h;
/* Replicate border pixels on all three planes so motion vectors may
 * point outside the picture. */
1780 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1781 s->current_picture.f->linesize[0],
1782 s->h_edge_pos, s->v_edge_pos,
1783 EDGE_WIDTH, EDGE_WIDTH,
1784 EDGE_TOP | EDGE_BOTTOM);
1785 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1786 s->current_picture.f->linesize[1],
1787 s->h_edge_pos >> hshift,
1788 s->v_edge_pos >> vshift,
1789 EDGE_WIDTH >> hshift,
1790 EDGE_WIDTH >> vshift,
1791 EDGE_TOP | EDGE_BOTTOM);
1792 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1793 s->current_picture.f->linesize[2],
1794 s->h_edge_pos >> hshift,
1795 s->v_edge_pos >> vshift,
1796 EDGE_WIDTH >> hshift,
1797 EDGE_WIDTH >> vshift,
1798 EDGE_TOP | EDGE_BOTTOM);
/* Remember per-type lambdas for the next frame's rate control. */
1803 s->last_pict_type = s->pict_type;
1804 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1805 if (s->pict_type!= AV_PICTURE_TYPE_B)
1806 s->last_non_b_pict_type = s->pict_type;
/* Deprecated-API mirrors, compiled only while the old fields exist. */
1808 #if FF_API_CODED_FRAME
1809 FF_DISABLE_DEPRECATION_WARNINGS
1810 av_frame_unref(s->avctx->coded_frame);
1811 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1812 FF_ENABLE_DEPRECATION_WARNINGS
1814 #if FF_API_ERROR_FRAME
1815 FF_DISABLE_DEPRECATION_WARNINGS
1816 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1817 sizeof(s->current_picture.encoding_error));
1818 FF_ENABLE_DEPRECATION_WARNINGS
/* Refresh the per-coefficient DCT noise-reduction offsets from the
 * accumulated error statistics, separately for intra and inter blocks.
 * Counts/sums are halved once they exceed 2^16 so the statistics form a
 * decaying average rather than growing without bound. */
1822 static void update_noise_reduction(MpegEncContext *s)
1826 for (intra = 0; intra < 2; intra++) {
1827 if (s->dct_count[intra] > (1 << 16)) {
1828 for (i = 0; i < 64; i++) {
1829 s->dct_error_sum[intra][i] >>= 1;
1831 s->dct_count[intra] >>= 1;
/* offset = strength * count / error_sum, with rounding; +1 avoids a
 * division by zero for untouched coefficients. */
1834 for (i = 0; i < 64; i++) {
1835 s->dct_offset[intra][i] = (s->noise_reduction *
1836 s->dct_count[intra] +
1837 s->dct_error_sum[intra][i] / 2) /
1838 (s->dct_error_sum[intra][i] + 1);
/* Per-frame setup before encoding: rotate last/next/current reference
 * pictures, adjust plane pointers/strides for field pictures, select the
 * dequantizer set for this codec, and refresh noise-reduction tables.
 * NOTE(review): some closers and the return are elided in this excerpt. */
1843 static int frame_start(MpegEncContext *s)
1847 /* mark & release old frames */
1848 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1849 s->last_picture_ptr != s->next_picture_ptr &&
1850 s->last_picture_ptr->f->buf[0]) {
1851 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1854 s->current_picture_ptr->f->pict_type = s->pict_type;
1855 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1857 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1858 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1859 s->current_picture_ptr)) < 0)
/* B-frames do not advance the reference chain. */
1862 if (s->pict_type != AV_PICTURE_TYPE_B) {
1863 s->last_picture_ptr = s->next_picture_ptr;
1865 s->next_picture_ptr = s->current_picture_ptr;
1868 if (s->last_picture_ptr) {
1869 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1870 if (s->last_picture_ptr->f->buf[0] &&
1871 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1872 s->last_picture_ptr)) < 0)
1875 if (s->next_picture_ptr) {
1876 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1877 if (s->next_picture_ptr->f->buf[0] &&
1878 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1879 s->next_picture_ptr)) < 0)
/* Field pictures: offset to the bottom field and double the strides so
 * each field is addressed as a frame. */
1883 if (s->picture_structure!= PICT_FRAME) {
1885 for (i = 0; i < 4; i++) {
1886 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1887 s->current_picture.f->data[i] +=
1888 s->current_picture.f->linesize[i];
1890 s->current_picture.f->linesize[i] *= 2;
1891 s->last_picture.f->linesize[i] *= 2;
1892 s->next_picture.f->linesize[i] *= 2;
/* Pick the matching dequantizer pair for the target bitstream format. */
1896 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1897 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1898 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1899 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1900 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1901 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1903 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1904 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1907 if (s->dct_error_sum) {
1908 av_assert2(s->noise_reduction && s->encoding);
1909 update_noise_reduction(s);
/* Top-level encode entry point: queue the input frame, pick the picture
 * to code, allocate the packet, run encode_picture() (re-running with a
 * larger lambda if the VBV would overflow), write stuffing, fix up
 * MPEG-1/2 vbv_delay for CBR, and set packet pts/dts/flags.
 * NOTE(review): many lines (error paths, closers, else-branches) are
 * elided in this excerpt; comments cover only what is visible. */
1915 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1916 const AVFrame *pic_arg, int *got_packet)
1918 MpegEncContext *s = avctx->priv_data;
1919 int i, stuffing_count, ret;
1920 int context_count = s->slice_context_count;
1922 s->vbv_ignore_qmax = 0;
1924 s->picture_in_gop_number++;
1926 if (load_input_picture(s, pic_arg) < 0)
1929 if (select_input_picture(s) < 0) {
/* Only encode when a picture was actually selected. */
1934 if (s->new_picture.f->data[0]) {
/* Single-slice, caller-unallocated packets can grow on demand;
 * otherwise allocate the worst-case size up front. */
1935 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1936 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1938 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1939 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1942 s->mb_info_ptr = av_packet_new_side_data(pkt,
1943 AV_PKT_DATA_H263_MB_INFO,
1944 s->mb_width*s->mb_height*12);
1945 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* Give each slice thread a proportional slice of the packet buffer. */
1948 for (i = 0; i < context_count; i++) {
1949 int start_y = s->thread_context[i]->start_mb_y;
1950 int end_y = s->thread_context[i]-> end_mb_y;
1951 int h = s->mb_height;
1952 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1953 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1955 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1958 s->pict_type = s->new_picture.f->pict_type;
1960 ret = frame_start(s);
1964 ret = encode_picture(s, s->picture_number);
1965 if (growing_buffer) {
1966 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1967 pkt->data = s->pb.buf;
1968 pkt->size = avctx->internal->byte_buffer_size;
/* Mirror bit statistics into deprecated AVCodecContext fields. */
1973 #if FF_API_STAT_BITS
1974 FF_DISABLE_DEPRECATION_WARNINGS
1975 avctx->header_bits = s->header_bits;
1976 avctx->mv_bits = s->mv_bits;
1977 avctx->misc_bits = s->misc_bits;
1978 avctx->i_tex_bits = s->i_tex_bits;
1979 avctx->p_tex_bits = s->p_tex_bits;
1980 avctx->i_count = s->i_count;
1981 // FIXME f/b_count in avctx
1982 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1983 avctx->skip_count = s->skip_count;
1984 FF_ENABLE_DEPRECATION_WARNINGS
1989 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1990 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
/* VBV check: if the frame is too large, raise lambda and re-encode. */
1992 if (avctx->rc_buffer_size) {
1993 RateControlContext *rcc = &s->rc_context;
1994 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1995 int hq = (s->avctx->mb_decision == FF_MB_DECISION_RD || s->avctx->trellis);
1996 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1998 if (put_bits_count(&s->pb) > max_size &&
1999 s->lambda < s->lmax) {
2000 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
2001 (s->qscale + 1) / s->qscale);
2002 if (s->adaptive_quant) {
2004 for (i = 0; i < s->mb_height * s->mb_stride; i++)
2005 s->lambda_table[i] =
2006 FFMAX(s->lambda_table[i] + min_step,
2007 s->lambda_table[i] * (s->qscale + 1) /
2010 s->mb_skipped = 0; // done in frame_start()
2011 // done in encode_picture() so we must undo it
2012 if (s->pict_type == AV_PICTURE_TYPE_P) {
2013 if (s->flipflop_rounding ||
2014 s->codec_id == AV_CODEC_ID_H263P ||
2015 s->codec_id == AV_CODEC_ID_MPEG4)
2016 s->no_rounding ^= 1;
2018 if (s->pict_type != AV_PICTURE_TYPE_B) {
2019 s->time_base = s->last_time_base;
2020 s->last_non_b_time = s->time - s->pp_time;
/* Rewind every slice bitstream writer before re-encoding. */
2022 for (i = 0; i < context_count; i++) {
2023 PutBitContext *pb = &s->thread_context[i]->pb;
2024 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
2026 s->vbv_ignore_qmax = 1;
2027 av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
2031 av_assert0(s->avctx->rc_max_rate);
2034 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
2035 ff_write_pass1_stats(s);
/* Accumulate per-plane encoding error for PSNR reporting. */
2037 for (i = 0; i < 4; i++) {
2038 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
2039 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
2041 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
2042 s->current_picture_ptr->encoding_error,
2043 (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
2046 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
2047 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
2048 s->misc_bits + s->i_tex_bits +
2050 flush_put_bits(&s->pb);
2051 s->frame_bits = put_bits_count(&s->pb);
2053 stuffing_count = ff_vbv_update(s, s->frame_bits);
2054 s->stuffing_bits = 8*stuffing_count;
/* Append stuffing bytes required by the VBV model, per codec syntax. */
2055 if (stuffing_count) {
2056 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
2057 stuffing_count + 50) {
2058 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
2062 switch (s->codec_id) {
2063 case AV_CODEC_ID_MPEG1VIDEO:
2064 case AV_CODEC_ID_MPEG2VIDEO:
2065 while (stuffing_count--) {
2066 put_bits(&s->pb, 8, 0);
/* MPEG-4 uses a stuffing start code (0x000001C3) then 0xFF bytes. */
2069 case AV_CODEC_ID_MPEG4:
2070 put_bits(&s->pb, 16, 0);
2071 put_bits(&s->pb, 16, 0x1C3);
2072 stuffing_count -= 4;
2073 while (stuffing_count--) {
2074 put_bits(&s->pb, 8, 0xFF);
2078 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2080 flush_put_bits(&s->pb);
2081 s->frame_bits = put_bits_count(&s->pb);
2084 /* update MPEG-1/2 vbv_delay for CBR */
2085 if (s->avctx->rc_max_rate &&
2086 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
2087 s->out_format == FMT_MPEG1 &&
2088 90000LL * (avctx->rc_buffer_size - 1) <=
2089 s->avctx->rc_max_rate * 0xFFFFLL) {
2090 AVCPBProperties *props;
2093 int vbv_delay, min_delay;
2094 double inbits = s->avctx->rc_max_rate *
2095 av_q2d(s->avctx->time_base);
2096 int minbits = s->frame_bits - 8 *
2097 (s->vbv_delay_ptr - s->pb.buf - 1);
2098 double bits = s->rc_context.buffer_index + minbits - inbits;
2101 av_log(s->avctx, AV_LOG_ERROR,
2102 "Internal error, negative bits\n");
2104 assert(s->repeat_first_field == 0);
/* vbv_delay is expressed in 90 kHz clock ticks. */
2106 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
2107 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
2108 s->avctx->rc_max_rate;
2110 vbv_delay = FFMAX(vbv_delay, min_delay);
2112 av_assert0(vbv_delay < 0xFFFF);
/* Patch the 16-bit vbv_delay field in-place in the written header. */
2114 s->vbv_delay_ptr[0] &= 0xF8;
2115 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2116 s->vbv_delay_ptr[1] = vbv_delay >> 5;
2117 s->vbv_delay_ptr[2] &= 0x07;
2118 s->vbv_delay_ptr[2] |= vbv_delay << 3;
2120 props = av_cpb_properties_alloc(&props_size);
2122 return AVERROR(ENOMEM);
2123 props->vbv_delay = vbv_delay * 300;
2125 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
2126 (uint8_t*)props, props_size);
2132 #if FF_API_VBV_DELAY
2133 FF_DISABLE_DEPRECATION_WARNINGS
2134 avctx->vbv_delay = vbv_delay * 300;
2135 FF_ENABLE_DEPRECATION_WARNINGS
2138 s->total_bits += s->frame_bits;
2139 #if FF_API_STAT_BITS
2140 FF_DISABLE_DEPRECATION_WARNINGS
2141 avctx->frame_bits = s->frame_bits;
2142 FF_ENABLE_DEPRECATION_WARNINGS
/* Set pkt timing: dts lags pts by dts_delta when B-frames reorder. */
2146 pkt->pts = s->current_picture.f->pts;
2147 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2148 if (!s->current_picture.f->coded_picture_number)
2149 pkt->dts = pkt->pts - s->dts_delta;
2151 pkt->dts = s->reordered_pts;
2152 s->reordered_pts = pkt->pts;
2154 pkt->dts = pkt->pts;
2155 if (s->current_picture.f->key_frame)
2156 pkt->flags |= AV_PKT_FLAG_KEY;
2158 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2163 /* release non-reference frames */
2164 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2165 if (!s->picture[i].reference)
2166 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2169 av_assert1((s->frame_bits & 7) == 0);
2171 pkt->size = s->frame_bits / 8;
2172 *got_packet = !!pkt->size;
/* Zero out a block whose only contents are a few small low-frequency
 * coefficients: score each nonzero coefficient by position (tab[]) and
 * magnitude, and if the total stays below `threshold`, clear the block
 * (keeping the DC coefficient when skip_dc is set).
 * NOTE(review): the skip_dc setup, score accumulation lines and some
 * closers are elided in this excerpt. */
2176 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2177 int n, int threshold)
/* Positional weight: low-frequency positions count more. */
2179 static const char tab[64] = {
2180 3, 2, 2, 1, 1, 1, 1, 1,
2181 1, 1, 1, 1, 1, 1, 1, 1,
2182 1, 1, 1, 1, 1, 1, 1, 1,
2183 0, 0, 0, 0, 0, 0, 0, 0,
2184 0, 0, 0, 0, 0, 0, 0, 0,
2185 0, 0, 0, 0, 0, 0, 0, 0,
2186 0, 0, 0, 0, 0, 0, 0, 0,
2187 0, 0, 0, 0, 0, 0, 0, 0
2192 int16_t *block = s->block[n];
2193 const int last_index = s->block_last_index[n];
/* Negative threshold encodes "preserve DC" (per elided line 2197). */
2196 if (threshold < 0) {
2198 threshold = -threshold;
2202 /* Are all we could set to zero already zero? */
2203 if (last_index <= skip_dc - 1)
2206 for (i = 0; i <= last_index; i++) {
2207 const int j = s->intra_scantable.permutated[i];
2208 const int level = FFABS(block[j]);
2210 if (skip_dc && i == 0)
2214 } else if (level > 1) {
/* Block kept as-is when the score reaches the threshold. */
2220 if (score >= threshold)
2222 for (i = skip_dc; i <= last_index; i++) {
2223 const int j = s->intra_scantable.permutated[i];
2227 s->block_last_index[n] = 0;
2229 s->block_last_index[n] = -1;
/* Clamp quantized coefficients into [min_qcoeff, max_qcoeff] so they are
 * representable in the target bitstream syntax; log once per MB-decision
 * when clipping occurred under the simple decision mode.
 * NOTE(review): the signature tail, intra check and clamping assignments
 * are elided in this excerpt. */
2232 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2236 const int maxlevel = s->max_qcoeff;
2237 const int minlevel = s->min_qcoeff;
2241 i = 1; // skip clipping of intra dc
2245 for (; i <= last_index; i++) {
2246 const int j = s->intra_scantable.permutated[i];
2247 int level = block[j];
2249 if (level > maxlevel) {
2252 } else if (level < minlevel) {
2260 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2261 av_log(s->avctx, AV_LOG_INFO,
2262 "warning, clipping %d dct coefficients to %d..%d\n",
2263 overflow, minlevel, maxlevel);
/* Compute a perceptual weight for each pixel of an 8x8 block from local
 * variance in its 3x3 neighbourhood (flat areas get higher weight);
 * used by the trellis/refine quantizers for visual masking.
 * NOTE(review): count/sum/sqr initialisation and loop closers are elided
 * in this excerpt. */
2266 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2270 for (y = 0; y < 8; y++) {
2271 for (x = 0; x < 8; x++) {
/* Clamped 3x3 neighbourhood around (x, y). */
2277 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2278 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2279 int v = ptr[x2 + y2 * stride];
/* 36*sqrt(count*sqr - sum^2)/count ~ scaled local std deviation. */
2285 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2290 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2291 int motion_x, int motion_y,
2292 int mb_block_height,
2296 int16_t weight[12][64];
2297 int16_t orig[12][64];
2298 const int mb_x = s->mb_x;
2299 const int mb_y = s->mb_y;
2302 int dct_offset = s->linesize * 8; // default for progressive frames
2303 int uv_dct_offset = s->uvlinesize * 8;
2304 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2305 ptrdiff_t wrap_y, wrap_c;
2307 for (i = 0; i < mb_block_count; i++)
2308 skip_dct[i] = s->skipdct;
2310 if (s->adaptive_quant) {
2311 const int last_qp = s->qscale;
2312 const int mb_xy = mb_x + mb_y * s->mb_stride;
2314 s->lambda = s->lambda_table[mb_xy];
2317 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2318 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2319 s->dquant = s->qscale - last_qp;
2321 if (s->out_format == FMT_H263) {
2322 s->dquant = av_clip(s->dquant, -2, 2);
2324 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2326 if (s->pict_type == AV_PICTURE_TYPE_B) {
2327 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2330 if (s->mv_type == MV_TYPE_8X8)
2336 ff_set_qscale(s, last_qp + s->dquant);
2337 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2338 ff_set_qscale(s, s->qscale + s->dquant);
2340 wrap_y = s->linesize;
2341 wrap_c = s->uvlinesize;
2342 ptr_y = s->new_picture.f->data[0] +
2343 (mb_y * 16 * wrap_y) + mb_x * 16;
2344 ptr_cb = s->new_picture.f->data[1] +
2345 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2346 ptr_cr = s->new_picture.f->data[2] +
2347 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2349 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2350 uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2351 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2352 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2353 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2355 16, 16, mb_x * 16, mb_y * 16,
2356 s->width, s->height);
2358 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2360 mb_block_width, mb_block_height,
2361 mb_x * mb_block_width, mb_y * mb_block_height,
2363 ptr_cb = ebuf + 16 * wrap_y;
2364 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2366 mb_block_width, mb_block_height,
2367 mb_x * mb_block_width, mb_y * mb_block_height,
2369 ptr_cr = ebuf + 16 * wrap_y + 16;
2373 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2374 int progressive_score, interlaced_score;
2376 s->interlaced_dct = 0;
2377 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2378 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2379 NULL, wrap_y, 8) - 400;
2381 if (progressive_score > 0) {
2382 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2383 NULL, wrap_y * 2, 8) +
2384 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2385 NULL, wrap_y * 2, 8);
2386 if (progressive_score > interlaced_score) {
2387 s->interlaced_dct = 1;
2389 dct_offset = wrap_y;
2390 uv_dct_offset = wrap_c;
2392 if (s->chroma_format == CHROMA_422 ||
2393 s->chroma_format == CHROMA_444)
2399 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2400 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2401 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2402 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2404 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2408 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2409 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2410 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2411 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2412 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2413 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2414 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2415 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2416 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2417 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2418 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2419 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2423 op_pixels_func (*op_pix)[4];
2424 qpel_mc_func (*op_qpix)[16];
2425 uint8_t *dest_y, *dest_cb, *dest_cr;
2427 dest_y = s->dest[0];
2428 dest_cb = s->dest[1];
2429 dest_cr = s->dest[2];
2431 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2432 op_pix = s->hdsp.put_pixels_tab;
2433 op_qpix = s->qdsp.put_qpel_pixels_tab;
2435 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2436 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2439 if (s->mv_dir & MV_DIR_FORWARD) {
2440 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2441 s->last_picture.f->data,
2443 op_pix = s->hdsp.avg_pixels_tab;
2444 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2446 if (s->mv_dir & MV_DIR_BACKWARD) {
2447 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2448 s->next_picture.f->data,
2452 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2453 int progressive_score, interlaced_score;
2455 s->interlaced_dct = 0;
2456 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2457 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2461 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2462 progressive_score -= 400;
2464 if (progressive_score > 0) {
2465 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2467 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2471 if (progressive_score > interlaced_score) {
2472 s->interlaced_dct = 1;
2474 dct_offset = wrap_y;
2475 uv_dct_offset = wrap_c;
2477 if (s->chroma_format == CHROMA_422)
2483 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2484 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2485 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2486 dest_y + dct_offset, wrap_y);
2487 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2488 dest_y + dct_offset + 8, wrap_y);
2490 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2494 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2495 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2496 if (!s->chroma_y_shift) { /* 422 */
2497 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2498 dest_cb + uv_dct_offset, wrap_c);
2499 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2500 dest_cr + uv_dct_offset, wrap_c);
2503 /* pre quantization */
2504 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2505 2 * s->qscale * s->qscale) {
2507 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2509 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2511 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2512 wrap_y, 8) < 20 * s->qscale)
2514 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2515 wrap_y, 8) < 20 * s->qscale)
2517 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2519 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2521 if (!s->chroma_y_shift) { /* 422 */
2522 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2523 dest_cb + uv_dct_offset,
2524 wrap_c, 8) < 20 * s->qscale)
2526 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2527 dest_cr + uv_dct_offset,
2528 wrap_c, 8) < 20 * s->qscale)
2534 if (s->quantizer_noise_shaping) {
2536 get_visual_weight(weight[0], ptr_y , wrap_y);
2538 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2540 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2542 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2544 get_visual_weight(weight[4], ptr_cb , wrap_c);
2546 get_visual_weight(weight[5], ptr_cr , wrap_c);
2547 if (!s->chroma_y_shift) { /* 422 */
2549 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2552 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2555 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2558 /* DCT & quantize */
2559 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2561 for (i = 0; i < mb_block_count; i++) {
2564 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2565 // FIXME we could decide to change to quantizer instead of
2567 // JS: I don't think that would be a good idea it could lower
2568 // quality instead of improve it. Just INTRADC clipping
2569 // deserves changes in quantizer
2571 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2573 s->block_last_index[i] = -1;
2575 if (s->quantizer_noise_shaping) {
2576 for (i = 0; i < mb_block_count; i++) {
2578 s->block_last_index[i] =
2579 dct_quantize_refine(s, s->block[i], weight[i],
2580 orig[i], i, s->qscale);
2585 if (s->luma_elim_threshold && !s->mb_intra)
2586 for (i = 0; i < 4; i++)
2587 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2588 if (s->chroma_elim_threshold && !s->mb_intra)
2589 for (i = 4; i < mb_block_count; i++)
2590 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2592 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2593 for (i = 0; i < mb_block_count; i++) {
2594 if (s->block_last_index[i] == -1)
2595 s->coded_score[i] = INT_MAX / 256;
2600 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2601 s->block_last_index[4] =
2602 s->block_last_index[5] = 0;
2604 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2605 if (!s->chroma_y_shift) { /* 422 / 444 */
2606 for (i=6; i<12; i++) {
2607 s->block_last_index[i] = 0;
2608 s->block[i][0] = s->block[4][0];
2613 // non c quantize code returns incorrect block_last_index FIXME
2614 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2615 for (i = 0; i < mb_block_count; i++) {
2617 if (s->block_last_index[i] > 0) {
2618 for (j = 63; j > 0; j--) {
2619 if (s->block[i][s->intra_scantable.permutated[j]])
2622 s->block_last_index[i] = j;
2627 /* huffman encode */
2628 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2629 case AV_CODEC_ID_MPEG1VIDEO:
2630 case AV_CODEC_ID_MPEG2VIDEO:
2631 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2632 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2634 case AV_CODEC_ID_MPEG4:
2635 if (CONFIG_MPEG4_ENCODER)
2636 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2638 case AV_CODEC_ID_MSMPEG4V2:
2639 case AV_CODEC_ID_MSMPEG4V3:
2640 case AV_CODEC_ID_WMV1:
2641 if (CONFIG_MSMPEG4_ENCODER)
2642 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2644 case AV_CODEC_ID_WMV2:
2645 if (CONFIG_WMV2_ENCODER)
2646 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2648 case AV_CODEC_ID_H261:
2649 if (CONFIG_H261_ENCODER)
2650 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2652 case AV_CODEC_ID_H263:
2653 case AV_CODEC_ID_H263P:
2654 case AV_CODEC_ID_FLV1:
2655 case AV_CODEC_ID_RV10:
2656 case AV_CODEC_ID_RV20:
2657 if (CONFIG_H263_ENCODER)
2658 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2660 case AV_CODEC_ID_MJPEG:
2661 case AV_CODEC_ID_AMV:
2662 if (CONFIG_MJPEG_ENCODER)
2663 ff_mjpeg_encode_mb(s, s->block);
2670 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
/* Thin dispatcher: forwards to encode_mb_internal() with constants chosen by
 * the chroma subsampling mode. The three trailing arguments are presumably
 * chroma block geometry and the total number of 8x8 DCT blocks per
 * macroblock: 4:2:0 -> 6 blocks, 4:2:2 -> 8 blocks, anything else
 * (4:4:4) -> 12 blocks. (NOTE(review): parameter names of
 * encode_mb_internal() are not visible here — confirm the argument order.) */
2672 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2673 else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2674 else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
2677 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* Snapshot (s -> d) the encoder state that encoding one macroblock mutates,
 * so a trial encode can later be rolled back or compared (used by the
 * mb-decision / RD machinery, see encode_mb_hq()). */
2680 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2683 d->mb_skip_run= s->mb_skip_run;
/* DC predictors; presumably copied for each component in a loop whose
 * header is not visible here — confirm it covers all 3 components. */
2685 d->last_dc[i] = s->last_dc[i];
/* Per-frame bit-usage statistics (rate accounting). */
2688 d->mv_bits= s->mv_bits;
2689 d->i_tex_bits= s->i_tex_bits;
2690 d->p_tex_bits= s->p_tex_bits;
2691 d->i_count= s->i_count;
2692 d->f_count= s->f_count;
2693 d->b_count= s->b_count;
2694 d->skip_count= s->skip_count;
2695 d->misc_bits= s->misc_bits;
/* Quantizer state. */
2699 d->qscale= s->qscale;
2700 d->dquant= s->dquant;
/* MPEG-4 escape-code (third escape) level-length state. */
2702 d->esc3_level_length= s->esc3_level_length;
2705 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* Counterpart of copy_context_before_encode(): after a (trial) encode,
 * copy the resulting state (s -> d). Copies a superset of the "before"
 * snapshot: in addition it records the motion vectors actually used, the
 * chosen macroblock mode and per-block coding results. */
2708 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2709 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2712 d->mb_skip_run= s->mb_skip_run;
/* DC predictors; loop header for the component index is not visible here. */
2714 d->last_dc[i] = s->last_dc[i];
/* Bit-usage statistics. */
2717 d->mv_bits= s->mv_bits;
2718 d->i_tex_bits= s->i_tex_bits;
2719 d->p_tex_bits= s->p_tex_bits;
2720 d->i_count= s->i_count;
2721 d->f_count= s->f_count;
2722 d->b_count= s->b_count;
2723 d->skip_count= s->skip_count;
2724 d->misc_bits= s->misc_bits;
/* Macroblock mode decided during the encode. */
2726 d->mb_intra= s->mb_intra;
2727 d->mb_skipped= s->mb_skipped;
2728 d->mv_type= s->mv_type;
2729 d->mv_dir= s->mv_dir;
/* With data partitioning the auxiliary bitstream writers travel along. */
2731 if(s->data_partitioning){
2733 d->tex_pb= s->tex_pb;
/* Per-block last nonzero coefficient index; loop header not visible here. */
2737 d->block_last_index[i]= s->block_last_index[i];
2738 d->interlaced_dct= s->interlaced_dct;
2739 d->qscale= s->qscale;
2741 d->esc3_level_length= s->esc3_level_length;
2744 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2745 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2746 int *dmin, int *next_block, int motion_x, int motion_y)
/* Trial-encode the current macroblock with coding mode `type` into one of
 * two scratch bitstream buffers (double-buffered via *next_block), compute
 * its cost, and record the attempt in *best when it beats *dmin.
 * NOTE(review): the *dmin comparison and *next_block flip are not visible
 * in this excerpt — only the winning-path tail (copy_context_after_encode)
 * is shown; confirm against the full function. */
2749 uint8_t *dest_backup[3];
/* Start each trial from the identical pre-encode state. */
2751 copy_context_before_encode(s, backup, type);
/* Route coefficient storage and bitstream output to the trial buffers. */
2753 s->block= s->blocks[*next_block];
2754 s->pb= pb[*next_block];
2755 if(s->data_partitioning){
2756 s->pb2 = pb2 [*next_block];
2757 s->tex_pb= tex_pb[*next_block];
/* Redirect reconstruction output into the RD scratchpad so the real
 * picture is not clobbered by a trial that may be discarded. */
2761 memcpy(dest_backup, s->dest, sizeof(s->dest));
2762 s->dest[0] = s->sc.rd_scratchpad;
2763 s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2764 s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
/* Scratchpad layout above assumes at least 32 bytes per line. */
2765 av_assert0(s->linesize >= 32); //FIXME
2768 encode_mb(s, motion_x, motion_y);
/* Base cost: bits produced (all partitions when data partitioning). */
2770 score= put_bits_count(&s->pb);
2771 if(s->data_partitioning){
2772 score+= put_bits_count(&s->pb2);
2773 score+= put_bits_count(&s->tex_pb);
/* Full rate-distortion mode: reconstruct and score bits*lambda2 + SSE. */
2776 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2777 ff_mpv_reconstruct_mb(s, s->block);
2779 score *= s->lambda2;
2780 score += sse_mb(s) << FF_LAMBDA_SHIFT;
/* Restore the real destination pointers. */
2784 memcpy(s->dest, dest_backup, sizeof(s->dest));
/* Winning trial: remember its full post-encode state. */
2791 copy_context_after_encode(best, s, type);
2795 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
/* Sum of squared differences between two w x h pixel blocks.
 * 16x16 and 8x8 use the optimized mecc.sse[] implementations; other sizes
 * fall back to a scalar loop over the shared square table, which is offset
 * by 256 so negative pixel differences index correctly. */
2796 uint32_t *sq = ff_square_tab + 256;
/* Fast paths. (The `if (w==16 && h==16)` guard for the first return is on
 * a line not visible in this excerpt.) */
2801 return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2802 else if(w==8 && h==8)
2803 return s->mecc.sse[1](NULL, src1, src2, stride, 8);
/* Generic fallback: accumulate the squared per-pixel differences. */
2807 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2816 static int sse_mb(MpegEncContext *s){
/* Distortion of the current macroblock: squared error between the source
 * frame (new_picture) and the reconstructed pixels in s->dest[].
 * w/h are clipped at the right/bottom frame edge so partial macroblocks
 * only measure valid pixels. */
2820 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2821 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
/* Full 16x16 macroblock: use the optimized comparison functions; NSSE
 * (noise-preserving SSE) when the user selected it as the mb comparison. */
2824 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2825 return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2826 s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2827 s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2829 return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2830 s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2831 s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
/* Edge macroblock: generic sse() on the clipped luma size, with chroma at
 * half resolution in both dimensions (w>>1, h>>1). */
2834 return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2835 +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2836 +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2839 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
/* Slice-thread worker: runs the motion-estimation pre-pass for P frames
 * over this context's rows [start_mb_y, end_mb_y), iterating macroblocks
 * in *reverse* raster order. */
2840 MpegEncContext *s= *(void**)arg;
/* Use the (usually smaller) pre-pass diamond size. */
2844 s->me.dia_size= s->avctx->pre_dia_size;
2845 s->first_slice_line=1;
2846 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2847 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2848 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2850 s->first_slice_line=0;
2858 static int estimate_motion_thread(AVCodecContext *c, void *arg){
/* Slice-thread worker: full motion estimation over this context's rows.
 * Computes the motion vector and macroblock type for every MB and stores
 * them in the context (picked up later by the encoding pass). */
2859 MpegEncContext *s= *(void**)arg;
2861 ff_check_alignment();
2863 s->me.dia_size= s->avctx->dia_size;
2864 s->first_slice_line=1;
2865 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2866 s->mb_x=0; //for block init below
2867 ff_init_block_index(s);
2868 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
/* Advance the four luma block indices one macroblock to the right. */
2869 s->block_index[0]+=2;
2870 s->block_index[1]+=2;
2871 s->block_index[2]+=2;
2872 s->block_index[3]+=2;
2874 /* compute motion vector & mb_type and store in context */
2875 if(s->pict_type==AV_PICTURE_TYPE_B)
2876 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2878 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2880 s->first_slice_line=0;
2885 static int mb_var_thread(AVCodecContext *c, void *arg){
/* Slice-thread worker: computes, for every macroblock in this context's
 * rows, the spatial variance and mean of the 16x16 luma block of the
 * source frame, storing them in current_picture and accumulating the
 * variance sum for rate-control use. */
2886 MpegEncContext *s= *(void**)arg;
2889 ff_check_alignment();
2891 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2892 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2895 uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2897 int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
/* varc ~= (sum(x^2) - sum(x)^2/256) / 256, with rounding bias; the
 * unsigned cast avoids signed overflow in sum*sum. */
2899 varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2900 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2902 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2903 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2904 s->me.mb_var_sum_temp += varc;
2910 static void write_slice_end(MpegEncContext *s){
/* Finalize the current slice's bitstream: codec-specific trailer
 * (MPEG-4 partition merge + stuffing, MJPEG stuffing), then byte-align
 * and flush the main PutBitContext. */
2911 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2912 if(s->partitioned_frame){
2913 ff_mpeg4_merge_partitions(s);
2916 ff_mpeg4_stuffing(&s->pb);
2917 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2918 ff_mjpeg_encode_stuffing(s);
2921 avpriv_align_put_bits(&s->pb);
2922 flush_put_bits(&s->pb);
/* First-pass stats: attribute the alignment/stuffing bits to misc_bits. */
2924 if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2925 s->misc_bits+= get_bits_diff(s);
2928 static void write_mb_info(MpegEncContext *s)
/* Append one fixed 12-byte macroblock-info record (H.263 side data) into
 * the last slot of the mb_info buffer: bitstream offset, qscale, GOB
 * number, macroblock address and the predicted motion vector. */
2930 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2931 int offset = put_bits_count(&s->pb);
2932 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2933 int gobn = s->mb_y / s->gob_index;
2935 if (CONFIG_H263_ENCODER)
2936 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2937 bytestream_put_le32(&ptr, offset);
2938 bytestream_put_byte(&ptr, s->qscale);
2939 bytestream_put_byte(&ptr, gobn);
2940 bytestream_put_le16(&ptr, mba);
2941 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2942 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2943 /* 4MV not implemented */
2944 bytestream_put_byte(&ptr, 0); /* hmv2 */
2945 bytestream_put_byte(&ptr, 0); /* vmv2 */
2948 static void update_mb_info(MpegEncContext *s)
/* Bookkeeping for the mb_info side-data: once at least s->mb_info bytes
 * have been written since the last recorded position, reserve a new
 * 12-byte record slot; the record itself is written (by write_mb_info())
 * after the next start code. `startcode` selects which of the two phases
 * this call is in. (NOTE(review): several lines, including the write_mb_info
 * call and early returns, are not visible in this excerpt.) */
2952 if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2953 s->mb_info_size += 12;
2954 s->prev_mb_info = s->last_mb_info;
2957 s->prev_mb_info = put_bits_count(&s->pb)/8;
2958 /* This might have incremented mb_info_size above, and we return without
2959 * actually writing any info into that slot yet. But in that case,
2960 * this will be called again at the start of the after writing the
2961 * start code, actually writing the mb info. */
2965 s->last_mb_info = put_bits_count(&s->pb)/8;
2966 if (!s->mb_info_size)
2967 s->mb_info_size += 12;
2971 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
/* Grow the shared output buffer when fewer than `threshold` bytes remain
 * in the PutBitContext. Only applies when there is a single slice context
 * and the PutBitContext writes into avctx's internal byte buffer.
 * After reallocation the PutBitContext is rebased and the two raw pointers
 * into the buffer (ptr_lastgob, vbv_delay_ptr) are re-derived from their
 * saved offsets. Returns 0 on success (implicit here), AVERROR(ENOMEM) on
 * allocation failure/overflow, AVERROR(EINVAL) if space is still short. */
2973 if ( s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2974 && s->slice_context_count == 1
2975 && s->pb.buf == s->avctx->internal->byte_buffer) {
/* Save positions as offsets; the buffer base is about to change. */
2976 int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2977 int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2979 uint8_t *new_buffer = NULL;
2980 int new_buffer_size = 0;
/* Guard against int overflow of the grown size. */
2982 if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2983 av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2984 return AVERROR(ENOMEM);
2989 av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2990 s->avctx->internal->byte_buffer_size + size_increase);
2992 return AVERROR(ENOMEM);
/* Preserve already-written bytes, then swap the buffers. */
2994 memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2995 av_free(s->avctx->internal->byte_buffer);
2996 s->avctx->internal->byte_buffer = new_buffer;
2997 s->avctx->internal->byte_buffer_size = new_buffer_size;
2998 rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2999 s->ptr_lastgob = s->pb.buf + lastgob_pos;
3000 s->vbv_delay_ptr = s->pb.buf + vbv_pos;
/* Still not enough room (e.g. multi-slice or external buffer): error out. */
3002 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
3003 return AVERROR(EINVAL);
3007 static int encode_thread(AVCodecContext *c, void *arg){
3008 MpegEncContext *s= *(void**)arg;
3010 int chr_h= 16>>s->chroma_y_shift;
3012 MpegEncContext best_s = { 0 }, backup_s;
3013 uint8_t bit_buf[2][MAX_MB_BYTES];
3014 uint8_t bit_buf2[2][MAX_MB_BYTES];
3015 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
3016 PutBitContext pb[2], pb2[2], tex_pb[2];
3018 ff_check_alignment();
3021 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
3022 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
3023 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
3026 s->last_bits= put_bits_count(&s->pb);
3037 /* init last dc values */
3038 /* note: quant matrix value (8) is implied here */
3039 s->last_dc[i] = 128 << s->intra_dc_precision;
3041 s->current_picture.encoding_error[i] = 0;
3043 if(s->codec_id==AV_CODEC_ID_AMV){
3044 s->last_dc[0] = 128*8/13;
3045 s->last_dc[1] = 128*8/14;
3046 s->last_dc[2] = 128*8/14;
3049 memset(s->last_mv, 0, sizeof(s->last_mv));
3053 switch(s->codec_id){
3054 case AV_CODEC_ID_H263:
3055 case AV_CODEC_ID_H263P:
3056 case AV_CODEC_ID_FLV1:
3057 if (CONFIG_H263_ENCODER)
3058 s->gob_index = H263_GOB_HEIGHT(s->height);
3060 case AV_CODEC_ID_MPEG4:
3061 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
3062 ff_mpeg4_init_partitions(s);
3068 s->first_slice_line = 1;
3069 s->ptr_lastgob = s->pb.buf;
3070 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
3074 ff_set_qscale(s, s->qscale);
3075 ff_init_block_index(s);
3077 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3078 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
3079 int mb_type= s->mb_type[xy];
3083 int size_increase = s->avctx->internal->byte_buffer_size/4
3084 + s->mb_width*MAX_MB_BYTES;
3086 ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
3087 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
3088 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
3091 if(s->data_partitioning){
3092 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
3093 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
3094 av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3100 s->mb_y = mb_y; // moved into loop, can get changed by H.261
3101 ff_update_block_index(s);
3103 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3104 ff_h261_reorder_mb_index(s);
3105 xy= s->mb_y*s->mb_stride + s->mb_x;
3106 mb_type= s->mb_type[xy];
3109 /* write gob / video packet header */
3111 int current_packet_size, is_gob_start;
3113 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
3115 is_gob_start = s->rtp_payload_size &&
3116 current_packet_size >= s->rtp_payload_size &&
3119 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3121 switch(s->codec_id){
3122 case AV_CODEC_ID_H263:
3123 case AV_CODEC_ID_H263P:
3124 if(!s->h263_slice_structured)
3125 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3127 case AV_CODEC_ID_MPEG2VIDEO:
3128 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3129 case AV_CODEC_ID_MPEG1VIDEO:
3130 if(s->mb_skip_run) is_gob_start=0;
3132 case AV_CODEC_ID_MJPEG:
3133 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3138 if(s->start_mb_y != mb_y || mb_x!=0){
3141 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3142 ff_mpeg4_init_partitions(s);
3146 av_assert2((put_bits_count(&s->pb)&7) == 0);
3147 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3149 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3150 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3151 int d = 100 / s->error_rate;
3153 current_packet_size=0;
3154 s->pb.buf_ptr= s->ptr_lastgob;
3155 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3159 #if FF_API_RTP_CALLBACK
3160 FF_DISABLE_DEPRECATION_WARNINGS
3161 if (s->avctx->rtp_callback){
3162 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3163 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3165 FF_ENABLE_DEPRECATION_WARNINGS
3167 update_mb_info(s, 1);
3169 switch(s->codec_id){
3170 case AV_CODEC_ID_MPEG4:
3171 if (CONFIG_MPEG4_ENCODER) {
3172 ff_mpeg4_encode_video_packet_header(s);
3173 ff_mpeg4_clean_buffers(s);
3176 case AV_CODEC_ID_MPEG1VIDEO:
3177 case AV_CODEC_ID_MPEG2VIDEO:
3178 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3179 ff_mpeg1_encode_slice_header(s);
3180 ff_mpeg1_clean_buffers(s);
3183 case AV_CODEC_ID_H263:
3184 case AV_CODEC_ID_H263P:
3185 if (CONFIG_H263_ENCODER)
3186 ff_h263_encode_gob_header(s, mb_y);
3190 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3191 int bits= put_bits_count(&s->pb);
3192 s->misc_bits+= bits - s->last_bits;
3196 s->ptr_lastgob += current_packet_size;
3197 s->first_slice_line=1;
3198 s->resync_mb_x=mb_x;
3199 s->resync_mb_y=mb_y;
3203 if( (s->resync_mb_x == s->mb_x)
3204 && s->resync_mb_y+1 == s->mb_y){
3205 s->first_slice_line=0;
3209 s->dquant=0; //only for QP_RD
3211 update_mb_info(s, 0);
3213 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3215 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3217 copy_context_before_encode(&backup_s, s, -1);
3219 best_s.data_partitioning= s->data_partitioning;
3220 best_s.partitioned_frame= s->partitioned_frame;
3221 if(s->data_partitioning){
3222 backup_s.pb2= s->pb2;
3223 backup_s.tex_pb= s->tex_pb;
3226 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3227 s->mv_dir = MV_DIR_FORWARD;
3228 s->mv_type = MV_TYPE_16X16;
3230 s->mv[0][0][0] = s->p_mv_table[xy][0];
3231 s->mv[0][0][1] = s->p_mv_table[xy][1];
3232 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3233 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3235 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3236 s->mv_dir = MV_DIR_FORWARD;
3237 s->mv_type = MV_TYPE_FIELD;
3240 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3241 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3242 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3244 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3245 &dmin, &next_block, 0, 0);
3247 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3248 s->mv_dir = MV_DIR_FORWARD;
3249 s->mv_type = MV_TYPE_16X16;
3253 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3254 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3256 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3257 s->mv_dir = MV_DIR_FORWARD;
3258 s->mv_type = MV_TYPE_8X8;
3261 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3262 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3264 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3265 &dmin, &next_block, 0, 0);
3267 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3268 s->mv_dir = MV_DIR_FORWARD;
3269 s->mv_type = MV_TYPE_16X16;
3271 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3272 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3273 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3274 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3276 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3277 s->mv_dir = MV_DIR_BACKWARD;
3278 s->mv_type = MV_TYPE_16X16;
3280 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3281 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3282 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3283 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3285 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3286 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3287 s->mv_type = MV_TYPE_16X16;
3289 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3290 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3291 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3292 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3293 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3294 &dmin, &next_block, 0, 0);
3296 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3297 s->mv_dir = MV_DIR_FORWARD;
3298 s->mv_type = MV_TYPE_FIELD;
3301 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3302 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3303 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3305 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3306 &dmin, &next_block, 0, 0);
3308 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3309 s->mv_dir = MV_DIR_BACKWARD;
3310 s->mv_type = MV_TYPE_FIELD;
3313 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3314 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3315 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3317 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3318 &dmin, &next_block, 0, 0);
3320 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3321 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3322 s->mv_type = MV_TYPE_FIELD;
3324 for(dir=0; dir<2; dir++){
3326 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3327 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3328 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3331 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3332 &dmin, &next_block, 0, 0);
3334 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3336 s->mv_type = MV_TYPE_16X16;
3340 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3341 &dmin, &next_block, 0, 0);
3342 if(s->h263_pred || s->h263_aic){
3344 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3346 ff_clean_intra_table_entries(s); //old mode?
3350 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3351 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3352 const int last_qp= backup_s.qscale;
3355 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3356 static const int dquant_tab[4]={-1,1,-2,2};
3357 int storecoefs = s->mb_intra && s->dc_val[0];
3359 av_assert2(backup_s.dquant == 0);
3362 s->mv_dir= best_s.mv_dir;
3363 s->mv_type = MV_TYPE_16X16;
3364 s->mb_intra= best_s.mb_intra;
3365 s->mv[0][0][0] = best_s.mv[0][0][0];
3366 s->mv[0][0][1] = best_s.mv[0][0][1];
3367 s->mv[1][0][0] = best_s.mv[1][0][0];
3368 s->mv[1][0][1] = best_s.mv[1][0][1];
3370 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3371 for(; qpi<4; qpi++){
3372 int dquant= dquant_tab[qpi];
3373 qp= last_qp + dquant;
3374 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3376 backup_s.dquant= dquant;
3379 dc[i]= s->dc_val[0][ s->block_index[i] ];
3380 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3384 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3385 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3386 if(best_s.qscale != qp){
3389 s->dc_val[0][ s->block_index[i] ]= dc[i];
3390 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3397 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3398 int mx= s->b_direct_mv_table[xy][0];
3399 int my= s->b_direct_mv_table[xy][1];
3401 backup_s.dquant = 0;
3402 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3404 ff_mpeg4_set_direct_mv(s, mx, my);
3405 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3406 &dmin, &next_block, mx, my);
3408 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3409 backup_s.dquant = 0;
3410 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3412 ff_mpeg4_set_direct_mv(s, 0, 0);
3413 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3414 &dmin, &next_block, 0, 0);
3416 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3419 coded |= s->block_last_index[i];
3422 memcpy(s->mv, best_s.mv, sizeof(s->mv));
3423 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3424 mx=my=0; //FIXME find the one we actually used
3425 ff_mpeg4_set_direct_mv(s, mx, my);
3426 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3434 s->mv_dir= best_s.mv_dir;
3435 s->mv_type = best_s.mv_type;
3437 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3438 s->mv[0][0][1] = best_s.mv[0][0][1];
3439 s->mv[1][0][0] = best_s.mv[1][0][0];
3440 s->mv[1][0][1] = best_s.mv[1][0][1];*/
3443 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3444 &dmin, &next_block, mx, my);
3449 s->current_picture.qscale_table[xy] = best_s.qscale;
3451 copy_context_after_encode(s, &best_s, -1);
3453 pb_bits_count= put_bits_count(&s->pb);
3454 flush_put_bits(&s->pb);
3455 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3458 if(s->data_partitioning){
3459 pb2_bits_count= put_bits_count(&s->pb2);
3460 flush_put_bits(&s->pb2);
3461 avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3462 s->pb2= backup_s.pb2;
3464 tex_pb_bits_count= put_bits_count(&s->tex_pb);
3465 flush_put_bits(&s->tex_pb);
3466 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3467 s->tex_pb= backup_s.tex_pb;
3469 s->last_bits= put_bits_count(&s->pb);
3471 if (CONFIG_H263_ENCODER &&
3472 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3473 ff_h263_update_motion_val(s);
3475 if(next_block==0){ //FIXME 16 vs linesize16
3476 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3477 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3478 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3481 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3482 ff_mpv_reconstruct_mb(s, s->block);
3484 int motion_x = 0, motion_y = 0;
3485 s->mv_type=MV_TYPE_16X16;
3486 // only one MB-Type possible
3489 case CANDIDATE_MB_TYPE_INTRA:
3492 motion_x= s->mv[0][0][0] = 0;
3493 motion_y= s->mv[0][0][1] = 0;
3495 case CANDIDATE_MB_TYPE_INTER:
3496 s->mv_dir = MV_DIR_FORWARD;
3498 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3499 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3501 case CANDIDATE_MB_TYPE_INTER_I:
3502 s->mv_dir = MV_DIR_FORWARD;
3503 s->mv_type = MV_TYPE_FIELD;
3506 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3507 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3508 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3511 case CANDIDATE_MB_TYPE_INTER4V:
3512 s->mv_dir = MV_DIR_FORWARD;
3513 s->mv_type = MV_TYPE_8X8;
3516 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3517 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3520 case CANDIDATE_MB_TYPE_DIRECT:
3521 if (CONFIG_MPEG4_ENCODER) {
3522 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3524 motion_x=s->b_direct_mv_table[xy][0];
3525 motion_y=s->b_direct_mv_table[xy][1];
3526 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3529 case CANDIDATE_MB_TYPE_DIRECT0:
3530 if (CONFIG_MPEG4_ENCODER) {
3531 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3533 ff_mpeg4_set_direct_mv(s, 0, 0);
3536 case CANDIDATE_MB_TYPE_BIDIR:
3537 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3539 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3540 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3541 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3542 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3544 case CANDIDATE_MB_TYPE_BACKWARD:
3545 s->mv_dir = MV_DIR_BACKWARD;
3547 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3548 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3550 case CANDIDATE_MB_TYPE_FORWARD:
3551 s->mv_dir = MV_DIR_FORWARD;
3553 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3554 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3556 case CANDIDATE_MB_TYPE_FORWARD_I:
3557 s->mv_dir = MV_DIR_FORWARD;
3558 s->mv_type = MV_TYPE_FIELD;
3561 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3562 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3563 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3566 case CANDIDATE_MB_TYPE_BACKWARD_I:
3567 s->mv_dir = MV_DIR_BACKWARD;
3568 s->mv_type = MV_TYPE_FIELD;
3571 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3572 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3573 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3576 case CANDIDATE_MB_TYPE_BIDIR_I:
3577 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3578 s->mv_type = MV_TYPE_FIELD;
3580 for(dir=0; dir<2; dir++){
3582 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3583 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3584 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3589 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3592 encode_mb(s, motion_x, motion_y);
3594 // RAL: Update last macroblock type
3595 s->last_mv_dir = s->mv_dir;
3597 if (CONFIG_H263_ENCODER &&
3598 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3599 ff_h263_update_motion_val(s);
3601 ff_mpv_reconstruct_mb(s, s->block);
3604 /* clean the MV table in IPS frames for direct mode in B-frames */
3605 if(s->mb_intra /* && I,P,S_TYPE */){
3606 s->p_mv_table[xy][0]=0;
3607 s->p_mv_table[xy][1]=0;
3610 if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3614 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3615 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3617 s->current_picture.encoding_error[0] += sse(
3618 s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3619 s->dest[0], w, h, s->linesize);
3620 s->current_picture.encoding_error[1] += sse(
3621 s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3622 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3623 s->current_picture.encoding_error[2] += sse(
3624 s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3625 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3628 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3629 ff_h263_loop_filter(s);
3631 ff_dlog(s->avctx, "MB %d %d bits\n",
3632 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3636 //not beautiful here but we must write it before flushing so it has to be here
3637 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3638 ff_msmpeg4_encode_ext_header(s);
3642 #if FF_API_RTP_CALLBACK
3643 FF_DISABLE_DEPRECATION_WARNINGS
3644 /* Send the last GOB if RTP */
3645 if (s->avctx->rtp_callback) {
3646 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3647 int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3648 /* Call the RTP callback to send the last GOB */
3650 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3652 FF_ENABLE_DEPRECATION_WARNINGS
/* MERGE(field): accumulate src->field into dst->field and zero the source,
 * so repeated merges never double-count a slice thread's statistics.
 * NOTE(review): as a multi-statement macro without do{}while(0), only the
 * first statement is guarded by an enclosing if — callers below always use
 * it as a full statement, which is why this is safe here. */
3658 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold per-slice-thread motion-estimation statistics (scene-change score and
 * MB variance sums) back into the main context after the ME pass. */
3659 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3660 MERGE(me.scene_change_score);
3661 MERGE(me.mc_mb_var_sum_temp);
3662 MERGE(me.mb_var_sum_temp);
/* Fold a slice thread's encode-pass state back into the main context:
 * bit/statistics counters, error-resilience counts, PSNR error sums, the
 * noise-reduction DCT error accumulators, and finally the thread's bitstream
 * (appended byte-aligned onto the main PutBitContext).
 * NOTE(review): this chunk is missing interior lines (e.g. the declaration
 * of i and several MERGE lines) — do not treat it as the complete function. */
3665 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3668 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3669 MERGE(dct_count[1]);
3678 MERGE(er.error_count);
3679 MERGE(padding_bug_score);
/* Per-plane SSE accumulators used for AV_CODEC_FLAG_PSNR reporting. */
3680 MERGE(current_picture.encoding_error[0]);
3681 MERGE(current_picture.encoding_error[1]);
3682 MERGE(current_picture.encoding_error[2]);
3684 if (dst->noise_reduction){
3685 for(i=0; i<64; i++){
3686 MERGE(dct_error_sum[0][i]);
3687 MERGE(dct_error_sum[1][i]);
/* Slice bitstreams must be byte-aligned before concatenation. */
3691 assert(put_bits_count(&src->pb) % 8 ==0);
3692 assert(put_bits_count(&dst->pb) % 8 ==0);
3693 avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3694 flush_put_bits(&dst->pb);
/* Pick the frame quality (lambda/qscale) for the current picture.
 * Priority: an explicitly queued next_lambda, then the rate controller
 * (XviD-style or native, chosen by rc_strategy) unless qscale is fixed.
 * With adaptive quantization the per-MB qscale tables are cleaned up in a
 * codec-specific way and lambda comes from lambda_table[0]; otherwise it is
 * taken from the picture quality. dry_run estimates without consuming state.
 * NOTE(review): interior lines (declarations, some braces/returns) are
 * missing from this chunk. */
3697 static int estimate_qp(MpegEncContext *s, int dry_run){
3698 if (s->next_lambda){
3699 s->current_picture_ptr->f->quality =
3700 s->current_picture.f->quality = s->next_lambda;
/* Only a real run consumes the queued lambda. */
3701 if(!dry_run) s->next_lambda= 0;
3702 } else if (!s->fixed_qscale) {
3705 if ((s->avctx->flags & AV_CODEC_FLAG_PASS2) && s->rc_strategy == MPV_RC_STRATEGY_XVID)
3706 quality = ff_xvid_rate_estimate_qscale(s, dry_run);
3709 quality = ff_rate_estimate_qscale(s, dry_run);
3710 s->current_picture_ptr->f->quality =
3711 s->current_picture.f->quality = quality;
/* Negative quality signals a rate-control failure. */
3712 if (s->current_picture.f->quality < 0)
3716 if(s->adaptive_quant){
3717 switch(s->codec_id){
3718 case AV_CODEC_ID_MPEG4:
3719 if (CONFIG_MPEG4_ENCODER)
3720 ff_clean_mpeg4_qscales(s);
3722 case AV_CODEC_ID_H263:
3723 case AV_CODEC_ID_H263P:
3724 case AV_CODEC_ID_FLV1:
3725 if (CONFIG_H263_ENCODER)
3726 ff_clean_h263_qscales(s);
3729 ff_init_qscale_tab(s);
3732 s->lambda= s->lambda_table[0];
3735 s->lambda = s->current_picture.f->quality;
3740 /* must be called before writing the header */
/* Update temporal distances from the current picture's PTS:
 * pp_time = distance between the two most recent non-B frames,
 * pb_time = distance from the previous non-B frame to this B-frame.
 * These drive B-frame direct-mode scaling and fcode decisions. */
3741 static void set_frame_distances(MpegEncContext * s){
3742 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3743 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3745 if(s->pict_type==AV_PICTURE_TYPE_B){
3746 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
/* A B-frame must lie strictly between its two references. */
3747 assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3749 s->pp_time= s->time - s->last_non_b_time;
3750 s->last_non_b_time= s->time;
3751 assert(s->picture_number==0 || s->pp_time > 0);
/* Top-level per-picture encode driver: sets timing/rounding state, runs
 * (possibly multi-threaded) motion estimation, handles scene-change I-frame
 * promotion, picks f_code/b_code and clips long MVs, estimates the frame QP,
 * builds the (M)JPEG/AMV quant matrices, writes the codec-specific picture
 * header, and finally dispatches encode_thread over all slice contexts and
 * merges their results.
 * NOTE(review): this chunk is heavily decimated — declarations (ret, bits,
 * i, a, b, ...), many braces, `break;`s and error-return lines are missing.
 * Comments below describe only what the visible lines establish. */
3755 static int encode_picture(MpegEncContext *s, int picture_number)
3759 int context_count = s->slice_context_count;
3761 s->picture_number = picture_number;
3763 /* Reset the average MB variance */
3764 s->me.mb_var_sum_temp =
3765 s->me.mc_mb_var_sum_temp = 0;
3767 /* we need to initialize some time vars before we can encode B-frames */
3768 // RAL: Condition added for MPEG1VIDEO
3769 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3770 set_frame_distances(s);
3771 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3772 ff_set_mpeg4_time(s);
3774 s->me.scene_change_score=0;
3776 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* no_rounding toggles the MC rounding mode; MSMPEG4v3+/H263P/MPEG4
 * flip-flop it between non-B frames. */
3778 if(s->pict_type==AV_PICTURE_TYPE_I){
3779 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3780 else s->no_rounding=0;
3781 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3782 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3783 s->no_rounding ^= 1;
/* Pass-2 rate control: a dry-run QP estimate plus the stored fcode. */
3786 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3787 if (estimate_qp(s,1) < 0)
3789 ff_get_2pass_fcode(s);
3790 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3791 if(s->pict_type==AV_PICTURE_TYPE_B)
3792 s->lambda= s->last_lambda_for[s->pict_type];
3794 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* Outside (A)MJPEG the chroma intra matrices alias the luma ones; free any
 * previously separate copies before re-aliasing. */
3798 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3799 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3800 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3801 s->q_chroma_intra_matrix = s->q_intra_matrix;
3802 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3805 s->mb_intra=0; //for the rate distortion & bit compare functions
3806 for(i=1; i<context_count; i++){
3807 ret = ff_update_duplicate_context(s->thread_context[i], s);
3815 /* Estimate motion for every MB */
3816 if(s->pict_type != AV_PICTURE_TYPE_I){
/* Scale lambda by the ME penalty compensation (rounded, Q8 fixed point). */
3817 s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3818 s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3819 if (s->pict_type != AV_PICTURE_TYPE_B) {
3820 if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3822 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3826 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3827 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
/* I-frame: every MB is intra by definition. */
3829 for(i=0; i<s->mb_stride*s->mb_height; i++)
3830 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3832 if(!s->fixed_qscale){
3833 /* finding spatial complexity for I-frame rate control */
3834 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3837 for(i=1; i<context_count; i++){
3838 merge_context_after_me(s, s->thread_context[i]);
3840 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3841 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* Scene change: promote a P-frame to I and mark all MBs intra. */
3844 if (s->me.scene_change_score > s->scenechange_threshold &&
3845 s->pict_type == AV_PICTURE_TYPE_P) {
3846 s->pict_type= AV_PICTURE_TYPE_I;
3847 for(i=0; i<s->mb_stride*s->mb_height; i++)
3848 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3849 if(s->msmpeg4_version >= 3)
3851 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3852 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* P/S frames: choose f_code from the MV tables, then clip MVs that the
 * chosen fcode cannot represent. */
3856 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3857 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3859 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3861 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3862 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3863 s->f_code= FFMAX3(s->f_code, a, b);
3866 ff_fix_long_p_mvs(s);
3867 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3868 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3872 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3873 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
/* B-frames: f_code from forward/bidir-forward tables, b_code from
 * backward/bidir-backward tables, then clip every direction. */
3878 if(s->pict_type==AV_PICTURE_TYPE_B){
3881 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3882 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3883 s->f_code = FFMAX(a, b);
3885 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3886 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3887 s->b_code = FFMAX(a, b);
3889 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3890 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3891 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3892 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3893 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3895 for(dir=0; dir<2; dir++){
3898 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3899 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3900 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3901 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* Real (non-dry-run) QP estimate for this frame. */
3909 if (estimate_qp(s, 0) < 0)
3912 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3913 s->pict_type == AV_PICTURE_TYPE_I &&
3914 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3915 s->qscale= 3; //reduce clipping problems
/* MJPEG folds qscale directly into the quant matrices (luma + chroma). */
3917 if (s->out_format == FMT_MJPEG) {
3918 const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3919 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3921 if (s->avctx->intra_matrix) {
3923 luma_matrix = s->avctx->intra_matrix;
3925 if (s->avctx->chroma_intra_matrix)
3926 chroma_matrix = s->avctx->chroma_intra_matrix;
3928 /* for mjpeg, we do include qscale in the matrix */
3930 int j = s->idsp.idct_permutation[i];
3932 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3933 s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3935 s->y_dc_scale_table=
3936 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3937 s->chroma_intra_matrix[0] =
3938 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3939 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3940 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3941 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3942 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV uses fixed DC scale tables (13/14) and the SP5X quant tables. */
3945 if(s->codec_id == AV_CODEC_ID_AMV){
3946 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3947 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3949 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3951 s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3952 s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3954 s->y_dc_scale_table= y;
3955 s->c_dc_scale_table= c;
3956 s->intra_matrix[0] = 13;
3957 s->chroma_intra_matrix[0] = 14;
3958 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3959 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3960 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3961 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3965 //FIXME var duplication
3966 s->current_picture_ptr->f->key_frame =
3967 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3968 s->current_picture_ptr->f->pict_type =
3969 s->current_picture.f->pict_type = s->pict_type;
3971 if (s->current_picture.f->key_frame)
3972 s->picture_in_gop_number=0;
3974 s->mb_x = s->mb_y = 0;
3975 s->last_bits= put_bits_count(&s->pb);
/* Write the codec-specific picture header (switch on the output format;
 * the case labels themselves are among the lines missing from this chunk). */
3976 switch(s->out_format) {
3978 if (CONFIG_MJPEG_ENCODER && s->huffman != HUFFMAN_TABLE_OPTIMAL)
3979 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3980 s->pred, s->intra_matrix, s->chroma_intra_matrix);
3983 if (CONFIG_H261_ENCODER)
3984 ff_h261_encode_picture_header(s, picture_number);
3987 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3988 ff_wmv2_encode_picture_header(s, picture_number);
3989 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3990 ff_msmpeg4_encode_picture_header(s, picture_number);
3991 else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3992 ret = ff_mpeg4_encode_picture_header(s, picture_number);
3995 } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3996 ret = ff_rv10_encode_picture_header(s, picture_number);
4000 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
4001 ff_rv20_encode_picture_header(s, picture_number);
4002 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
4003 ff_flv_encode_picture_header(s, picture_number);
4004 else if (CONFIG_H263_ENCODER)
4005 ff_h263_encode_picture_header(s, picture_number);
4008 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
4009 ff_mpeg1_encode_picture_header(s, picture_number);
4014 bits= put_bits_count(&s->pb);
4015 s->header_bits= bits - s->last_bits;
4017 for(i=1; i<context_count; i++){
4018 update_duplicate_context_after_me(s->thread_context[i], s);
/* Run the actual slice encoding across all contexts, then grow the main
 * bitstream buffer if a thread's buffer extends past it and merge results. */
4020 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
4021 for(i=1; i<context_count; i++){
4022 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
4023 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
4024 merge_context_after_encode(s, s->thread_context[i]);
/* Noise-reduction filter on a DCT block (C reference implementation):
 * accumulates each coefficient's magnitude into dct_error_sum[intra][i] and
 * shrinks the coefficient toward zero by dct_offset[intra][i], clamping so
 * the sign never flips. Separate statistics for intra vs inter blocks.
 * NOTE(review): the loop-variable declaration and the negative-coefficient
 * branch structure are partially missing from this chunk. */
4030 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
4031 const int intra= s->mb_intra;
4034 s->dct_count[intra]++;
4036 for(i=0; i<64; i++){
4037 int level= block[i];
/* Positive path: record magnitude, subtract offset, clamp at 0. */
4041 s->dct_error_sum[intra][i] += level;
4042 level -= s->dct_offset[intra][i];
4043 if(level<0) level=0;
/* Negative path: mirrored — subtracting a negative adds its magnitude. */
4045 s->dct_error_sum[intra][i] -= level;
4046 level += s->dct_offset[intra][i];
4047 if(level>0) level=0;
/* Rate-distortion optimal (trellis) quantization of one 8x8 block.
 * Performs the FDCT, computes candidate quantized levels per coefficient,
 * then runs a Viterbi-style search over (run, level) codes minimizing
 * distortion + lambda * bits, with a survivor list to prune paths.
 * Returns the index of the last non-zero coefficient, or a value < start_i
 * if the block quantizes to all zeros; *overflow is set when a level
 * exceeds max_qcoeff. n selects the block (n < 4 => luma).
 * NOTE(review): many declarations (max, survivor[], score_tab[], run_tab[],
 * level_tab[], last_*, qmat, bias, ...) and several closing braces are
 * missing from this chunk — comments only describe the visible lines. */
4054 static int dct_quantize_trellis_c(MpegEncContext *s,
4055 int16_t *block, int n,
4056 int qscale, int *overflow){
4058 const uint16_t *matrix;
4059 const uint8_t *scantable;
4060 const uint8_t *perm_scantable;
4062 unsigned int threshold1, threshold2;
4074 int coeff_count[64];
4075 int qmul, qadd, start_i, last_non_zero, i, dc;
4076 const int esc_length= s->ac_esc_length;
4078 uint8_t * last_length;
4079 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
4082 s->fdsp.fdct(block);
4084 if(s->dct_error_sum)
4085 s->denoise_dct(s, block);
4087 qadd= ((qscale-1)|1)*8;
/* MPEG-2 optionally uses a non-linear qscale mapping. */
4089 if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
4090 else mpeg2_qscale = qscale << 1;
4094 scantable= s->intra_scantable.scantable;
4095 perm_scantable= s->intra_scantable.permutated;
4103 /* For AIC we skip quant/dequant of INTRADC */
4108 /* note: block[0] is assumed to be positive */
4109 block[0] = (block[0] + (q >> 1)) / q;
4112 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4113 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4114 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4115 bias= 1<<(QMAT_SHIFT-1);
4117 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4118 length = s->intra_chroma_ac_vlc_length;
4119 last_length= s->intra_chroma_ac_vlc_last_length;
4121 length = s->intra_ac_vlc_length;
4122 last_length= s->intra_ac_vlc_last_length;
4125 scantable= s->inter_scantable.scantable;
4126 perm_scantable= s->inter_scantable.permutated;
4129 qmat = s->q_inter_matrix[qscale];
4130 matrix = s->inter_matrix;
4131 length = s->inter_ac_vlc_length;
4132 last_length= s->inter_ac_vlc_last_length;
4136 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4137 threshold2= (threshold1<<1);
/* Scan backwards to find the last coefficient that survives quantization. */
4139 for(i=63; i>=start_i; i--) {
4140 const int j = scantable[i];
4141 int level = block[j] * qmat[j];
4143 if(((unsigned)(level+threshold1))>threshold2){
/* Forward pass: build up to two candidate levels per coefficient
 * (quantized value and value-1 toward zero). */
4149 for(i=start_i; i<=last_non_zero; i++) {
4150 const int j = scantable[i];
4151 int level = block[j] * qmat[j];
4153 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4154 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4155 if(((unsigned)(level+threshold1))>threshold2){
4157 level= (bias + level)>>QMAT_SHIFT;
4159 coeff[1][i]= level-1;
4160 // coeff[2][k]= level-2;
4162 level= (bias - level)>>QMAT_SHIFT;
4163 coeff[0][i]= -level;
4164 coeff[1][i]= -level+1;
4165 // coeff[2][k]= -level+2;
4167 coeff_count[i]= FFMIN(level, 2);
4168 av_assert2(coeff_count[i]);
/* Below-threshold coefficients get a single +/-1 candidate. */
4171 coeff[0][i]= (level>>31)|1;
4176 *overflow= s->max_qcoeff < max; //overflow might have happened
/* All-zero block: clear and bail out early. */
4178 if(last_non_zero < start_i){
4179 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4180 return last_non_zero;
4183 score_tab[start_i]= 0;
4184 survivor[0]= start_i;
/* Trellis search proper: for each position try every candidate level
 * against every surviving predecessor run. */
4187 for(i=start_i; i<=last_non_zero; i++){
4188 int level_index, j, zero_distortion;
4189 int dct_coeff= FFABS(block[ scantable[i] ]);
4190 int best_score=256*256*256*120;
/* ifast FDCT output is pre-scaled by AAN factors; undo for distortion. */
4192 if (s->fdsp.fdct == ff_fdct_ifast)
4193 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4194 zero_distortion= dct_coeff*dct_coeff;
4196 for(level_index=0; level_index < coeff_count[i]; level_index++){
4198 int level= coeff[level_index][i];
4199 const int alevel= FFABS(level);
/* Dequantize per output format to measure true reconstruction error. */
4204 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4205 unquant_coeff= alevel*qmul + qadd;
4206 } else if(s->out_format == FMT_MJPEG) {
4207 j = s->idsp.idct_permutation[scantable[i]];
4208 unquant_coeff = alevel * matrix[j] * 8;
4210 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4212 unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4213 unquant_coeff = (unquant_coeff - 1) | 1;
4215 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4216 unquant_coeff = (unquant_coeff - 1) | 1;
4221 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* Levels representable without escape coding use the VLC length table. */
4223 if((level&(~127)) == 0){
4224 for(j=survivor_count-1; j>=0; j--){
4225 int run= i - survivor[j];
4226 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4227 score += score_tab[i-run];
4229 if(score < best_score){
4232 level_tab[i+1]= level-64;
/* H.263/H.261 also track the best "this is the last coeff" path. */
4236 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4237 for(j=survivor_count-1; j>=0; j--){
4238 int run= i - survivor[j];
4239 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4240 score += score_tab[i-run];
4241 if(score < last_score){
4244 last_level= level-64;
/* Escape-coded levels: fixed escape length instead of the VLC table. */
4250 distortion += esc_length*lambda;
4251 for(j=survivor_count-1; j>=0; j--){
4252 int run= i - survivor[j];
4253 int score= distortion + score_tab[i-run];
4255 if(score < best_score){
4258 level_tab[i+1]= level-64;
4262 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4263 for(j=survivor_count-1; j>=0; j--){
4264 int run= i - survivor[j];
4265 int score= distortion + score_tab[i-run];
4266 if(score < last_score){
4269 last_level= level-64;
4277 score_tab[i+1]= best_score;
4279 // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
/* Prune survivors whose score can no longer win (tighter bound for short
 * blocks where the MPEG-4 VLC anomaly cannot occur). */
4280 if(last_non_zero <= 27){
4281 for(; survivor_count; survivor_count--){
4282 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4286 for(; survivor_count; survivor_count--){
4287 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4292 survivor[ survivor_count++ ]= i+1;
/* Non-H.263 formats: pick the best truncation point after the search. */
4295 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4296 last_score= 256*256*256*120;
4297 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4298 int score= score_tab[i];
4300 score += lambda * 2; // FIXME more exact?
4302 if(score < last_score){
4305 last_level= level_tab[i];
4306 last_run= run_tab[i];
4311 s->coded_score[n] = last_score;
4313 dc= FFABS(block[0]);
4314 last_non_zero= last_i - 1;
4315 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4317 if(last_non_zero < start_i)
4318 return last_non_zero;
/* Special case: only the first coefficient survives — re-decide its level
 * (or drop it entirely) by exact RD comparison. */
4320 if(last_non_zero == 0 && start_i == 0){
4322 int best_score= dc * dc;
4324 for(i=0; i<coeff_count[0]; i++){
4325 int level= coeff[i][0];
4326 int alevel= FFABS(level);
4327 int unquant_coeff, score, distortion;
4329 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4330 unquant_coeff= (alevel*qmul + qadd)>>3;
4332 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4333 unquant_coeff = (unquant_coeff - 1) | 1;
4335 unquant_coeff = (unquant_coeff + 4) >> 3;
4336 unquant_coeff<<= 3 + 3;
4338 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4340 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4341 else score= distortion + esc_length*lambda;
4343 if(score < best_score){
4345 best_level= level - 64;
4348 block[0]= best_level;
4349 s->coded_score[n] = best_score - dc*dc;
4350 if(best_level == 0) return -1;
4351 else return last_non_zero;
/* Backtrack the winning path, writing levels into the permuted block. */
4355 av_assert2(last_level);
4357 block[ perm_scantable[last_non_zero] ]= last_level;
4360 for(; i>start_i; i -= run_tab[i] + 1){
4361 block[ perm_scantable[i-1] ]= level_tab[i];
4364 return last_non_zero;
4367 //#define REFINE_STATS 1
/* Per-coefficient 8x8 DCT basis images in BASIS_SHIFT fixed point, indexed
 * by IDCT-permuted coefficient; filled lazily by build_basis() and used by
 * dct_quantize_refine() to apply single-coefficient changes in the pixel
 * domain. */
4368 static int16_t basis[64][64];
/* Build the DCT basis table for the given IDCT permutation.
 * NOTE(review): the loop headers over (i, j, x, y) and the index computation
 * are missing from this chunk — only the kernel lines are visible. */
4370 static void build_basis(uint8_t *perm){
4377 double s= 0.25*(1<<BASIS_SHIFT);
4379 int perm_index= perm[index];
/* Orthonormalization factor 1/sqrt(2) for the DC row/column. */
4380 if(i==0) s*= sqrt(0.5);
4381 if(j==0) s*= sqrt(0.5);
4382 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4389 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
/* Iterative RD refinement of an already-quantized block (quantizer noise
 * shaping): maintains the weighted pixel-domain residual rem[] and, per
 * iteration, tries +/-1 changes on each coefficient, scoring the bit-cost
 * delta from the run/level VLC tables plus the basis-projected distortion
 * change; applies the best change until none improves. Returns the updated
 * last-non-zero index.
 * NOTE(review): many declarations (rem[], run_tab, prev_run/prev_level,
 * best_coeff/best_change, loop headers) and braces are missing from this
 * chunk; comments describe visible lines only. */
4390 int16_t *block, int16_t *weight, int16_t *orig,
4393 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4394 const uint8_t *scantable;
4395 const uint8_t *perm_scantable;
4396 // unsigned int threshold1, threshold2;
4401 int qmul, qadd, start_i, last_non_zero, i, dc;
4403 uint8_t * last_length;
4405 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* REFINE_STATS counters (static, debug-only). */
4408 static int after_last=0;
4409 static int to_zero=0;
4410 static int from_zero=0;
4413 static int messed_sign=0;
/* Lazily build the DCT basis table on first use. */
4416 if(basis[0][0] == 0)
4417 build_basis(s->idsp.idct_permutation);
4422 scantable= s->intra_scantable.scantable;
4423 perm_scantable= s->intra_scantable.permutated;
4430 /* For AIC we skip quant/dequant of INTRADC */
4434 q <<= RECON_SHIFT-3;
4435 /* note: block[0] is assumed to be positive */
4437 // block[0] = (block[0] + (q >> 1)) / q;
4439 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4440 // bias= 1<<(QMAT_SHIFT-1);
4441 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4442 length = s->intra_chroma_ac_vlc_length;
4443 last_length= s->intra_chroma_ac_vlc_last_length;
4445 length = s->intra_ac_vlc_length;
4446 last_length= s->intra_ac_vlc_last_length;
4449 scantable= s->inter_scantable.scantable;
4450 perm_scantable= s->inter_scantable.permutated;
4453 length = s->inter_ac_vlc_length;
4454 last_length= s->inter_ac_vlc_last_length;
4456 last_non_zero = s->block_last_index[n];
/* Initialize rem[] = DC reconstruction minus original, in RECON_SHIFT
 * fixed point (rounding term added to dc first). */
4461 dc += (1<<(RECON_SHIFT-1));
4462 for(i=0; i<64; i++){
4463 rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
4466 STOP_TIMER("memset rem[]")}
/* Map noise-shaping strength qns into per-pixel weights in [16, 63]. */
4469 for(i=0; i<64; i++){
4474 w= FFABS(weight[i]) + qns*one;
4475 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4478 // w=weight[i] = (63*qns + (w/2)) / w;
4481 av_assert2(w<(1<<6));
4484 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* Dequantize existing coefficients into rem[] via the basis images and
 * record their runs for bit-cost bookkeeping. */
4490 for(i=start_i; i<=last_non_zero; i++){
4491 int j= perm_scantable[i];
4492 const int level= block[j];
4496 if(level<0) coeff= qmul*level - qadd;
4497 else coeff= qmul*level + qadd;
4498 run_tab[rle_index++]=run;
4501 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
4507 if(last_non_zero>0){
4508 STOP_TIMER("init rem[]")
/* Main refinement loop body: baseline score is the no-change residual. */
4515 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4518 int run2, best_unquant_change=0, analyze_gradient;
4522 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
/* Optional gradient pre-analysis to skip changes fighting the residual. */
4524 if(analyze_gradient){
4528 for(i=0; i<64; i++){
4531 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
4534 STOP_TIMER("rem*w*w")}
/* Intra DC is tried separately with its own dequant (q * level). */
4544 const int level= block[0];
4545 int change, old_coeff;
4547 av_assert2(s->mb_intra);
4551 for(change=-1; change<=1; change+=2){
4552 int new_level= level + change;
4553 int score, new_coeff;
4555 new_coeff= q*new_level;
4556 if(new_coeff >= 2048 || new_coeff < 0)
4559 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4560 new_coeff - old_coeff);
4561 if(score<best_score){
4564 best_change= change;
4565 best_unquant_change= new_coeff - old_coeff;
4572 run2= run_tab[rle_index++];
/* Try +/-1 on every AC coefficient position. */
4576 for(i=start_i; i<64; i++){
4577 int j= perm_scantable[i];
4578 const int level= block[j];
4579 int change, old_coeff;
4581 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4585 if(level<0) old_coeff= qmul*level - qadd;
4586 else old_coeff= qmul*level + qadd;
4587 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4591 av_assert2(run2>=0 || i >= last_non_zero );
4594 for(change=-1; change<=1; change+=2){
4595 int new_level= level + change;
4596 int score, new_coeff, unquant_change;
/* Weak noise shaping only shrinks magnitudes, never grows them. */
4599 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4603 if(new_level<0) new_coeff= qmul*new_level - qadd;
4604 else new_coeff= qmul*new_level + qadd;
4605 if(new_coeff >= 2048 || new_coeff <= -2048)
4607 //FIXME check for overflow
/* Bit-cost delta when the coefficient stays non-zero. */
4610 if(level < 63 && level > -63){
4611 if(i < last_non_zero)
4612 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4613 - length[UNI_AC_ENC_INDEX(run, level+64)];
4615 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4616 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* Coefficient appearing from zero: the runs around it split. */
4619 av_assert2(FFABS(new_level)==1);
4621 if(analyze_gradient){
4622 int g= d1[ scantable[i] ];
4623 if(g && (g^new_level) >= 0)
4627 if(i < last_non_zero){
4628 int next_i= i + run2 + 1;
4629 int next_level= block[ perm_scantable[next_i] ] + 64;
4631 if(next_level&(~127))
4634 if(next_i < last_non_zero)
4635 score += length[UNI_AC_ENC_INDEX(run, 65)]
4636 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4637 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4639 score += length[UNI_AC_ENC_INDEX(run, 65)]
4640 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4641 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4643 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4645 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4646 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* Coefficient going to zero: the runs around it merge. */
4652 av_assert2(FFABS(level)==1);
4654 if(i < last_non_zero){
4655 int next_i= i + run2 + 1;
4656 int next_level= block[ perm_scantable[next_i] ] + 64;
4658 if(next_level&(~127))
4661 if(next_i < last_non_zero)
4662 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4663 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4664 - length[UNI_AC_ENC_INDEX(run, 65)];
4666 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4667 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4668 - length[UNI_AC_ENC_INDEX(run, 65)];
4670 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4672 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4673 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* Combine bit-cost (scaled by lambda earlier) with distortion delta. */
4680 unquant_change= new_coeff - old_coeff;
4681 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4683 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4685 if(score<best_score){
4688 best_change= change;
4689 best_unquant_change= unquant_change;
4693 prev_level= level + 64;
4694 if(prev_level&(~127))
4703 STOP_TIMER("iterative step")}
/* Apply the winning change and keep last_non_zero consistent. */
4707 int j= perm_scantable[ best_coeff ];
4709 block[j] += best_change;
4711 if(best_coeff > last_non_zero){
4712 last_non_zero= best_coeff;
4713 av_assert2(block[j]);
4720 if(block[j] - best_change){
4721 if(FFABS(block[j]) > FFABS(block[j] - best_change)){
4733 for(; last_non_zero>=start_i; last_non_zero--){
4734 if(block[perm_scantable[last_non_zero]])
/* Periodic REFINE_STATS debug dump. */
4740 if(256*256*256*64 % count == 0){
4741 av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
/* Rebuild run_tab and fold the applied change back into rem[]. */
4746 for(i=start_i; i<=last_non_zero; i++){
4747 int j= perm_scantable[i];
4748 const int level= block[j];
4751 run_tab[rle_index++]=run;
4758 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4764 if(last_non_zero>0){
4765 STOP_TIMER("iterative search")
4770 return last_non_zero;
4774 * Permute an 8x8 block according to permutation.
4775 * @param block the block which will be permuted according to
4776 * the given permutation vector
4777 * @param permutation the permutation vector
4778 * @param last the last non zero coefficient in scantable order, used to
4779 * speed the permutation up
4780 * @param scantable the used scantable, this is only used to speed the
4781 * permutation up, the block is not (inverse) permutated
4782 * to scantable order!
/* Two-pass in-place permutation: copy the <= last+1 non-zero positions to a
 * temp buffer, then write each back at its permuted index.
 * NOTE(review): the temp[] declaration, the early-return check, and the
 * zeroing between the two loops are missing from this chunk. */
4784 void ff_block_permute(int16_t *block, uint8_t *permutation,
4785 const uint8_t *scantable, int last)
4792 //FIXME it is ok but not clean and might fail for some permutations
4793 // if (permutation[1] == 1)
/* Pass 1: stash the coefficients reachable within `last` scan positions. */
4796 for (i = 0; i <= last; i++) {
4797 const int j = scantable[i];
/* Pass 2: scatter them to their permuted locations. */
4802 for (i = 0; i <= last; i++) {
4803 const int j = scantable[i];
4804 const int perm_j = permutation[j];
4805 block[perm_j] = temp[j];
/* Plain (non-trellis) C quantizer for one 8x8 block: FDCT, optional DCT
 * denoising, special-cased intra DC, then threshold/bias quantization of
 * the AC coefficients in scan order. Sets *overflow when a level exceeds
 * max_qcoeff and finally permutes non-zero coefficients for the IDCT.
 * Returns the last non-zero index. n < 4 selects the luma matrices.
 * NOTE(review): declarations of max/qmat/bias and several brace/assignment
 * lines are missing from this chunk. */
4809 int ff_dct_quantize_c(MpegEncContext *s,
4810 int16_t *block, int n,
4811 int qscale, int *overflow)
4813 int i, j, level, last_non_zero, q, start_i;
4815 const uint8_t *scantable;
4818 unsigned int threshold1, threshold2;
4820 s->fdsp.fdct(block);
4822 if(s->dct_error_sum)
4823 s->denoise_dct(s, block);
4826 scantable= s->intra_scantable.scantable;
4834 /* For AIC we skip quant/dequant of INTRADC */
4837 /* note: block[0] is assumed to be positive */
4838 block[0] = (block[0] + (q >> 1)) / q;
4841 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4842 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4844 scantable= s->inter_scantable.scantable;
4847 qmat = s->q_inter_matrix[qscale];
4848 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4850 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4851 threshold2= (threshold1<<1);
/* Backward scan: locate the last coefficient surviving quantization. */
4852 for(i=63;i>=start_i;i--) {
4854 level = block[j] * qmat[j];
4856 if(((unsigned)(level+threshold1))>threshold2){
/* Forward scan: quantize surviving coefficients with the rounding bias. */
4863 for(i=start_i; i<=last_non_zero; i++) {
4865 level = block[j] * qmat[j];
4867 // if( bias+level >= (1<<QMAT_SHIFT)
4868 // || bias-level >= (1<<QMAT_SHIFT)){
4869 if(((unsigned)(level+threshold1))>threshold2){
4871 level= (bias + level)>>QMAT_SHIFT;
4874 level= (bias - level)>>QMAT_SHIFT;
4882 *overflow= s->max_qcoeff < max; //overflow might have happened
4884 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4885 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4886 ff_block_permute(block, s->idsp.idct_permutation,
4887 scantable, last_non_zero);
4889 return last_non_zero;
4892 #define OFFSET(x) offsetof(MpegEncContext, x)
4893 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
4894 static const AVOption h263_options[] = {
4895 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4896 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4901 static const AVClass h263_class = {
4902 .class_name = "H.263 encoder",
4903 .item_name = av_default_item_name,
4904 .option = h263_options,
4905 .version = LIBAVUTIL_VERSION_INT,
4908 AVCodec ff_h263_encoder = {
4910 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4911 .type = AVMEDIA_TYPE_VIDEO,
4912 .id = AV_CODEC_ID_H263,
4913 .priv_data_size = sizeof(MpegEncContext),
4914 .init = ff_mpv_encode_init,
4915 .encode2 = ff_mpv_encode_picture,
4916 .close = ff_mpv_encode_end,
4917 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4918 .priv_class = &h263_class,
4921 static const AVOption h263p_options[] = {
4922 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4923 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4924 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4925 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
4929 static const AVClass h263p_class = {
4930 .class_name = "H.263p encoder",
4931 .item_name = av_default_item_name,
4932 .option = h263p_options,
4933 .version = LIBAVUTIL_VERSION_INT,
4936 AVCodec ff_h263p_encoder = {
4938 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4939 .type = AVMEDIA_TYPE_VIDEO,
4940 .id = AV_CODEC_ID_H263P,
4941 .priv_data_size = sizeof(MpegEncContext),
4942 .init = ff_mpv_encode_init,
4943 .encode2 = ff_mpv_encode_picture,
4944 .close = ff_mpv_encode_end,
4945 .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4946 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4947 .priv_class = &h263p_class,
4950 static const AVClass msmpeg4v2_class = {
4951 .class_name = "msmpeg4v2 encoder",
4952 .item_name = av_default_item_name,
4953 .option = ff_mpv_generic_options,
4954 .version = LIBAVUTIL_VERSION_INT,
4957 AVCodec ff_msmpeg4v2_encoder = {
4958 .name = "msmpeg4v2",
4959 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4960 .type = AVMEDIA_TYPE_VIDEO,
4961 .id = AV_CODEC_ID_MSMPEG4V2,
4962 .priv_data_size = sizeof(MpegEncContext),
4963 .init = ff_mpv_encode_init,
4964 .encode2 = ff_mpv_encode_picture,
4965 .close = ff_mpv_encode_end,
4966 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4967 .priv_class = &msmpeg4v2_class,
4970 static const AVClass msmpeg4v3_class = {
4971 .class_name = "msmpeg4v3 encoder",
4972 .item_name = av_default_item_name,
4973 .option = ff_mpv_generic_options,
4974 .version = LIBAVUTIL_VERSION_INT,
4977 AVCodec ff_msmpeg4v3_encoder = {
4979 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4980 .type = AVMEDIA_TYPE_VIDEO,
4981 .id = AV_CODEC_ID_MSMPEG4V3,
4982 .priv_data_size = sizeof(MpegEncContext),
4983 .init = ff_mpv_encode_init,
4984 .encode2 = ff_mpv_encode_picture,
4985 .close = ff_mpv_encode_end,
4986 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4987 .priv_class = &msmpeg4v3_class,
4990 static const AVClass wmv1_class = {
4991 .class_name = "wmv1 encoder",
4992 .item_name = av_default_item_name,
4993 .option = ff_mpv_generic_options,
4994 .version = LIBAVUTIL_VERSION_INT,
4997 AVCodec ff_wmv1_encoder = {
4999 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
5000 .type = AVMEDIA_TYPE_VIDEO,
5001 .id = AV_CODEC_ID_WMV1,
5002 .priv_data_size = sizeof(MpegEncContext),
5003 .init = ff_mpv_encode_init,
5004 .encode2 = ff_mpv_encode_picture,
5005 .close = ff_mpv_encode_end,
5006 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
5007 .priv_class = &wmv1_class,