2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/pixdesc.h"
40 #include "libavutil/opt.h"
41 #include "libavutil/timer.h"
46 #include "mpegvideo.h"
47 #include "mpegvideodata.h"
51 #include "mjpegenc_common.h"
53 #include "mpegutils.h"
56 #include "pixblockdsp.h"
60 #include "aandcttab.h"
62 #include "mpeg4video.h"
64 #include "bytestream.h"
/* Fixed-point precision used for the intra/inter quantizer bias values. */
71 #define QUANT_BIAS_SHIFT 8
/* Shift used when building the 16-bit (MMX-friendly) quantization tables. */
73 #define QMAT_SHIFT_MMX 16
/* Forward declarations for encoder internals defined later in this file. */
76 static int encode_picture(MpegEncContext *s, int picture_number);
77 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
78 static int sse_mb(MpegEncContext *s);
79 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
80 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
/* Shared default motion-vector penalty / fcode tables, filled once in
 * mpv_encode_defaults() and referenced by every encoder instance. */
82 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
83 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
85 const AVOption ff_mpv_generic_options[] = {
/**
 * Build the fixed-point quantization tables qmat (32-bit) and qmat16
 * (16-bit, with a separate bias plane) for every qscale in [qmin, qmax]
 * from the given 8-bit quant matrix, scaled to match whichever forward
 * DCT implementation is in use.
 * NOTE(review): this listing is a sampled excerpt; some original lines
 * (declarations, braces, #if guards) are not visible here.
 */
90 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
91 uint16_t (*qmat16)[2][64],
92 const uint16_t *quant_matrix,
93 int bias, int qmin, int qmax, int intra)
95 FDCTDSPContext *fdsp = &s->fdsp;
99 for (qscale = qmin; qscale <= qmax; qscale++) {
/* MPEG-2 style non-linear quantizer maps qscale through a lookup
 * table; the linear quantizer simply doubles it. */
103 if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
104 else qscale2 = qscale << 1;
/* Case 1: accurate integer DCTs (jpeg islow 8/10-bit, faandct) —
 * no extra per-coefficient DCT scaling is needed. */
106 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
108 fdsp->fdct == ff_faandct ||
109 #endif /* CONFIG_FAANDCT */
110 fdsp->fdct == ff_jpeg_fdct_islow_10) {
111 for (i = 0; i < 64; i++) {
112 const int j = s->idsp.idct_permutation[i];
113 int64_t den = (int64_t) qscale2 * quant_matrix[j];
114 /* 16 <= qscale * quant_matrix[i] <= 7905
115 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
116 * 19952 <= x <= 249205026
117 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
118 * 3444240 >= (1 << 36) / (x) >= 275 */
120 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
/* Case 2: AAN "ifast" DCT — fold the AAN post-scale factors into
 * the quantization table (hence the extra +14 bits of headroom). */
122 } else if (fdsp->fdct == ff_fdct_ifast) {
123 for (i = 0; i < 64; i++) {
124 const int j = s->idsp.idct_permutation[i];
125 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
126 /* 16 <= qscale * quant_matrix[i] <= 7905
127 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
128 * 19952 <= x <= 249205026
129 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
130 * 3444240 >= (1 << 36) / (x) >= 275 */
132 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
/* Case 3 (default): also build the 16-bit tables used by the
 * SIMD quantizers, plus their per-coefficient bias plane. */
135 for (i = 0; i < 64; i++) {
136 const int j = s->idsp.idct_permutation[i];
137 int64_t den = (int64_t) qscale2 * quant_matrix[j];
138 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
139 * Assume x = qscale * quant_matrix[i]
141 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
142 * so 32768 >= (1 << 19) / (x) >= 67 */
143 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
144 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
145 // (qscale * quant_matrix[i]);
146 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
/* Clamp to avoid 0 and the 16-bit overflow value 0x8000. */
148 if (qmat16[qscale][0][i] == 0 ||
149 qmat16[qscale][0][i] == 128 * 256)
150 qmat16[qscale][0][i] = 128 * 256 - 1;
151 qmat16[qscale][1][i] =
152 ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
153 qmat16[qscale][0][i]);
/* Reduce the dynamic range (shift) until level*qmat cannot
 * overflow a 32-bit int; warn if that forces a smaller shift. */
157 for (i = intra; i < 64; i++) {
159 if (fdsp->fdct == ff_fdct_ifast) {
160 max = (8191LL * ff_aanscales[i]) >> 14;
162 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
168 av_log(NULL, AV_LOG_INFO,
169 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/**
 * Derive s->qscale (and s->lambda2) from the current lambda value,
 * clipping the result to the configured qmin/qmax range.
 */
174 static inline void update_qscale(MpegEncContext *s)
/* Dead branch: "&& 0" deliberately disables the non-linear qscale
 * search below; the linear mapping further down is always used. */
176 if (s->q_scale_type == 1 && 0) {
178 int bestdiff=INT_MAX;
181 for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
182 int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
183 if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
184 (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
186 if (diff < bestdiff) {
/* Linear lambda -> qscale mapping: qscale ~ lambda * 139 / 2^(FF_LAMBDA_SHIFT+7),
 * rounded. When VBV overrides qmax, allow the full range up to 31. */
193 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
194 (FF_LAMBDA_SHIFT + 7);
195 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
/* lambda2 = round(lambda^2 / FF_LAMBDA_SCALE), kept in sync with lambda. */
198 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/**
 * Write a 64-entry quantization matrix to the bitstream in zigzag scan
 * order, 8 bits per coefficient.
 */
202 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
208 for (i = 0; i < 64; i++) {
209 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
216 * init s->current_picture.qscale_table from s->lambda_table
218 void ff_init_qscale_tab(MpegEncContext *s)
220 int8_t * const qscale_table = s->current_picture.qscale_table;
223 for (i = 0; i < s->mb_num; i++) {
224 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
/* Same lambda -> qp mapping as update_qscale(): qp ~ lam*139 >> (FF_LAMBDA_SHIFT+7). */
225 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
226 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/**
 * Copy the per-frame fields that motion estimation / header encoding may
 * have changed from src into dst (keeps duplicated slice contexts in sync).
 */
231 static void update_duplicate_context_after_me(MpegEncContext *dst,
234 #define COPY(a) dst->a= src->a
236 COPY(current_picture);
242 COPY(picture_in_gop_number);
243 COPY(gop_picture_number);
244 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
245 COPY(progressive_frame); // FIXME don't set in encode_header
246 COPY(partitioned_frame); // FIXME don't set in encode_header
251 * Set the given MpegEncContext to defaults for encoding.
252 * the changed fields will not depend upon the prior state of the MpegEncContext.
254 static void mpv_encode_defaults(MpegEncContext *s)
257 ff_mpv_common_defaults(s);
/* Populate the shared (static, file-scope) default fcode table; the
 * full initialization presumably runs only once — guard not visible
 * in this excerpt. */
259 for (i = -16; i < 16; i++) {
260 default_fcode_tab[i + MAX_MV] = 1;
262 s->me.mv_penalty = default_mv_penalty;
263 s->fcode_tab = default_fcode_tab;
/* Start numbering input pictures and GOP positions from zero. */
265 s->input_picture_number = 0;
266 s->picture_in_gop_number = 0;
/**
 * Select the DCT quantization implementations (C, x86, trellis) for this
 * context. Called from ff_mpv_encode_init() and by encoders reusing the
 * mpegvideo DCT path.
 */
269 av_cold int ff_dct_encode_init(MpegEncContext *s)
272 ff_dct_encode_init_x86(s);
274 if (CONFIG_H263_ENCODER)
275 ff_h263dsp_init(&s->h263dsp);
/* Keep any arch-specific dct_quantize installed above; fall back to C. */
276 if (!s->dct_quantize)
277 s->dct_quantize = ff_dct_quantize_c;
279 s->denoise_dct = denoise_dct_c;
/* fast_dct_quantize keeps the non-trellis version even when trellis
 * quantization is enabled below. */
280 s->fast_dct_quantize = s->dct_quantize;
281 if (s->avctx->trellis)
282 s->dct_quantize = dct_quantize_trellis_c;
287 /* init video encoder */
288 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
290 MpegEncContext *s = avctx->priv_data;
291 AVCPBProperties *cpb_props;
292 int i, ret, format_supported;
/* Reset the context to known encoder defaults before applying options. */
294 mpv_encode_defaults(s);
/* --- Validate the input pixel format for the chosen codec. --- */
296 switch (avctx->codec_id) {
297 case AV_CODEC_ID_MPEG2VIDEO:
298 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
299 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
300 av_log(avctx, AV_LOG_ERROR,
301 "only YUV420 and YUV422 are supported\n");
305 case AV_CODEC_ID_MJPEG:
306 case AV_CODEC_ID_AMV:
307 format_supported = 0;
308 /* JPEG color space */
309 if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
310 avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
311 avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
312 (avctx->color_range == AVCOL_RANGE_JPEG &&
313 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
314 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
315 avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
316 format_supported = 1;
317 /* MPEG color space */
318 else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
319 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
320 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
321 avctx->pix_fmt == AV_PIX_FMT_YUV444P))
322 format_supported = 1;
324 if (!format_supported) {
325 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
330 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
331 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
/* Derive the internal chroma format from the pixel format. */
336 switch (avctx->pix_fmt) {
337 case AV_PIX_FMT_YUVJ444P:
338 case AV_PIX_FMT_YUV444P:
339 s->chroma_format = CHROMA_444;
341 case AV_PIX_FMT_YUVJ422P:
342 case AV_PIX_FMT_YUV422P:
343 s->chroma_format = CHROMA_422;
345 case AV_PIX_FMT_YUVJ420P:
346 case AV_PIX_FMT_YUV420P:
348 s->chroma_format = CHROMA_420;
352 avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
354 #if FF_API_PRIVATE_OPT
355 FF_DISABLE_DEPRECATION_WARNINGS
356 if (avctx->rtp_payload_size)
357 s->rtp_payload_size = avctx->rtp_payload_size;
358 if (avctx->me_penalty_compensation)
359 s->me_penalty_compensation = avctx->me_penalty_compensation;
361 s->me_pre = avctx->pre_me;
362 FF_ENABLE_DEPRECATION_WARNINGS
/* --- Copy basic user parameters into the context. --- */
365 s->bit_rate = avctx->bit_rate;
366 s->width = avctx->width;
367 s->height = avctx->height;
368 if (avctx->gop_size > 600 &&
369 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
370 av_log(avctx, AV_LOG_WARNING,
371 "keyframe interval too large!, reducing it from %d to %d\n",
372 avctx->gop_size, 600);
373 avctx->gop_size = 600;
375 s->gop_size = avctx->gop_size;
377 if (avctx->max_b_frames > MAX_B_FRAMES) {
378 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
379 "is %d.\n", MAX_B_FRAMES);
380 avctx->max_b_frames = MAX_B_FRAMES;
382 s->max_b_frames = avctx->max_b_frames;
383 s->codec_id = avctx->codec->id;
384 s->strict_std_compliance = avctx->strict_std_compliance;
385 s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
386 s->rtp_mode = !!s->rtp_payload_size;
387 s->intra_dc_precision = avctx->intra_dc_precision;
389 // workaround some differences between how applications specify dc precision
390 if (s->intra_dc_precision < 0) {
391 s->intra_dc_precision += 8;
392 } else if (s->intra_dc_precision >= 8)
393 s->intra_dc_precision -= 8;
395 if (s->intra_dc_precision < 0) {
396 av_log(avctx, AV_LOG_ERROR,
397 "intra dc precision must be positive, note some applications use"
398 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
399 return AVERROR(EINVAL);
402 if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
403 av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
404 return AVERROR(EINVAL);
406 s->user_specified_pts = AV_NOPTS_VALUE;
408 if (s->gop_size <= 1) {
415 #if FF_API_MOTION_EST
416 FF_DISABLE_DEPRECATION_WARNINGS
417 s->me_method = avctx->me_method;
418 FF_ENABLE_DEPRECATION_WARNINGS
422 s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
425 FF_DISABLE_DEPRECATION_WARNINGS
426 if (avctx->border_masking != 0.0)
427 s->border_masking = avctx->border_masking;
428 FF_ENABLE_DEPRECATION_WARNINGS
/* Adaptive quantization is enabled when any masking option or QP-RD
 * is in use (full condition partly elided in this excerpt). */
431 s->adaptive_quant = (s->avctx->lumi_masking ||
432 s->avctx->dark_masking ||
433 s->avctx->temporal_cplx_masking ||
434 s->avctx->spatial_cplx_masking ||
435 s->avctx->p_masking ||
437 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
440 s->loop_filter = !!(s->avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
/* --- Rate-control sanity checks; choose a default VBV buffer size
 * when only a maximum rate was specified. --- */
442 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
443 switch(avctx->codec_id) {
444 case AV_CODEC_ID_MPEG1VIDEO:
445 case AV_CODEC_ID_MPEG2VIDEO:
446 avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
448 case AV_CODEC_ID_MPEG4:
449 case AV_CODEC_ID_MSMPEG4V1:
450 case AV_CODEC_ID_MSMPEG4V2:
451 case AV_CODEC_ID_MSMPEG4V3:
/* Piecewise-linear interpolation of VBV sizes between rate bands. */
452 if (avctx->rc_max_rate >= 15000000) {
453 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
454 } else if(avctx->rc_max_rate >= 2000000) {
455 avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
456 } else if(avctx->rc_max_rate >= 384000) {
457 avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
459 avctx->rc_buffer_size = 40;
460 avctx->rc_buffer_size *= 16384;
463 if (avctx->rc_buffer_size) {
464 av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
468 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
469 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
473 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
474 av_log(avctx, AV_LOG_INFO,
475 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
478 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
479 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
483 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
484 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
488 if (avctx->rc_max_rate &&
489 avctx->rc_max_rate == avctx->bit_rate &&
490 avctx->rc_max_rate != avctx->rc_min_rate) {
491 av_log(avctx, AV_LOG_INFO,
492 "impossible bitrate constraints, this will fail\n");
495 if (avctx->rc_buffer_size &&
496 avctx->bit_rate * (int64_t)avctx->time_base.num >
497 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
498 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
502 if (!s->fixed_qscale &&
503 avctx->bit_rate * av_q2d(avctx->time_base) >
504 avctx->bit_rate_tolerance) {
505 av_log(avctx, AV_LOG_WARNING,
506 "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, (int64_t)avctx->bit_rate);
507 avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
/* MPEG-1/2 CBR: a vbv_delay larger than 16 bits cannot be coded. */
510 if (s->avctx->rc_max_rate &&
511 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
512 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
513 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
514 90000LL * (avctx->rc_buffer_size - 1) >
515 s->avctx->rc_max_rate * 0xFFFFLL) {
516 av_log(avctx, AV_LOG_INFO,
517 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
518 "specified vbv buffer is too large for the given bitrate!\n");
/* --- Reject feature/codec combinations the target codec cannot code. --- */
521 if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
522 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
523 s->codec_id != AV_CODEC_ID_FLV1) {
524 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
528 if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
529 av_log(avctx, AV_LOG_ERROR,
530 "OBMC is only supported with simple mb decision\n");
534 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
535 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
539 if (s->max_b_frames &&
540 s->codec_id != AV_CODEC_ID_MPEG4 &&
541 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
542 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
543 av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
546 if (s->max_b_frames < 0) {
547 av_log(avctx, AV_LOG_ERROR,
548 "max b frames must be 0 or positive for mpegvideo based encoders\n");
552 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
553 s->codec_id == AV_CODEC_ID_H263 ||
554 s->codec_id == AV_CODEC_ID_H263P) &&
555 (avctx->sample_aspect_ratio.num > 255 ||
556 avctx->sample_aspect_ratio.den > 255)) {
557 av_log(avctx, AV_LOG_WARNING,
558 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
559 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
560 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
561 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
564 if ((s->codec_id == AV_CODEC_ID_H263 ||
565 s->codec_id == AV_CODEC_ID_H263P) &&
566 (avctx->width > 2048 ||
567 avctx->height > 1152 )) {
568 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
571 if ((s->codec_id == AV_CODEC_ID_H263 ||
572 s->codec_id == AV_CODEC_ID_H263P) &&
573 ((avctx->width &3) ||
574 (avctx->height&3) )) {
575 av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
579 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
580 (avctx->width > 4095 ||
581 avctx->height > 4095 )) {
582 av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
586 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
587 (avctx->width > 16383 ||
588 avctx->height > 16383 )) {
589 av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
593 if (s->codec_id == AV_CODEC_ID_RV10 &&
595 avctx->height&15 )) {
596 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
597 return AVERROR(EINVAL);
600 if (s->codec_id == AV_CODEC_ID_RV20 &&
603 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
604 return AVERROR(EINVAL);
607 if ((s->codec_id == AV_CODEC_ID_WMV1 ||
608 s->codec_id == AV_CODEC_ID_WMV2) &&
610 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
614 if ((s->avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
615 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
616 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
620 #if FF_API_PRIVATE_OPT
621 FF_DISABLE_DEPRECATION_WARNINGS
622 if (avctx->mpeg_quant)
623 s->mpeg_quant = avctx->mpeg_quant;
624 FF_ENABLE_DEPRECATION_WARNINGS
627 // FIXME mpeg2 uses that too
628 if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
629 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
630 av_log(avctx, AV_LOG_ERROR,
631 "mpeg2 style quantization not supported by codec\n");
635 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
636 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
640 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
641 s->avctx->mb_decision != FF_MB_DECISION_RD) {
642 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
646 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
647 (s->codec_id == AV_CODEC_ID_AMV ||
648 s->codec_id == AV_CODEC_ID_MJPEG)) {
649 // Used to produce garbage with MJPEG.
650 av_log(avctx, AV_LOG_ERROR,
651 "QP RD is no longer compatible with MJPEG or AMV\n");
655 #if FF_API_PRIVATE_OPT
656 FF_DISABLE_DEPRECATION_WARNINGS
657 if (avctx->scenechange_threshold)
658 s->scenechange_threshold = avctx->scenechange_threshold;
659 FF_ENABLE_DEPRECATION_WARNINGS
662 if (s->scenechange_threshold < 1000000000 &&
663 (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
664 av_log(avctx, AV_LOG_ERROR,
665 "closed gop with scene change detection are not supported yet, "
666 "set threshold to 1000000000\n");
670 if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
671 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
672 av_log(avctx, AV_LOG_ERROR,
673 "low delay forcing is only available for mpeg2\n");
676 if (s->max_b_frames != 0) {
677 av_log(avctx, AV_LOG_ERROR,
678 "B-frames cannot be used with low delay\n");
683 if (s->q_scale_type == 1) {
684 if (avctx->qmax > 28) {
685 av_log(avctx, AV_LOG_ERROR,
686 "non linear quant only supports qmax <= 28 currently\n");
691 if (avctx->slices > 1 &&
692 (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
693 av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
694 return AVERROR(EINVAL);
697 if (s->avctx->thread_count > 1 &&
698 s->codec_id != AV_CODEC_ID_MPEG4 &&
699 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
700 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
701 s->codec_id != AV_CODEC_ID_MJPEG &&
702 (s->codec_id != AV_CODEC_ID_H263P)) {
703 av_log(avctx, AV_LOG_ERROR,
704 "multi threaded encoding not supported by codec\n");
708 if (s->avctx->thread_count < 1) {
709 av_log(avctx, AV_LOG_ERROR,
710 "automatic thread number detection not supported by codec, "
715 if (!avctx->time_base.den || !avctx->time_base.num) {
716 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
720 #if FF_API_PRIVATE_OPT
721 FF_DISABLE_DEPRECATION_WARNINGS
722 if (avctx->b_frame_strategy)
723 s->b_frame_strategy = avctx->b_frame_strategy;
724 if (avctx->b_sensitivity != 40)
725 s->b_sensitivity = avctx->b_sensitivity;
726 FF_ENABLE_DEPRECATION_WARNINGS
729 if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
730 av_log(avctx, AV_LOG_INFO,
731 "notice: b_frame_strategy only affects the first pass\n");
732 s->b_frame_strategy = 0;
/* Reduce the timebase to lowest terms before it is coded. */
735 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
737 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
738 avctx->time_base.den /= i;
739 avctx->time_base.num /= i;
/* Default quantizer rounding bias per codec family. */
743 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) {
744 // (a + x * 3 / 8) / x
745 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
746 s->inter_quant_bias = 0;
748 s->intra_quant_bias = 0;
750 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
753 if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
754 av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
755 return AVERROR(EINVAL);
758 #if FF_API_QUANT_BIAS
759 FF_DISABLE_DEPRECATION_WARNINGS
760 if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
761 s->intra_quant_bias = avctx->intra_quant_bias;
762 if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
763 s->inter_quant_bias = avctx->inter_quant_bias;
764 FF_ENABLE_DEPRECATION_WARNINGS
767 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
/* MPEG-4 codes the timebase denominator in 16 bits. */
769 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
770 s->avctx->time_base.den > (1 << 16) - 1) {
771 av_log(avctx, AV_LOG_ERROR,
772 "timebase %d/%d not supported by MPEG 4 standard, "
773 "the maximum admitted value for the timebase denominator "
774 "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
778 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
/* --- Per-codec output format / feature setup. --- */
780 switch (avctx->codec->id) {
781 case AV_CODEC_ID_MPEG1VIDEO:
782 s->out_format = FMT_MPEG1;
783 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
784 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
786 case AV_CODEC_ID_MPEG2VIDEO:
787 s->out_format = FMT_MPEG1;
788 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
789 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
792 case AV_CODEC_ID_MJPEG:
793 case AV_CODEC_ID_AMV:
794 s->out_format = FMT_MJPEG;
795 s->intra_only = 1; /* force intra only for jpeg */
796 if (!CONFIG_MJPEG_ENCODER ||
797 ff_mjpeg_encode_init(s) < 0)
802 case AV_CODEC_ID_H261:
803 if (!CONFIG_H261_ENCODER)
805 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
806 av_log(avctx, AV_LOG_ERROR,
807 "The specified picture size of %dx%d is not valid for the "
808 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
809 s->width, s->height);
812 s->out_format = FMT_H261;
815 s->rtp_mode = 0; /* Sliced encoding not supported */
817 case AV_CODEC_ID_H263:
818 if (!CONFIG_H263_ENCODER)
820 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
821 s->width, s->height) == 8) {
822 av_log(avctx, AV_LOG_ERROR,
823 "The specified picture size of %dx%d is not valid for "
824 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
825 "352x288, 704x576, and 1408x1152. "
826 "Try H.263+.\n", s->width, s->height);
829 s->out_format = FMT_H263;
833 case AV_CODEC_ID_H263P:
834 s->out_format = FMT_H263;
837 s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
838 s->modified_quant = s->h263_aic;
839 s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
840 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
843 /* These are just to be sure */
847 case AV_CODEC_ID_FLV1:
848 s->out_format = FMT_H263;
849 s->h263_flv = 2; /* format = 1; 11-bit codes */
850 s->unrestricted_mv = 1;
851 s->rtp_mode = 0; /* don't allow GOB */
855 case AV_CODEC_ID_RV10:
856 s->out_format = FMT_H263;
860 case AV_CODEC_ID_RV20:
861 s->out_format = FMT_H263;
864 s->modified_quant = 1;
868 s->unrestricted_mv = 0;
870 case AV_CODEC_ID_MPEG4:
871 s->out_format = FMT_H263;
873 s->unrestricted_mv = 1;
874 s->low_delay = s->max_b_frames ? 0 : 1;
875 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
877 case AV_CODEC_ID_MSMPEG4V2:
878 s->out_format = FMT_H263;
880 s->unrestricted_mv = 1;
881 s->msmpeg4_version = 2;
885 case AV_CODEC_ID_MSMPEG4V3:
886 s->out_format = FMT_H263;
888 s->unrestricted_mv = 1;
889 s->msmpeg4_version = 3;
890 s->flipflop_rounding = 1;
894 case AV_CODEC_ID_WMV1:
895 s->out_format = FMT_H263;
897 s->unrestricted_mv = 1;
898 s->msmpeg4_version = 4;
899 s->flipflop_rounding = 1;
903 case AV_CODEC_ID_WMV2:
904 s->out_format = FMT_H263;
906 s->unrestricted_mv = 1;
907 s->msmpeg4_version = 5;
908 s->flipflop_rounding = 1;
916 #if FF_API_PRIVATE_OPT
917 FF_DISABLE_DEPRECATION_WARNINGS
918 if (avctx->noise_reduction)
919 s->noise_reduction = avctx->noise_reduction;
920 FF_ENABLE_DEPRECATION_WARNINGS
923 avctx->has_b_frames = !s->low_delay;
927 s->progressive_frame =
928 s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
929 AV_CODEC_FLAG_INTERLACED_ME) ||
/* --- Common init: shared context, DSP helpers, table allocation. --- */
934 if (ff_mpv_common_init(s) < 0)
937 ff_fdctdsp_init(&s->fdsp, avctx);
938 ff_me_cmp_init(&s->mecc, avctx);
939 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
940 ff_pixblockdsp_init(&s->pdsp, avctx);
941 ff_qpeldsp_init(&s->qdsp);
943 if (s->msmpeg4_version) {
944 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
945 2 * 2 * (MAX_LEVEL + 1) *
946 (MAX_RUN + 1) * 2 * sizeof(int), fail);
948 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
950 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
951 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail);
952 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
953 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
954 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
955 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
956 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
957 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
958 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
959 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
962 if (s->noise_reduction) {
963 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
964 2 * 64 * sizeof(uint16_t), fail);
967 ff_dct_encode_init(s);
969 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
970 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
972 if (s->slice_context_count > 1) {
975 if (avctx->codec_id == AV_CODEC_ID_H263P)
976 s->h263_slice_structured = 1;
979 s->quant_precision = 5;
981 #if FF_API_PRIVATE_OPT
982 FF_DISABLE_DEPRECATION_WARNINGS
983 if (avctx->frame_skip_threshold)
984 s->frame_skip_threshold = avctx->frame_skip_threshold;
985 if (avctx->frame_skip_factor)
986 s->frame_skip_factor = avctx->frame_skip_factor;
987 if (avctx->frame_skip_exp)
988 s->frame_skip_exp = avctx->frame_skip_exp;
989 if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
990 s->frame_skip_cmp = avctx->frame_skip_cmp;
991 FF_ENABLE_DEPRECATION_WARNINGS
994 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, s->avctx->ildct_cmp);
995 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
/* Format-specific sub-encoder initialization. */
997 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
998 ff_h261_encode_init(s);
999 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
1000 ff_h263_encode_init(s);
1001 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
1002 if ((ret = ff_msmpeg4_encode_init(s)) < 0)
1004 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
1005 && s->out_format == FMT_MPEG1)
1006 ff_mpeg1_encode_init(s);
/* Select default quant matrices; user-provided matrices override. */
1009 for (i = 0; i < 64; i++) {
1010 int j = s->idsp.idct_permutation[i];
1011 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
1013 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
1014 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
1015 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1016 s->intra_matrix[j] =
1017 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1020 s->chroma_intra_matrix[j] =
1021 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1022 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1024 if (s->avctx->intra_matrix)
1025 s->intra_matrix[j] = s->avctx->intra_matrix[i];
1026 if (s->avctx->inter_matrix)
1027 s->inter_matrix[j] = s->avctx->inter_matrix[i];
1030 /* precompute matrix */
1031 /* for mjpeg, we do include qscale in the matrix */
1032 if (s->out_format != FMT_MJPEG) {
1033 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
1034 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1036 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
1037 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1041 #if FF_API_RC_STRATEGY
1042 FF_DISABLE_DEPRECATION_WARNINGS
1043 if (!s->rc_strategy)
1044 s->rc_strategy = s->avctx->rc_strategy;
1045 FF_ENABLE_DEPRECATION_WARNINGS
1048 if (ff_rate_control_init(s) < 0)
1051 #if FF_API_RC_STRATEGY
1052 av_assert0(MPV_RC_STRATEGY_XVID == FF_RC_STRATEGY_XVID);
1055 if ((s->avctx->flags & AV_CODEC_FLAG_PASS2) && s->rc_strategy == MPV_RC_STRATEGY_XVID) {
1057 ret = ff_xvid_rate_control_init(s);
1059 ret = AVERROR(ENOSYS);
1060 av_log(s->avctx, AV_LOG_ERROR,
1061 "Xvid ratecontrol requires libavcodec compiled with Xvid support.\n");
/* Map remaining deprecated AVCodecContext fields onto private options. */
1067 #if FF_API_ERROR_RATE
1068 FF_DISABLE_DEPRECATION_WARNINGS
1069 if (avctx->error_rate)
1070 s->error_rate = avctx->error_rate;
1071 FF_ENABLE_DEPRECATION_WARNINGS;
1074 #if FF_API_NORMALIZE_AQP
1075 FF_DISABLE_DEPRECATION_WARNINGS
1076 if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP)
1077 s->mpv_flags |= FF_MPV_FLAG_NAQ;
1078 FF_ENABLE_DEPRECATION_WARNINGS;
1082 FF_DISABLE_DEPRECATION_WARNINGS
1083 if (avctx->flags & CODEC_FLAG_MV0)
1084 s->mpv_flags |= FF_MPV_FLAG_MV0;
1085 FF_ENABLE_DEPRECATION_WARNINGS
1089 FF_DISABLE_DEPRECATION_WARNINGS
1090 if (avctx->rc_qsquish != 0.0)
1091 s->rc_qsquish = avctx->rc_qsquish;
1092 if (avctx->rc_qmod_amp != 0.0)
1093 s->rc_qmod_amp = avctx->rc_qmod_amp;
1094 if (avctx->rc_qmod_freq)
1095 s->rc_qmod_freq = avctx->rc_qmod_freq;
1096 if (avctx->rc_buffer_aggressivity != 1.0)
1097 s->rc_buffer_aggressivity = avctx->rc_buffer_aggressivity;
1098 if (avctx->rc_initial_cplx != 0.0)
1099 s->rc_initial_cplx = avctx->rc_initial_cplx;
1101 s->lmin = avctx->lmin;
1103 s->lmax = avctx->lmax;
1106 av_freep(&s->rc_eq);
1107 s->rc_eq = av_strdup(avctx->rc_eq);
1109 return AVERROR(ENOMEM);
1111 FF_ENABLE_DEPRECATION_WARNINGS
1114 #if FF_API_PRIVATE_OPT
1115 FF_DISABLE_DEPRECATION_WARNINGS
1116 if (avctx->brd_scale)
1117 s->brd_scale = avctx->brd_scale;
1119 if (avctx->prediction_method)
1120 s->pred = avctx->prediction_method + 1;
1121 FF_ENABLE_DEPRECATION_WARNINGS
/* b_frame_strategy 2 needs downscaled temporary frames for lookahead. */
1124 if (s->b_frame_strategy == 2) {
1125 for (i = 0; i < s->max_b_frames + 2; i++) {
1126 s->tmp_frames[i] = av_frame_alloc();
1127 if (!s->tmp_frames[i])
1128 return AVERROR(ENOMEM);
1130 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1131 s->tmp_frames[i]->width = s->width >> s->brd_scale;
1132 s->tmp_frames[i]->height = s->height >> s->brd_scale;
1134 ret = av_frame_get_buffer(s->tmp_frames[i], 32);
/* Export coded-picture-buffer properties as stream side data. */
1140 cpb_props = ff_add_cpb_side_data(avctx);
1142 return AVERROR(ENOMEM);
1143 cpb_props->max_bitrate = avctx->rc_max_rate;
1144 cpb_props->min_bitrate = avctx->rc_min_rate;
1145 cpb_props->avg_bitrate = avctx->bit_rate;
1146 cpb_props->buffer_size = avctx->rc_buffer_size;
/* Error path ("fail" label not visible in this excerpt): tear down
 * whatever was initialized so far. */
1150 ff_mpv_encode_end(avctx);
1151 return AVERROR_UNKNOWN;
/**
 * Free everything allocated by ff_mpv_encode_init(); also used as its
 * error-path cleanup, so every free here must tolerate partial init.
 */
1154 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1156 MpegEncContext *s = avctx->priv_data;
1159 ff_rate_control_uninit(s);
1161 if ((avctx->flags & AV_CODEC_FLAG_PASS2) && s->rc_strategy == MPV_RC_STRATEGY_XVID)
1162 ff_xvid_rate_control_uninit(s);
1165 ff_mpv_common_end(s);
1166 if (CONFIG_MJPEG_ENCODER &&
1167 s->out_format == FMT_MJPEG)
1168 ff_mjpeg_encode_close(s);
1170 av_freep(&avctx->extradata);
1172 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1173 av_frame_free(&s->tmp_frames[i]);
1175 ff_free_picture_tables(&s->new_picture);
1176 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1178 av_freep(&s->avctx->stats_out);
1179 av_freep(&s->ac_stats);
/* The chroma matrices may alias the luma ones; free them only when
 * they are distinct allocations, then clear the pointers. */
1181 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1182 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1183 s->q_chroma_intra_matrix= NULL;
1184 s->q_chroma_intra_matrix16= NULL;
1185 av_freep(&s->q_intra_matrix);
1186 av_freep(&s->q_inter_matrix);
1187 av_freep(&s->q_intra_matrix16);
1188 av_freep(&s->q_inter_matrix16);
1189 av_freep(&s->input_picture);
1190 av_freep(&s->reordered_input_picture);
1191 av_freep(&s->dct_offset);
/**
 * Sum of absolute errors of a 16x16 block against a constant reference
 * value.  Used as a flatness/intra-cost measure: the closer the block is
 * to its own mean, the smaller the returned value.
 *
 * @param src    top-left pixel of the 16x16 block
 * @param ref    reference value each pixel is compared against
 * @param stride distance in bytes between successive rows of src
 * @return       sum over the 256 pixels of |src[pixel] - ref|
 */
static int get_sae(uint8_t *src, int ref, int stride)
{
    int acc = 0;

    for (int row = 0; row < 16; row++) {
        const uint8_t *line = src + row * stride;
        for (int col = 0; col < 16; col++) {
            int diff = line[col] - ref;
            acc += diff < 0 ? -diff : diff;
        }
    }

    return acc;
}
/* Count the 16x16 blocks of src that look cheaper to code as intra than
 * as inter relative to ref: a block is counted when its sum of absolute
 * differences against its own mean (SAE, see get_sae()) plus a fixed
 * bias of 500 is still below its SAD against the co-located block in
 * ref.  Height is rounded down to whole macroblocks. */
1210 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1211 uint8_t *ref, int stride)
1217 h = s->height & ~15;
1219 for (y = 0; y < h; y += 16) {
1220 for (x = 0; x < w; x += 16) {
1221 int offset = x + y * stride;
/* SAD against the co-located 16x16 block of the reference */
1222 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
/* pix_sum over 256 pixels, +128 then >>8 gives the rounded mean */
1224 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1225 int sae = get_sae(src + offset, mean, stride);
1227 acc += sae + 500 < sad;
/* Thin wrapper around ff_alloc_picture() that supplies the encoder's
 * geometry and layout parameters from the context.  'shared' selects
 * shared-buffer (zero-copy) allocation. */
1233 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1235 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1236 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1237 s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1238 &s->linesize, &s->uvlinesize);
/* Take one user-supplied frame into the encoder's input queue:
 * validate/derive its pts, either reference the user's buffer directly
 * (when layout and alignment permit) or copy it into an internal
 * picture, then append it to s->input_picture[] honoring the B-frame
 * encoding delay and the flush logic at end-of-stream. */
1241 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1243 Picture *pic = NULL;
1245 int i, display_picture_number = 0, ret;
/* Frames are emitted max_b_frames (or one reordering slot) later than
 * they arrive; low_delay mode has no delay at all. */
1246 int encoding_delay = s->max_b_frames ? s->max_b_frames
1247 : (s->low_delay ? 0 : 1);
1248 int flush_offset = 1;
1253 display_picture_number = s->input_picture_number++;
/* --- pts handling: user pts must be strictly increasing; otherwise a
 * pts is guessed from the previous one or the display number. --- */
1255 if (pts != AV_NOPTS_VALUE) {
1256 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1257 int64_t last = s->user_specified_pts;
1260 av_log(s->avctx, AV_LOG_ERROR,
1261 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1263 return AVERROR(EINVAL);
/* remember pts step once, to later derive dts for the delayed stream */
1266 if (!s->low_delay && display_picture_number == 1)
1267 s->dts_delta = pts - last;
1269 s->user_specified_pts = pts;
1271 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1272 s->user_specified_pts =
1273 pts = s->user_specified_pts + 1;
1274 av_log(s->avctx, AV_LOG_INFO,
1275 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1278 pts = display_picture_number;
/* --- decide whether the user buffer can be used directly: it must
 * exist, match the encoder's strides, and be sufficiently aligned. --- */
1282 if (!pic_arg->buf[0] ||
1283 pic_arg->linesize[0] != s->linesize ||
1284 pic_arg->linesize[1] != s->uvlinesize ||
1285 pic_arg->linesize[2] != s->uvlinesize)
1287 if ((s->width & 15) || (s->height & 15))
1289 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1291 if (s->linesize & (STRIDE_ALIGN-1))
1294 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1295 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1297 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1301 pic = &s->picture[i];
1305 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1308 ret = alloc_picture(s, pic, direct);
/* If the user data already sits exactly at our internal offset, no
 * copy is needed; otherwise copy plane by plane below. */
1313 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1314 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1315 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1318 int h_chroma_shift, v_chroma_shift;
1319 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
/* --- copy the three planes into the internal buffer, padding edges
 * where the dimensions are not macroblock aligned --- */
1323 for (i = 0; i < 3; i++) {
1324 int src_stride = pic_arg->linesize[i];
1325 int dst_stride = i ? s->uvlinesize : s->linesize;
1326 int h_shift = i ? h_chroma_shift : 0;
1327 int v_shift = i ? v_chroma_shift : 0;
1328 int w = s->width >> h_shift;
1329 int h = s->height >> v_shift;
1330 uint8_t *src = pic_arg->data[i];
1331 uint8_t *dst = pic->f->data[i];
/* interlaced MPEG-2 needs extra vertical padding in this case */
1334 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1335 && !s->progressive_sequence
1336 && FFALIGN(s->height, 32) - s->height > 16)
1339 if (!s->avctx->rc_buffer_size)
1340 dst += INPLACE_OFFSET;
1342 if (src_stride == dst_stride)
1343 memcpy(dst, src, src_stride * h)
;
1346 uint8_t *dst2 = dst;
1348 memcpy(dst2, src, w);
1353 if ((s->width & 15) || (s->height & (vpad-1))) {
1354 s->mpvencdsp.draw_edges(dst, dst_stride,
1364 ret = av_frame_copy_props(pic->f, pic_arg);
1368 pic->f->display_picture_number = display_picture_number;
1369 pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1371 /* Flushing: When we have not received enough input frames,
1372 * ensure s->input_picture[0] contains the first picture */
1373 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1374 if (s->input_picture[flush_offset])
1377 if (flush_offset <= 1)
1380 encoding_delay = encoding_delay - flush_offset + 1;
1383 /* shift buffer entries */
1384 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1385 s->input_picture[i - flush_offset] = s->input_picture[i];
1387 s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether picture p is similar enough to ref to be skipped.
 * Accumulates a per-8x8-block comparison score over all three planes
 * (luma is sampled at twice the macroblock density, hence bw == 2),
 * folds it with the exponent selected by frame_skip_exp, and compares
 * the result against the skip threshold and a lambda-scaled factor. */
1392 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1396 int64_t score64 = 0;
1398 for (plane = 0; plane < 3; plane++) {
1399 const int stride = p->f->linesize[plane];
1400 const int bw = plane ? 1 : 2;
1401 for (y = 0; y < s->mb_height * bw; y++) {
1402 for (x = 0; x < s->mb_width * bw; x++) {
/* non-shared pictures carry the INPLACE offset of 16 bytes */
1403 int off = p->shared ? 0 : 16;
1404 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1405 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1406 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
/* |exp| selects the accumulation norm: max, L1, L2, L3 or L4 */
1408 switch (FFABS(s->frame_skip_exp)) {
1409 case 0: score = FFMAX(score, v); break;
1410 case 1: score += FFABS(v); break;
1411 case 2: score64 += v * (int64_t)v; break;
1412 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1413 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
/* a negative exponent requests normalization back to a per-MB value */
1422 if (s->frame_skip_exp < 0)
1423 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1424 -1.0/s->frame_skip_exp);
1426 if (score64 < s->frame_skip_threshold)
1428 if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
/* Encode one frame with the auxiliary context c and report its coded
 * size; used by estimate_best_b_count() to measure candidate GOP
 * patterns.  The temporary packet is always unreferenced before
 * returning (the tail of the function is not visible in this chunk,
 * but callers treat the return value as a size/negative error). */
1433 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1435 AVPacket pkt = { 0 };
1436 int ret, got_output;
1438 av_init_packet(&pkt);
1439 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1444 av_packet_unref(&pkt);
/* b_frame_strategy == 2: brute-force search for the best number of
 * consecutive B-frames.  Encodes the queued input pictures, downscaled
 * by brd_scale, with every candidate B-run length j and picks the one
 * with the lowest rate-distortion cost (bits * lambda2 + SSE). */
1450 static int estimate_best_b_count(MpegEncContext *s)
1451 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1452 AVCodecContext *c = avcodec_alloc_context3(NULL);
1453 const int scale = s->brd_scale;
1454 int i, j, out_size, p_lambda, b_lambda, lambda2;
1455 int64_t best_rd = INT64_MAX;
1456 int best_b_count = -1;
1458 return AVERROR(ENOMEM);
1459 av_assert0(scale >= 0 && scale <= 3);
/* reuse the lambdas of the previous P/B frames as quality targets */
1462 //s->next_picture_ptr->quality;
1463 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1464 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1465 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1466 if (!b_lambda) // FIXME we should do this somewhere else
1467 b_lambda = p_lambda;
1468 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* --- configure the scratch encoder to mirror the real one, at the
 * downscaled resolution --- */
1471 c->width = s->width >> scale;
1472 c->height = s->height >> scale;
1473 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1474 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1475 c->mb_decision = s->avctx->mb_decision;
1476 c->me_cmp = s->avctx->me_cmp;
1477 c->mb_cmp = s->avctx->mb_cmp;
1478 c->me_sub_cmp = s->avctx->me_sub_cmp;
1479 c->pix_fmt = AV_PIX_FMT_YUV420P;
1480 c->time_base = s->avctx->time_base;
1481 c->max_b_frames = s->max_b_frames;
1483 if (avcodec_open2(c, codec, NULL) < 0)
/* --- shrink the queued input pictures (plus the previous reference)
 * into tmp_frames[]; slot 0 is the last reference frame --- */
1486 for (i = 0; i < s->max_b_frames + 2; i++) {
1487 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1488 s->next_picture_ptr;
1491 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1492 pre_input = *pre_input_ptr;
1493 memcpy(data, pre_input_ptr->f->data, sizeof(data));
1495 if (!pre_input.shared && i) {
1496 data[0] += INPLACE_OFFSET;
1497 data[1] += INPLACE_OFFSET;
1498 data[2] += INPLACE_OFFSET;
1501 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1502 s->tmp_frames[i]->linesize[0],
1504 pre_input.f->linesize[0],
1505 c->width, c->height);
1506 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1507 s->tmp_frames[i]->linesize[1],
1509 pre_input.f->linesize[1],
1510 c->width >> 1, c->height >> 1);
1511 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1512 s->tmp_frames[i]->linesize[2],
1514 pre_input.f->linesize[2],
1515 c->width >> 1, c->height >> 1);
/* --- try each candidate B-run length j and accumulate RD cost --- */
1519 for (j = 0; j < s->max_b_frames + 1; j++) {
1522 if (!s->input_picture[j])
1525 c->error[0] = c->error[1] = c->error[2] = 0;
/* frame 0 is an I-frame anchor; its cost is not counted */
1527 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1528 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1530 out_size = encode_frame(c, s->tmp_frames[0]);
1532 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1534 for (i = 0; i < s->max_b_frames + 1; i++) {
/* every (j+1)-th frame is a P-frame, the rest are B-frames */
1535 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1537 s->tmp_frames[i + 1]->pict_type = is_p ?
1538 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1539 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1541 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1543 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1546 /* get the delayed frames */
1548 out_size = encode_frame(c, NULL);
1549 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* add the accumulated distortion (PSNR error) to the cost */
1552 rd += c->error[0] + c->error[1] + c->error[2];
1560 avcodec_free_context(&c);
1562 return best_b_count;
/* Pick the next picture to encode: run the frame-skip check, decide the
 * picture type (I/P and how many B-frames precede the next reference),
 * reorder input pictures into coding order, and reference the chosen
 * one as s->new_picture / s->current_picture_ptr. */
1565 static int select_input_picture(MpegEncContext *s)
/* advance the reordered queue by one slot */
1569 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1570 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1571 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1573 /* set next picture type & ordering */
1574 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1575 if (s->frame_skip_threshold || s->frame_skip_factor) {
1576 if (s->picture_in_gop_number < s->gop_size &&
1577 s->next_picture_ptr &&
1578 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1579 // FIXME check that the gop check above is +-1 correct
1580 av_frame_unref(s->input_picture[0]->f);
/* skipped frame: still update the VBV model with zero bits */
1582 ff_vbv_update(s, 0);
/* no reference yet (or intra-only mode): force an I-frame */
1588 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1589 !s->next_picture_ptr || s->intra_only) {
1590 s->reordered_input_picture[0] = s->input_picture[0];
1591 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1592 s->reordered_input_picture[0]->f->coded_picture_number =
1593 s->coded_picture_number++;
/* two-pass mode: picture types come from the first-pass log */
1597 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1598 for (i = 0; i < s->max_b_frames + 1; i++) {
1599 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1601 if (pict_num >= s->rc_context.num_entries)
1603 if (!s->input_picture[i]) {
1604 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1608 s->input_picture[i]->f->pict_type =
1609 s->rc_context.entry[pict_num].new_pict_type;
/* --- choose the B-run length according to b_frame_strategy --- */
1613 if (s->b_frame_strategy == 0) {
1614 b_frames = s->max_b_frames;
1615 while (b_frames && !s->input_picture[b_frames])
1617 } else if (s->b_frame_strategy == 1) {
/* strategy 1: score frames by their intra-block count vs predecessor */
1618 for (i = 1; i < s->max_b_frames + 1; i++) {
1619 if (s->input_picture[i] &&
1620 s->input_picture[i]->b_frame_score == 0) {
1621 s->input_picture[i]->b_frame_score =
1623 s->input_picture[i ]->f->data[0],
1624 s->input_picture[i - 1]->f->data[0],
1628 for (i = 0; i < s->max_b_frames + 1; i++) {
1629 if (!s->input_picture[i] ||
1630 s->input_picture[i]->b_frame_score - 1 >
1631 s->mb_num / s->b_sensitivity)
1635 b_frames = FFMAX(0, i - 1);
/* reset scores so the next decision starts fresh */
1638 for (i = 0; i < b_frames + 1; i++) {
1639 s->input_picture[i]->b_frame_score = 0;
1641 } else if (s->b_frame_strategy == 2) {
/* strategy 2: brute-force RD search on downscaled frames */
1642 b_frames = estimate_best_b_count(s);
/* honor user-forced picture types inside the chosen run */
1647 for (i = b_frames - 1; i >= 0; i--) {
1648 int type = s->input_picture[i]->f->pict_type;
1649 if (type && type != AV_PICTURE_TYPE_B)
1652 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1653 b_frames == s->max_b_frames) {
1654 av_log(s->avctx, AV_LOG_ERROR,
1655 "warning, too many B-frames in a row\n");
/* keep GOP boundaries: shorten or restart the run as configured */
1658 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1659 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1660 s->gop_size > s->picture_in_gop_number) {
1661 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1663 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1665 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1669 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1670 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
/* emit the reference first (coding order), then the B-frames */
1673 s->reordered_input_picture[0] = s->input_picture[b_frames];
1674 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1675 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1676 s->reordered_input_picture[0]->f->coded_picture_number =
1677 s->coded_picture_number++;
1678 for (i = 0; i < b_frames; i++) {
1679 s->reordered_input_picture[i + 1] = s->input_picture[i];
1680 s->reordered_input_picture[i + 1]->f->pict_type =
1682 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1683 s->coded_picture_number++;
1688 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1690 if (s->reordered_input_picture[0]) {
/* B-frames are never used as references */
1691 s->reordered_input_picture[0]->reference =
1692 s->reordered_input_picture[0]->f->pict_type !=
1693 AV_PICTURE_TYPE_B ? 3 : 0;
1695 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1698 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1699 // input is a shared pix, so we can't modify it -> allocate a new
1700 // one & ensure that the shared one is reuseable
1703 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1706 pic = &s->picture[i];
1708 pic->reference = s->reordered_input_picture[0]->reference;
1709 if (alloc_picture(s, pic, 0) < 0) {
1713 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1717 /* mark us unused / free shared pic */
1718 av_frame_unref(s->reordered_input_picture[0]->f);
1719 s->reordered_input_picture[0]->shared = 0;
1721 s->current_picture_ptr = pic;
1723 // input is not a shared pix -> reuse buffer for current_pix
1724 s->current_picture_ptr = s->reordered_input_picture[0];
1725 for (i = 0; i < 4; i++) {
1726 s->new_picture.f->data[i] += INPLACE_OFFSET;
1729 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1730 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1731 s->current_picture_ptr)) < 0)
1734 s->picture_number = s->new_picture.f->display_picture_number;
/* Post-encode bookkeeping for the just-coded frame: pad the edges of
 * reference pictures (needed for unrestricted motion vectors), record
 * the per-type lambda for future rate decisions, and mirror state into
 * the deprecated coded_frame/error fields while those APIs remain. */
1739 static void frame_end(MpegEncContext *s)
1741 if (s->unrestricted_mv &&
1742 s->current_picture.reference &&
1744 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1745 int hshift = desc->log2_chroma_w;
1746 int vshift = desc->log2_chroma_h;
/* replicate border pixels so MC can read outside the picture */
1747 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1748 s->current_picture.f->linesize[0],
1749 s->h_edge_pos, s->v_edge_pos,
1750 EDGE_WIDTH, EDGE_WIDTH,
1751 EDGE_TOP | EDGE_BOTTOM);
1752 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1753 s->current_picture.f->linesize[1],
1754 s->h_edge_pos >> hshift,
1755 s->v_edge_pos >> vshift,
1756 EDGE_WIDTH >> hshift,
1757 EDGE_WIDTH >> vshift,
1758 EDGE_TOP | EDGE_BOTTOM);
1759 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1760 s->current_picture.f->linesize[2],
1761 s->h_edge_pos >> hshift,
1762 s->v_edge_pos >> vshift,
1763 EDGE_WIDTH >> hshift,
1764 EDGE_WIDTH >> vshift,
1765 EDGE_TOP | EDGE_BOTTOM);
/* remember quality per picture type for future lambda guesses */
1770 s->last_pict_type = s->pict_type;
1771 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1772 if (s->pict_type!= AV_PICTURE_TYPE_B)
1773 s->last_non_b_pict_type = s->pict_type;
/* legacy API mirrors, compiled out once the deprecations are removed */
1775 #if FF_API_CODED_FRAME
1776 FF_DISABLE_DEPRECATION_WARNINGS
1777 av_frame_unref(s->avctx->coded_frame);
1778 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1779 FF_ENABLE_DEPRECATION_WARNINGS
1781 #if FF_API_ERROR_FRAME
1782 FF_DISABLE_DEPRECATION_WARNINGS
1783 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1784 sizeof(s->current_picture.encoding_error));
1785 FF_ENABLE_DEPRECATION_WARNINGS
/* Refresh the per-coefficient noise-reduction offsets from the running
 * DCT error statistics, separately for intra and inter blocks.  When an
 * accumulator count grows past 2^16 both count and error sums are
 * halved, giving an exponentially decaying average. */
1789 static void update_noise_reduction(MpegEncContext *s)
1793 for (intra = 0; intra < 2; intra++) {
1794 if (s->dct_count[intra] > (1 << 16)) {
1795 for (i = 0; i < 64; i++) {
1796 s->dct_error_sum[intra][i] >>= 1;
1798 s->dct_count[intra] >>= 1;
/* offset[i] ~ noise_reduction * count / error_sum[i] (+1 avoids /0) */
1801 for (i = 0; i < 64; i++) {
1802 s->dct_offset[intra][i] = (s->noise_reduction *
1803 s->dct_count[intra] +
1804 s->dct_error_sum[intra][i] / 2) /
1805 (s->dct_error_sum[intra][i] + 1);
/* Per-frame setup before encoding: rotate last/next/current reference
 * pictures, adjust data pointers for field pictures, select the
 * matching dequantizers for the output format, and refresh the noise
 * reduction tables when active. */
1810 static int frame_start(MpegEncContext *s)
1814 /* mark & release old frames */
1815 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1816 s->last_picture_ptr != s->next_picture_ptr &&
1817 s->last_picture_ptr->f->buf[0]) {
1818 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1821 s->current_picture_ptr->f->pict_type = s->pict_type;
1822 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1824 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1825 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1826 s->current_picture_ptr)) < 0)
/* references advance only on non-B frames */
1829 if (s->pict_type != AV_PICTURE_TYPE_B) {
1830 s->last_picture_ptr = s->next_picture_ptr;
1832 s->next_picture_ptr = s->current_picture_ptr;
1835 if (s->last_picture_ptr) {
1836 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1837 if (s->last_picture_ptr->f->buf[0] &&
1838 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1839 s->last_picture_ptr)) < 0)
1842 if (s->next_picture_ptr) {
1843 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1844 if (s->next_picture_ptr->f->buf[0] &&
1845 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1846 s->next_picture_ptr)) < 0)
/* field pictures: point at the selected field and double the strides */
1850 if (s->picture_structure!= PICT_FRAME) {
1852 for (i = 0; i < 4; i++) {
1853 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1854 s->current_picture.f->data[i] +=
1855 s->current_picture.f->linesize[i];
1857 s->current_picture.f->linesize[i] *= 2;
1858 s->last_picture.f->linesize[i] *= 2;
1859 s->next_picture.f->linesize[i] *= 2;
/* pick the dequantizer matching the bitstream syntax being produced */
1863 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1864 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1865 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1866 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1867 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1868 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1870 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1871 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1874 if (s->dct_error_sum) {
1875 av_assert2(s->noise_reduction && s->encoding);
1876 update_noise_reduction(s);
/* Public encode entry point: queue the incoming frame, select the next
 * picture in coding order, encode it (re-encoding with a higher lambda
 * if the VBV buffer would overflow), append stuffing where the codec
 * requires it, patch the MPEG-1/2 vbv_delay for CBR, and fill in the
 * output packet with pts/dts and flags. */
1882 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1883 const AVFrame *pic_arg, int *got_packet)
1885 MpegEncContext *s = avctx->priv_data;
1886 int i, stuffing_count, ret;
1887 int context_count = s->slice_context_count;
1889 s->vbv_ignore_qmax = 0;
1891 s->picture_in_gop_number++;
1893 if (load_input_picture(s, pic_arg) < 0)
1896 if (select_input_picture(s) < 0) {
/* output? (new_picture is set when there is something to encode) */
1901 if (s->new_picture.f->data[0]) {
/* single-slice encodes into the context's growable buffer; otherwise a
 * worst-case sized packet is allocated up front */
1902 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1903 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1905 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1906 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1909 s->mb_info_ptr = av_packet_new_side_data(pkt,
1910 AV_PKT_DATA_H263_MB_INFO,
1911 s->mb_width*s->mb_height*12);
1912 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* partition the packet among the slice threads by mb-row share */
1915 for (i = 0; i < context_count; i++) {
1916 int start_y = s->thread_context[i]->start_mb_y;
1917 int end_y = s->thread_context[i]-> end_mb_y;
1918 int h = s->mb_height;
1919 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1920 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1922 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1925 s->pict_type = s->new_picture.f->pict_type;
1927 ret = frame_start(s);
1931 ret = encode_picture(s, s->picture_number);
1932 if (growing_buffer) {
1933 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1934 pkt->data = s->pb.buf;
1935 pkt->size = avctx->internal->byte_buffer_size;
/* legacy per-frame bit statistics, kept while the API exists */
1940 #if FF_API_STAT_BITS
1941 FF_DISABLE_DEPRECATION_WARNINGS
1942 avctx->header_bits = s->header_bits;
1943 avctx->mv_bits = s->mv_bits;
1944 avctx->misc_bits = s->misc_bits;
1945 avctx->i_tex_bits = s->i_tex_bits;
1946 avctx->p_tex_bits = s->p_tex_bits;
1947 avctx->i_count = s->i_count;
1948 // FIXME f/b_count in avctx
1949 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1950 avctx->skip_count = s->skip_count;
1951 FF_ENABLE_DEPRECATION_WARNINGS
1956 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1957 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
/* --- VBV check: if this frame is too large, raise lambda and retry the
 * encode loop, undoing per-frame state changes first --- */
1959 if (avctx->rc_buffer_size) {
1960 RateControlContext *rcc = &s->rc_context;
1961 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1962 int hq = (s->avctx->mb_decision == FF_MB_DECISION_RD || s->avctx->trellis);
1963 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1965 if (put_bits_count(&s->pb) > max_size &&
1966 s->lambda < s->lmax) {
1967 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1968 (s->qscale + 1) / s->qscale);
1969 if (s->adaptive_quant) {
1971 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1972 s->lambda_table[i] =
1973 FFMAX(s->lambda_table[i] + min_step,
1974 s->lambda_table[i] * (s->qscale + 1) /
1977 s->mb_skipped = 0; // done in frame_start()
1978 // done in encode_picture() so we must undo it
1979 if (s->pict_type == AV_PICTURE_TYPE_P) {
1980 if (s->flipflop_rounding ||
1981 s->codec_id == AV_CODEC_ID_H263P ||
1982 s->codec_id == AV_CODEC_ID_MPEG4)
1983 s->no_rounding ^= 1;
1985 if (s->pict_type != AV_PICTURE_TYPE_B) {
1986 s->time_base = s->last_time_base;
1987 s->last_non_b_time = s->time - s->pp_time;
/* rewind all slice bit writers for the retry */
1989 for (i = 0; i < context_count; i++) {
1990 PutBitContext *pb = &s->thread_context[i]->pb;
1991 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1993 s->vbv_ignore_qmax = 1;
1994 av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1998 av_assert0(s->avctx->rc_max_rate);
2001 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
2002 ff_write_pass1_stats(s);
2004 for (i = 0; i < 4; i++) {
2005 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
2006 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
2008 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
2009 s->current_picture_ptr->encoding_error,
2010 (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
2013 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
2014 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
2015 s->misc_bits + s->i_tex_bits +
2017 flush_put_bits(&s->pb);
2018 s->frame_bits = put_bits_count(&s->pb);
/* --- append stuffing bits/bytes to satisfy the VBV model --- */
2020 stuffing_count = ff_vbv_update(s, s->frame_bits);
2021 s->stuffing_bits = 8*stuffing_count;
2022 if (stuffing_count) {
2023 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
2024 stuffing_count + 50) {
2025 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
2029 switch (s->codec_id) {
2030 case AV_CODEC_ID_MPEG1VIDEO:
2031 case AV_CODEC_ID_MPEG2VIDEO:
2032 while (stuffing_count--) {
2033 put_bits(&s->pb, 8, 0);
2036 case AV_CODEC_ID_MPEG4:
/* MPEG-4 stuffing uses a dedicated start code (0x1C3) */
2037 put_bits(&s->pb, 16, 0);
2038 put_bits(&s->pb, 16, 0x1C3);
2039 stuffing_count -= 4;
2040 while (stuffing_count--) {
2041 put_bits(&s->pb, 8, 0xFF);
2045 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2047 flush_put_bits(&s->pb);
2048 s->frame_bits = put_bits_count(&s->pb);
2051 /* update MPEG-1/2 vbv_delay for CBR */
2052 if (s->avctx->rc_max_rate &&
2053 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
2054 s->out_format == FMT_MPEG1 &&
2055 90000LL * (avctx->rc_buffer_size - 1) <=
2056 s->avctx->rc_max_rate * 0xFFFFLL) {
2057 AVCPBProperties *props;
2060 int vbv_delay, min_delay;
2061 double inbits = s->avctx->rc_max_rate *
2062 av_q2d(s->avctx->time_base);
2063 int minbits = s->frame_bits - 8 *
2064 (s->vbv_delay_ptr - s->pb.buf - 1);
2065 double bits = s->rc_context.buffer_index + minbits - inbits;
2068 av_log(s->avctx, AV_LOG_ERROR,
2069 "Internal error, negative bits\n");
2071 assert(s->repeat_first_field == 0);
/* vbv_delay is expressed in 90 kHz clock ticks */
2073 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
2074 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
2075 s->avctx->rc_max_rate;
2077 vbv_delay = FFMAX(vbv_delay, min_delay);
2079 av_assert0(vbv_delay < 0xFFFF);
/* patch the 16-bit vbv_delay field straddling three header bytes */
2081 s->vbv_delay_ptr[0] &= 0xF8;
2082 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2083 s->vbv_delay_ptr[1] = vbv_delay >> 5;
2084 s->vbv_delay_ptr[2] &= 0x07;
2085 s->vbv_delay_ptr[2] |= vbv_delay << 3;
2087 props = av_cpb_properties_alloc(&props_size);
2089 return AVERROR(ENOMEM);
2090 props->vbv_delay = vbv_delay * 300;
2092 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
2093 (uint8_t*)props, props_size);
2099 #if FF_API_VBV_DELAY
2100 FF_DISABLE_DEPRECATION_WARNINGS
2101 avctx->vbv_delay = vbv_delay * 300;
2102 FF_ENABLE_DEPRECATION_WARNINGS
2105 s->total_bits += s->frame_bits;
2106 #if FF_API_STAT_BITS
2107 FF_DISABLE_DEPRECATION_WARNINGS
2108 avctx->frame_bits = s->frame_bits;
2109 FF_ENABLE_DEPRECATION_WARNINGS
/* --- fill the output packet: dts trails pts by dts_delta when the
 * stream is reordered (B-frames) --- */
2113 pkt->pts = s->current_picture.f->pts;
2114 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2115 if (!s->current_picture.f->coded_picture_number)
2116 pkt->dts = pkt->pts - s->dts_delta;
2118 pkt->dts = s->reordered_pts;
2119 s->reordered_pts = pkt->pts;
2121 pkt->dts = pkt->pts;
2122 if (s->current_picture.f->key_frame)
2123 pkt->flags |= AV_PKT_FLAG_KEY;
2125 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2130 /* release non-reference frames */
2131 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2132 if (!s->picture[i].reference)
2133 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2136 av_assert1((s->frame_bits & 7) == 0);
2138 pkt->size = s->frame_bits / 8;
2139 *got_packet = !!pkt->size;
/* Zero out block n entirely when its non-zero coefficients are so few
 * and so low-frequency-weighted that dropping them is cheaper than
 * coding them.  Each coefficient position contributes tab[] (higher
 * weight near DC); if the total stays below 'threshold' the block's
 * last_index is collapsed.  A negative threshold additionally requests
 * that the DC coefficient be preserved (skip_dc). */
2143 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2144 int n, int threshold)
2146 static const char tab[64] = {
2147 3, 2, 2, 1, 1, 1, 1, 1,
2148 1, 1, 1, 1, 1, 1, 1, 1,
2149 1, 1, 1, 1, 1, 1, 1, 1,
2150 0, 0, 0, 0, 0, 0, 0, 0,
2151 0, 0, 0, 0, 0, 0, 0, 0,
2152 0, 0, 0, 0, 0, 0, 0, 0,
2153 0, 0, 0, 0, 0, 0, 0, 0,
2154 0, 0, 0, 0, 0, 0, 0, 0
2159 int16_t *block = s->block[n];
2160 const int last_index = s->block_last_index[n];
2163 if (threshold < 0) {
2165 threshold = -threshold;
2169 /* Are all we could set to zero already zero? */
2170 if (last_index <= skip_dc - 1)
/* accumulate the position-weighted score of the non-zero coeffs */
2173 for (i = 0; i <= last_index; i++) {
2174 const int j = s->intra_scantable.permutated[i];
2175 const int level = FFABS(block[j]);
2177 if (skip_dc && i == 0)
2181 } else if (level > 1) {
2187 if (score >= threshold)
/* below threshold: clear everything (except DC when skip_dc is set) */
2189 for (i = skip_dc; i <= last_index; i++) {
2190 const int j = s->intra_scantable.permutated[i];
2194 s->block_last_index[n] = 0;
2196 s->block_last_index[n] = -1;
/* Clamp the quantized coefficients of a block into the codec's legal
 * range [min_qcoeff, max_qcoeff], counting how many had to be clipped.
 * The intra DC coefficient is left untouched.  A warning is printed in
 * the simple MB-decision mode, where clipping is not compensated by RD. */
2199 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2203 const int maxlevel = s->max_qcoeff;
2204 const int minlevel = s->min_qcoeff;
2208 i = 1; // skip clipping of intra dc
2212 for (; i <= last_index; i++) {
2213 const int j = s->intra_scantable.permutated[i];
2214 int level = block[j];
2216 if (level > maxlevel) {
2219 } else if (level < minlevel) {
2227 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2228 av_log(s->avctx, AV_LOG_INFO,
2229 "warning, clipping %d dct coefficients to %d..%d\n",
2230 overflow, minlevel, maxlevel);
/* Compute a perceptual weight for each pixel of an 8x8 block from the
 * local activity of its (up to) 3x3 neighbourhood: 36 * sqrt of the
 * neighbourhood's count-scaled variance, divided by the sample count.
 * Flat areas get low weights, busy areas high ones. */
2233 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2237 for (y = 0; y < 8; y++) {
2238 for (x = 0; x < 8; x++) {
/* neighbourhood window clipped to the block borders */
2244 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2245 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2246 int v = ptr[x2 + y2 * stride];
/* count*sqr - sum*sum is count^2 times the sample variance */
2252 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2257 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2258 int motion_x, int motion_y,
2259 int mb_block_height,
2263 int16_t weight[12][64];
2264 int16_t orig[12][64];
2265 const int mb_x = s->mb_x;
2266 const int mb_y = s->mb_y;
2269 int dct_offset = s->linesize * 8; // default for progressive frames
2270 int uv_dct_offset = s->uvlinesize * 8;
2271 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2272 ptrdiff_t wrap_y, wrap_c;
2274 for (i = 0; i < mb_block_count; i++)
2275 skip_dct[i] = s->skipdct;
2277 if (s->adaptive_quant) {
2278 const int last_qp = s->qscale;
2279 const int mb_xy = mb_x + mb_y * s->mb_stride;
2281 s->lambda = s->lambda_table[mb_xy];
2284 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2285 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2286 s->dquant = s->qscale - last_qp;
2288 if (s->out_format == FMT_H263) {
2289 s->dquant = av_clip(s->dquant, -2, 2);
2291 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2293 if (s->pict_type == AV_PICTURE_TYPE_B) {
2294 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2297 if (s->mv_type == MV_TYPE_8X8)
2303 ff_set_qscale(s, last_qp + s->dquant);
2304 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2305 ff_set_qscale(s, s->qscale + s->dquant);
2307 wrap_y = s->linesize;
2308 wrap_c = s->uvlinesize;
2309 ptr_y = s->new_picture.f->data[0] +
2310 (mb_y * 16 * wrap_y) + mb_x * 16;
2311 ptr_cb = s->new_picture.f->data[1] +
2312 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2313 ptr_cr = s->new_picture.f->data[2] +
2314 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2316 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2317 uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2318 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2319 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2320 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2322 16, 16, mb_x * 16, mb_y * 16,
2323 s->width, s->height);
2325 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2327 mb_block_width, mb_block_height,
2328 mb_x * mb_block_width, mb_y * mb_block_height,
2330 ptr_cb = ebuf + 16 * wrap_y;
2331 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2333 mb_block_width, mb_block_height,
2334 mb_x * mb_block_width, mb_y * mb_block_height,
2336 ptr_cr = ebuf + 16 * wrap_y + 16;
2340 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2341 int progressive_score, interlaced_score;
2343 s->interlaced_dct = 0;
2344 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2345 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2346 NULL, wrap_y, 8) - 400;
2348 if (progressive_score > 0) {
2349 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2350 NULL, wrap_y * 2, 8) +
2351 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2352 NULL, wrap_y * 2, 8);
2353 if (progressive_score > interlaced_score) {
2354 s->interlaced_dct = 1;
2356 dct_offset = wrap_y;
2357 uv_dct_offset = wrap_c;
2359 if (s->chroma_format == CHROMA_422 ||
2360 s->chroma_format == CHROMA_444)
2366 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2367 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2368 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2369 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2371 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2375 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2376 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2377 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2378 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2379 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2380 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2381 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2382 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2383 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2384 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2385 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2386 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2390 op_pixels_func (*op_pix)[4];
2391 qpel_mc_func (*op_qpix)[16];
2392 uint8_t *dest_y, *dest_cb, *dest_cr;
2394 dest_y = s->dest[0];
2395 dest_cb = s->dest[1];
2396 dest_cr = s->dest[2];
2398 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2399 op_pix = s->hdsp.put_pixels_tab;
2400 op_qpix = s->qdsp.put_qpel_pixels_tab;
2402 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2403 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2406 if (s->mv_dir & MV_DIR_FORWARD) {
2407 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2408 s->last_picture.f->data,
2410 op_pix = s->hdsp.avg_pixels_tab;
2411 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2413 if (s->mv_dir & MV_DIR_BACKWARD) {
2414 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2415 s->next_picture.f->data,
2419 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2420 int progressive_score, interlaced_score;
2422 s->interlaced_dct = 0;
2423 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2424 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2428 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2429 progressive_score -= 400;
2431 if (progressive_score > 0) {
2432 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2434 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2438 if (progressive_score > interlaced_score) {
2439 s->interlaced_dct = 1;
2441 dct_offset = wrap_y;
2442 uv_dct_offset = wrap_c;
2444 if (s->chroma_format == CHROMA_422)
2450 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2451 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2452 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2453 dest_y + dct_offset, wrap_y);
2454 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2455 dest_y + dct_offset + 8, wrap_y);
2457 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2461 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2462 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2463 if (!s->chroma_y_shift) { /* 422 */
2464 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2465 dest_cb + uv_dct_offset, wrap_c);
2466 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2467 dest_cr + uv_dct_offset, wrap_c);
2470 /* pre quantization */
2471 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2472 2 * s->qscale * s->qscale) {
2474 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2476 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2478 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2479 wrap_y, 8) < 20 * s->qscale)
2481 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2482 wrap_y, 8) < 20 * s->qscale)
2484 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2486 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2488 if (!s->chroma_y_shift) { /* 422 */
2489 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2490 dest_cb + uv_dct_offset,
2491 wrap_c, 8) < 20 * s->qscale)
2493 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2494 dest_cr + uv_dct_offset,
2495 wrap_c, 8) < 20 * s->qscale)
2501 if (s->quantizer_noise_shaping) {
2503 get_visual_weight(weight[0], ptr_y , wrap_y);
2505 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2507 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2509 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2511 get_visual_weight(weight[4], ptr_cb , wrap_c);
2513 get_visual_weight(weight[5], ptr_cr , wrap_c);
2514 if (!s->chroma_y_shift) { /* 422 */
2516 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2519 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2522 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2525 /* DCT & quantize */
2526 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2528 for (i = 0; i < mb_block_count; i++) {
2531 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2532 // FIXME we could decide to change to quantizer instead of
2534 // JS: I don't think that would be a good idea it could lower
2535 // quality instead of improve it. Just INTRADC clipping
2536 // deserves changes in quantizer
2538 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2540 s->block_last_index[i] = -1;
2542 if (s->quantizer_noise_shaping) {
2543 for (i = 0; i < mb_block_count; i++) {
2545 s->block_last_index[i] =
2546 dct_quantize_refine(s, s->block[i], weight[i],
2547 orig[i], i, s->qscale);
2552 if (s->luma_elim_threshold && !s->mb_intra)
2553 for (i = 0; i < 4; i++)
2554 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2555 if (s->chroma_elim_threshold && !s->mb_intra)
2556 for (i = 4; i < mb_block_count; i++)
2557 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2559 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2560 for (i = 0; i < mb_block_count; i++) {
2561 if (s->block_last_index[i] == -1)
2562 s->coded_score[i] = INT_MAX / 256;
2567 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2568 s->block_last_index[4] =
2569 s->block_last_index[5] = 0;
2571 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2572 if (!s->chroma_y_shift) { /* 422 / 444 */
2573 for (i=6; i<12; i++) {
2574 s->block_last_index[i] = 0;
2575 s->block[i][0] = s->block[4][0];
2580 // non c quantize code returns incorrect block_last_index FIXME
2581 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2582 for (i = 0; i < mb_block_count; i++) {
2584 if (s->block_last_index[i] > 0) {
2585 for (j = 63; j > 0; j--) {
2586 if (s->block[i][s->intra_scantable.permutated[j]])
2589 s->block_last_index[i] = j;
2594 /* huffman encode */
2595 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2596 case AV_CODEC_ID_MPEG1VIDEO:
2597 case AV_CODEC_ID_MPEG2VIDEO:
2598 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2599 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2601 case AV_CODEC_ID_MPEG4:
2602 if (CONFIG_MPEG4_ENCODER)
2603 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2605 case AV_CODEC_ID_MSMPEG4V2:
2606 case AV_CODEC_ID_MSMPEG4V3:
2607 case AV_CODEC_ID_WMV1:
2608 if (CONFIG_MSMPEG4_ENCODER)
2609 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2611 case AV_CODEC_ID_WMV2:
2612 if (CONFIG_WMV2_ENCODER)
2613 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2615 case AV_CODEC_ID_H261:
2616 if (CONFIG_H261_ENCODER)
2617 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2619 case AV_CODEC_ID_H263:
2620 case AV_CODEC_ID_H263P:
2621 case AV_CODEC_ID_FLV1:
2622 case AV_CODEC_ID_RV10:
2623 case AV_CODEC_ID_RV20:
2624 if (CONFIG_H263_ENCODER)
2625 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2627 case AV_CODEC_ID_MJPEG:
2628 case AV_CODEC_ID_AMV:
2629 if (CONFIG_MJPEG_ENCODER)
2630 ff_mjpeg_encode_mb(s, s->block);
/**
 * Encode one macroblock, dispatching to encode_mb_internal() with the
 * chroma block geometry implied by the subsampling mode:
 *   4:2:0 -> 8x8  chroma blocks,  6 blocks per MB (4 luma + 2 chroma)
 *   4:2:2 -> 16x8 chroma blocks,  8 blocks per MB (4 luma + 4 chroma)
 *   else (4:4:4) -> 16x16 chroma, 12 blocks per MB (4 luma + 8 chroma)
 */
2637 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2639     if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2640     else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2641     else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/**
 * Snapshot the parts of the encoder state that a trial macroblock encode
 * mutates (MV prediction state, DC predictors, bit-count statistics,
 * quantizer state) from s into d, so the trial can later be undone or
 * compared.  Counterpart of copy_context_after_encode().
 * NOTE(review): this view is truncated — some copied fields (e.g. d->mv,
 * the last_dc loop header, pb contexts) are not visible here.
 */
2644 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2647     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2650     d->mb_skip_run= s->mb_skip_run;
2652         d->last_dc[i] = s->last_dc[i];
/* bit-usage statistics: saved so a rejected trial encode does not skew them */
2655     d->mv_bits= s->mv_bits;
2656     d->i_tex_bits= s->i_tex_bits;
2657     d->p_tex_bits= s->p_tex_bits;
2658     d->i_count= s->i_count;
2659     d->f_count= s->f_count;
2660     d->b_count= s->b_count;
2661     d->skip_count= s->skip_count;
2662     d->misc_bits= s->misc_bits;
/* quantizer state, so dquant experiments can be rolled back */
2666     d->qscale= s->qscale;
2667     d->dquant= s->dquant;
2669     d->esc3_level_length= s->esc3_level_length;
/**
 * Copy the encoder state produced by a trial macroblock encode from s
 * back into d (typically the "best so far" context, or the main context
 * when the winning trial is committed).  Mirrors
 * copy_context_before_encode() but additionally carries the MB coding
 * decision (mb_intra, mb_skipped, mv_type, mv_dir), the per-block
 * last-nonzero indices and the interlaced-DCT flag.
 * NOTE(review): truncated view — loop headers for last_dc[] and
 * block_last_index[] are not visible here.
 */
2672 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2675     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2676     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2679     d->mb_skip_run= s->mb_skip_run;
2681         d->last_dc[i] = s->last_dc[i];
/* bit-usage statistics accumulated by the trial encode */
2684     d->mv_bits= s->mv_bits;
2685     d->i_tex_bits= s->i_tex_bits;
2686     d->p_tex_bits= s->p_tex_bits;
2687     d->i_count= s->i_count;
2688     d->f_count= s->f_count;
2689     d->b_count= s->b_count;
2690     d->skip_count= s->skip_count;
2691     d->misc_bits= s->misc_bits;
/* the macroblock coding decision itself */
2693     d->mb_intra= s->mb_intra;
2694     d->mb_skipped= s->mb_skipped;
2695     d->mv_type= s->mv_type;
2696     d->mv_dir= s->mv_dir;
2698     if(s->data_partitioning){
2700         d->tex_pb= s->tex_pb;
2704         d->block_last_index[i]= s->block_last_index[i];
2705     d->interlaced_dct= s->interlaced_dct;
2706     d->qscale= s->qscale;
2708     d->esc3_level_length= s->esc3_level_length;
/**
 * Trial-encode one macroblock with a candidate coding mode ('type') and
 * motion vectors, into one of two ping-pong bit buffers, and keep the
 * result in 'best' if its cost beats *dmin.
 *
 * Cost is the bit count of the trial; with FF_MB_DECISION_RD it becomes a
 * rate-distortion cost: bits * lambda2 + SSE(reconstruction) << FF_LAMBDA_SHIFT,
 * which requires decoding the MB into a scratchpad (s->dest is temporarily
 * redirected to sc.rd_scratchpad and restored afterwards).
 * NOTE(review): truncated view — the comparison against *dmin, the
 * *next_block flip, and the dest-restore condition are not visible here;
 * presumably they sit in the missing lines.
 */
2711 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2712                            PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2713                            int *dmin, int *next_block, int motion_x, int motion_y)
2716     uint8_t *dest_backup[3];
2718     copy_context_before_encode(s, backup, type);
/* select the ping-pong output buffers for this trial */
2720     s->block= s->blocks[*next_block];
2721     s->pb= pb[*next_block];
2722     if(s->data_partitioning){
2723         s->pb2   = pb2   [*next_block];
2724         s->tex_pb= tex_pb[*next_block];
/* redirect reconstruction output into the RD scratchpad (Y, then Cb/Cr
 * packed after the 16-line luma area) so the real picture is untouched */
2728         memcpy(dest_backup, s->dest, sizeof(s->dest));
2729         s->dest[0] = s->sc.rd_scratchpad;
2730         s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2731         s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2732         av_assert0(s->linesize >= 32); //FIXME
2735     encode_mb(s, motion_x, motion_y);
/* rate part of the cost: bits written to all active bitstreams */
2737     score= put_bits_count(&s->pb);
2738     if(s->data_partitioning){
2739         score+= put_bits_count(&s->pb2);
2740         score+= put_bits_count(&s->tex_pb);
2743     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2744         ff_mpv_decode_mb(s, s->block);
/* distortion part: SSE of the decoded MB, combined with lambda2 */
2746         score *= s->lambda2;
2747         score += sse_mb(s) << FF_LAMBDA_SHIFT;
2751         memcpy(s->dest, dest_backup, sizeof(s->dest));
2758         copy_context_after_encode(best, s, type);
/**
 * Sum of squared errors between two w x h pixel areas.
 * Uses the DSP fast paths for the common 16x16 and 8x8 cases and a scalar
 * fallback loop otherwise.  'sq' points into the middle of ff_square_tab
 * (+256) so it can be indexed directly by the signed pixel difference
 * in [-255, 255].
 * NOTE(review): truncated view — the w==16 condition, loop headers and the
 * final return of the accumulated value are in missing lines.
 */
2762 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2763     uint32_t *sq = ff_square_tab + 256;
2768         return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2769     else if(w==8 && h==8)
2770         return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2774             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/**
 * Distortion of the current macroblock: compares the reconstruction in
 * s->dest[] against the source frame (s->new_picture) for luma and both
 * chroma planes.
 *
 * Full 16x16 MBs take the fast DSP path — NSSE (noise-preserving SSE)
 * when mb_cmp selects it, plain SSE otherwise.  MBs clipped by the
 * right/bottom picture edge fall through to the scalar sse() helper with
 * the clipped w/h (chroma halved, i.e. 4:2:0 assumed on this path).
 * NOTE(review): truncated view — the declarations of w/h and the
 * "if(w==16 && h==16)" guard around the fast path are in missing lines.
 */
2783 static int sse_mb(MpegEncContext *s){
/* clip the MB size at the picture border */
2787     if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2788     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2791         if(s->avctx->mb_cmp == FF_CMP_NSSE){
2792             return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize   * 16, s->dest[0], s->linesize,   16) +
2793                    s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[1], s->uvlinesize,  8) +
2794                    s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[2], s->uvlinesize,  8);
2796             return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize   * 16, s->dest[0], s->linesize,   16) +
2797                    s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[1], s->uvlinesize,  8) +
2798                    s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[2], s->uvlinesize,  8);
/* edge MB: scalar fallback with clipped dimensions */
2801         return  sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2802                +sse(s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2803                +sse(s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/**
 * Slice-thread worker: pre-pass P-frame motion estimation over this
 * thread's MB rows, scanning bottom-to-top and right-to-left (the reverse
 * of the main pass) using the pre-pass diamond size (pre_dia_size).
 * NOTE(review): truncated view — the trailing "return 0;" and closing
 * brace are in missing lines.
 */
2806 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2807     MpegEncContext *s= *(void**)arg;
2811     s->me.dia_size= s->avctx->pre_dia_size;
2812     s->first_slice_line=1;
2813     for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2814         for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2815             ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2817         s->first_slice_line=0;
/**
 * Slice-thread worker: main motion-estimation pass.  For each MB in this
 * thread's row range, estimates motion (B-frame or P-frame variant) and
 * stores the resulting vectors / mb_type in the context tables.
 * The block_index[0..3] values are advanced by 2 per MB to keep the four
 * luma block indices in sync with mb_x (initialised via
 * ff_init_block_index() at the start of each row).
 * NOTE(review): truncated view — the trailing "return 0;" / closing brace
 * are in missing lines.
 */
2825 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2826     MpegEncContext *s= *(void**)arg;
2828     ff_check_alignment();
2830     s->me.dia_size= s->avctx->dia_size;
2831     s->first_slice_line=1;
2832     for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2833         s->mb_x=0; //for block init below
2834         ff_init_block_index(s);
2835         for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2836             s->block_index[0]+=2;
2837             s->block_index[1]+=2;
2838             s->block_index[2]+=2;
2839             s->block_index[3]+=2;
2841             /* compute motion vector & mb_type and store in context */
2842             if(s->pict_type==AV_PICTURE_TYPE_B)
2843                 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2845                 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2847         s->first_slice_line=0;
/**
 * Slice-thread worker: computes per-macroblock luma variance and mean of
 * the source frame, used later by rate control / adaptive quantization.
 * varc = (sum(x^2) - sum(x)^2/256 + 500 + 128) >> 8, i.e. the 16x16 block
 * variance with rounding and a small (+500) bias; mean = (sum+128) >> 8.
 * Results go into current_picture.mb_var / mb_mean and the variance is
 * accumulated into me.mb_var_sum_temp.
 * NOTE(review): truncated view — the xx/yy declarations and the trailing
 * "return 0;" are in missing lines.
 */
2852 static int mb_var_thread(AVCodecContext *c, void *arg){
2853     MpegEncContext *s= *(void**)arg;
2856     ff_check_alignment();
2858     for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2859         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2862             uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2864             int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2866             varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2867                     (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2869             s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2870             s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2871             s->me.mb_var_sum_temp    += varc;
/**
 * Finish the current slice: run codec-specific end-of-slice handling
 * (MPEG-4 partition merging + stuffing, MJPEG stuffing), byte-align and
 * flush the main bitstream writer, and in two-pass mode (PASS1) charge
 * the alignment/stuffing bits to misc_bits.
 */
2877 static void write_slice_end(MpegEncContext *s){
2878     if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2879         if(s->partitioned_frame){
/* merge the separately-written partition bitstreams back into s->pb */
2880             ff_mpeg4_merge_partitions(s);
2883         ff_mpeg4_stuffing(&s->pb);
2884     }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2885         ff_mjpeg_encode_stuffing(s);
2888     avpriv_align_put_bits(&s->pb);
2889     flush_put_bits(&s->pb);
2891     if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2892         s->misc_bits+= get_bits_diff(s);
/**
 * Fill in one 12-byte macroblock-info record (AV_PKT_DATA_H263_MB_INFO
 * style side data for RTP packetization): bitstream offset, quantizer,
 * GOB number, MB address within the GOB, and the predicted motion vector
 * (hmv1/vmv1); the second MV pair is always 0 since 4MV is not
 * implemented.  'ptr' addresses the most recently reserved 12-byte slot
 * at the end of the mb_info buffer.
 * NOTE(review): truncated view — declarations of pred_x/pred_y are in
 * missing lines.
 */
2895 static void write_mb_info(MpegEncContext *s)
2897     uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2898     int offset = put_bits_count(&s->pb);
2899     int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2900     int gobn = s->mb_y / s->gob_index;
2902     if (CONFIG_H263_ENCODER)
2903         ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2904     bytestream_put_le32(&ptr, offset);
2905     bytestream_put_byte(&ptr, s->qscale);
2906     bytestream_put_byte(&ptr, gobn);
2907     bytestream_put_le16(&ptr, mba);
2908     bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2909     bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2910     /* 4MV not implemented */
2911     bytestream_put_byte(&ptr, 0); /* hmv2 */
2912     bytestream_put_byte(&ptr, 0); /* vmv2 */
/**
 * Maintain the per-MB info records as encoding progresses.  Called both
 * per MB (startcode=0) and right after a resync/start code (startcode=1).
 * When enough bits have accumulated since the last record (mb_info
 * bytes), a new 12-byte slot is reserved; at a start code the current
 * byte position is latched so the next record points at the start code.
 * NOTE(review): truncated view — the early-return guard for !s->mb_info,
 * the startcode branch structure, and the write_mb_info() call are in
 * missing lines; the control flow documented here is partly inferred.
 */
2915 static void update_mb_info(MpegEncContext *s, int startcode)
2919     if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2920         s->mb_info_size += 12;
2921         s->prev_mb_info = s->last_mb_info;
2924         s->prev_mb_info = put_bits_count(&s->pb)/8;
2925         /* This might have incremented mb_info_size above, and we return without
2926          * actually writing any info into that slot yet. But in that case,
2927          * this will be called again at the start of the after writing the
2928          * start code, actually writing the mb info. */
2932     s->last_mb_info = put_bits_count(&s->pb)/8;
2933     if (!s->mb_info_size)
2934         s->mb_info_size += 12;
/**
 * Grow the shared output bitstream buffer when fewer than 'threshold'
 * bytes remain, enlarging it by at least 'size_increase'.  Only applies
 * when there is a single slice context and s->pb writes directly into
 * avctx->internal->byte_buffer.  On success the PutBitContext is rebased
 * onto the new buffer and the derived pointers (ptr_lastgob,
 * vbv_delay_ptr) are fixed up to preserve their offsets.
 *
 * @return 0 on success (or when no reallocation was needed),
 *         AVERROR(ENOMEM) if the new size would overflow or allocation
 *         fails, AVERROR(EINVAL) if even after reallocation less than
 *         'threshold' bytes remain.
 * NOTE(review): truncated view — the emms_c()/flush and the new_buffer
 * NULL check line are in missing lines.
 */
2938 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2940     if (   s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2941         && s->slice_context_count == 1
2942         && s->pb.buf == s->avctx->internal->byte_buffer) {
/* remember offsets of pointers into the old buffer so they can be rebased */
2943         int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2944         int vbv_pos     = s->vbv_delay_ptr - s->pb.buf;
2946         uint8_t *new_buffer = NULL;
2947         int new_buffer_size = 0;
/* reject sizes that could overflow downstream bit-count arithmetic */
2949         if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2950             av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2951             return AVERROR(ENOMEM);
2956         av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2957                               s->avctx->internal->byte_buffer_size + size_increase);
2959             return AVERROR(ENOMEM);
2961         memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2962         av_free(s->avctx->internal->byte_buffer);
2963         s->avctx->internal->byte_buffer      = new_buffer;
2964         s->avctx->internal->byte_buffer_size = new_buffer_size;
2965         rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2966         s->ptr_lastgob   = s->pb.buf + lastgob_pos;
2967         s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2969     if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2970         return AVERROR(EINVAL);
2974 static int encode_thread(AVCodecContext *c, void *arg){
2975 MpegEncContext *s= *(void**)arg;
2977 int chr_h= 16>>s->chroma_y_shift;
2979 MpegEncContext best_s = { 0 }, backup_s;
2980 uint8_t bit_buf[2][MAX_MB_BYTES];
2981 uint8_t bit_buf2[2][MAX_MB_BYTES];
2982 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2983 PutBitContext pb[2], pb2[2], tex_pb[2];
2985 ff_check_alignment();
2988 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2989 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2990 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2993 s->last_bits= put_bits_count(&s->pb);
3004 /* init last dc values */
3005 /* note: quant matrix value (8) is implied here */
3006 s->last_dc[i] = 128 << s->intra_dc_precision;
3008 s->current_picture.encoding_error[i] = 0;
3010 if(s->codec_id==AV_CODEC_ID_AMV){
3011 s->last_dc[0] = 128*8/13;
3012 s->last_dc[1] = 128*8/14;
3013 s->last_dc[2] = 128*8/14;
3016 memset(s->last_mv, 0, sizeof(s->last_mv));
3020 switch(s->codec_id){
3021 case AV_CODEC_ID_H263:
3022 case AV_CODEC_ID_H263P:
3023 case AV_CODEC_ID_FLV1:
3024 if (CONFIG_H263_ENCODER)
3025 s->gob_index = H263_GOB_HEIGHT(s->height);
3027 case AV_CODEC_ID_MPEG4:
3028 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
3029 ff_mpeg4_init_partitions(s);
3035 s->first_slice_line = 1;
3036 s->ptr_lastgob = s->pb.buf;
3037 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
3041 ff_set_qscale(s, s->qscale);
3042 ff_init_block_index(s);
3044 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3045 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
3046 int mb_type= s->mb_type[xy];
3050 int size_increase = s->avctx->internal->byte_buffer_size/4
3051 + s->mb_width*MAX_MB_BYTES;
3053 ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
3054 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
3055 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
3058 if(s->data_partitioning){
3059 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
3060 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
3061 av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3067 s->mb_y = mb_y; // moved into loop, can get changed by H.261
3068 ff_update_block_index(s);
3070 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3071 ff_h261_reorder_mb_index(s);
3072 xy= s->mb_y*s->mb_stride + s->mb_x;
3073 mb_type= s->mb_type[xy];
3076 /* write gob / video packet header */
3078 int current_packet_size, is_gob_start;
3080 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
3082 is_gob_start = s->rtp_payload_size &&
3083 current_packet_size >= s->rtp_payload_size &&
3086 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3088 switch(s->codec_id){
3089 case AV_CODEC_ID_H263:
3090 case AV_CODEC_ID_H263P:
3091 if(!s->h263_slice_structured)
3092 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3094 case AV_CODEC_ID_MPEG2VIDEO:
3095 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3096 case AV_CODEC_ID_MPEG1VIDEO:
3097 if(s->mb_skip_run) is_gob_start=0;
3099 case AV_CODEC_ID_MJPEG:
3100 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3105 if(s->start_mb_y != mb_y || mb_x!=0){
3108 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3109 ff_mpeg4_init_partitions(s);
3113 av_assert2((put_bits_count(&s->pb)&7) == 0);
3114 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3116 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3117 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3118 int d = 100 / s->error_rate;
3120 current_packet_size=0;
3121 s->pb.buf_ptr= s->ptr_lastgob;
3122 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3126 #if FF_API_RTP_CALLBACK
3127 FF_DISABLE_DEPRECATION_WARNINGS
3128 if (s->avctx->rtp_callback){
3129 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3130 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3132 FF_ENABLE_DEPRECATION_WARNINGS
3134 update_mb_info(s, 1);
3136 switch(s->codec_id){
3137 case AV_CODEC_ID_MPEG4:
3138 if (CONFIG_MPEG4_ENCODER) {
3139 ff_mpeg4_encode_video_packet_header(s);
3140 ff_mpeg4_clean_buffers(s);
3143 case AV_CODEC_ID_MPEG1VIDEO:
3144 case AV_CODEC_ID_MPEG2VIDEO:
3145 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3146 ff_mpeg1_encode_slice_header(s);
3147 ff_mpeg1_clean_buffers(s);
3150 case AV_CODEC_ID_H263:
3151 case AV_CODEC_ID_H263P:
3152 if (CONFIG_H263_ENCODER)
3153 ff_h263_encode_gob_header(s, mb_y);
3157 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3158 int bits= put_bits_count(&s->pb);
3159 s->misc_bits+= bits - s->last_bits;
3163 s->ptr_lastgob += current_packet_size;
3164 s->first_slice_line=1;
3165 s->resync_mb_x=mb_x;
3166 s->resync_mb_y=mb_y;
3170 if( (s->resync_mb_x == s->mb_x)
3171 && s->resync_mb_y+1 == s->mb_y){
3172 s->first_slice_line=0;
3176 s->dquant=0; //only for QP_RD
3178 update_mb_info(s, 0);
3180 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3182 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3184 copy_context_before_encode(&backup_s, s, -1);
3186 best_s.data_partitioning= s->data_partitioning;
3187 best_s.partitioned_frame= s->partitioned_frame;
3188 if(s->data_partitioning){
3189 backup_s.pb2= s->pb2;
3190 backup_s.tex_pb= s->tex_pb;
3193 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3194 s->mv_dir = MV_DIR_FORWARD;
3195 s->mv_type = MV_TYPE_16X16;
3197 s->mv[0][0][0] = s->p_mv_table[xy][0];
3198 s->mv[0][0][1] = s->p_mv_table[xy][1];
3199 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3200 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3202 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3203 s->mv_dir = MV_DIR_FORWARD;
3204 s->mv_type = MV_TYPE_FIELD;
3207 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3208 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3209 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3211 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3212 &dmin, &next_block, 0, 0);
3214 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3215 s->mv_dir = MV_DIR_FORWARD;
3216 s->mv_type = MV_TYPE_16X16;
3220 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3221 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3223 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3224 s->mv_dir = MV_DIR_FORWARD;
3225 s->mv_type = MV_TYPE_8X8;
3228 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3229 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3231 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3232 &dmin, &next_block, 0, 0);
3234 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3235 s->mv_dir = MV_DIR_FORWARD;
3236 s->mv_type = MV_TYPE_16X16;
3238 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3239 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3240 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3241 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3243 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3244 s->mv_dir = MV_DIR_BACKWARD;
3245 s->mv_type = MV_TYPE_16X16;
3247 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3248 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3249 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3250 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3252 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3253 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3254 s->mv_type = MV_TYPE_16X16;
3256 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3257 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3258 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3259 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3260 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3261 &dmin, &next_block, 0, 0);
3263 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3264 s->mv_dir = MV_DIR_FORWARD;
3265 s->mv_type = MV_TYPE_FIELD;
3268 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3269 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3270 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3272 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3273 &dmin, &next_block, 0, 0);
3275 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3276 s->mv_dir = MV_DIR_BACKWARD;
3277 s->mv_type = MV_TYPE_FIELD;
3280 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3281 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3282 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3284 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3285 &dmin, &next_block, 0, 0);
3287 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3288 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3289 s->mv_type = MV_TYPE_FIELD;
3291 for(dir=0; dir<2; dir++){
3293 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3294 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3295 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3298 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3299 &dmin, &next_block, 0, 0);
3301 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3303 s->mv_type = MV_TYPE_16X16;
3307 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3308 &dmin, &next_block, 0, 0);
3309 if(s->h263_pred || s->h263_aic){
3311 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3313 ff_clean_intra_table_entries(s); //old mode?
3317 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3318 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3319 const int last_qp= backup_s.qscale;
3322 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3323 static const int dquant_tab[4]={-1,1,-2,2};
3324 int storecoefs = s->mb_intra && s->dc_val[0];
3326 av_assert2(backup_s.dquant == 0);
3329 s->mv_dir= best_s.mv_dir;
3330 s->mv_type = MV_TYPE_16X16;
3331 s->mb_intra= best_s.mb_intra;
3332 s->mv[0][0][0] = best_s.mv[0][0][0];
3333 s->mv[0][0][1] = best_s.mv[0][0][1];
3334 s->mv[1][0][0] = best_s.mv[1][0][0];
3335 s->mv[1][0][1] = best_s.mv[1][0][1];
3337 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3338 for(; qpi<4; qpi++){
3339 int dquant= dquant_tab[qpi];
3340 qp= last_qp + dquant;
3341 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3343 backup_s.dquant= dquant;
3346 dc[i]= s->dc_val[0][ s->block_index[i] ];
3347 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3351 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3352 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3353 if(best_s.qscale != qp){
3356 s->dc_val[0][ s->block_index[i] ]= dc[i];
3357 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3364 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3365 int mx= s->b_direct_mv_table[xy][0];
3366 int my= s->b_direct_mv_table[xy][1];
3368 backup_s.dquant = 0;
3369 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3371 ff_mpeg4_set_direct_mv(s, mx, my);
3372 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3373 &dmin, &next_block, mx, my);
3375 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3376 backup_s.dquant = 0;
3377 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3379 ff_mpeg4_set_direct_mv(s, 0, 0);
3380 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3381 &dmin, &next_block, 0, 0);
3383 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3386 coded |= s->block_last_index[i];
3389 memcpy(s->mv, best_s.mv, sizeof(s->mv));
3390 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3391 mx=my=0; //FIXME find the one we actually used
3392 ff_mpeg4_set_direct_mv(s, mx, my);
3393 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3401 s->mv_dir= best_s.mv_dir;
3402 s->mv_type = best_s.mv_type;
3404 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3405 s->mv[0][0][1] = best_s.mv[0][0][1];
3406 s->mv[1][0][0] = best_s.mv[1][0][0];
3407 s->mv[1][0][1] = best_s.mv[1][0][1];*/
3410 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3411 &dmin, &next_block, mx, my);
3416 s->current_picture.qscale_table[xy] = best_s.qscale;
3418 copy_context_after_encode(s, &best_s, -1);
3420 pb_bits_count= put_bits_count(&s->pb);
3421 flush_put_bits(&s->pb);
3422 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3425 if(s->data_partitioning){
3426 pb2_bits_count= put_bits_count(&s->pb2);
3427 flush_put_bits(&s->pb2);
3428 avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3429 s->pb2= backup_s.pb2;
3431 tex_pb_bits_count= put_bits_count(&s->tex_pb);
3432 flush_put_bits(&s->tex_pb);
3433 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3434 s->tex_pb= backup_s.tex_pb;
3436 s->last_bits= put_bits_count(&s->pb);
3438 if (CONFIG_H263_ENCODER &&
3439 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3440 ff_h263_update_motion_val(s);
3442 if(next_block==0){ //FIXME 16 vs linesize16
3443 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3444 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3445 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3448 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3449 ff_mpv_decode_mb(s, s->block);
3451 int motion_x = 0, motion_y = 0;
3452 s->mv_type=MV_TYPE_16X16;
3453 // only one MB-Type possible
3456 case CANDIDATE_MB_TYPE_INTRA:
3459 motion_x= s->mv[0][0][0] = 0;
3460 motion_y= s->mv[0][0][1] = 0;
3462 case CANDIDATE_MB_TYPE_INTER:
3463 s->mv_dir = MV_DIR_FORWARD;
3465 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3466 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3468 case CANDIDATE_MB_TYPE_INTER_I:
3469 s->mv_dir = MV_DIR_FORWARD;
3470 s->mv_type = MV_TYPE_FIELD;
3473 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3474 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3475 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3478 case CANDIDATE_MB_TYPE_INTER4V:
3479 s->mv_dir = MV_DIR_FORWARD;
3480 s->mv_type = MV_TYPE_8X8;
3483 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3484 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3487 case CANDIDATE_MB_TYPE_DIRECT:
3488 if (CONFIG_MPEG4_ENCODER) {
3489 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3491 motion_x=s->b_direct_mv_table[xy][0];
3492 motion_y=s->b_direct_mv_table[xy][1];
3493 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3496 case CANDIDATE_MB_TYPE_DIRECT0:
3497 if (CONFIG_MPEG4_ENCODER) {
3498 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3500 ff_mpeg4_set_direct_mv(s, 0, 0);
3503 case CANDIDATE_MB_TYPE_BIDIR:
3504 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3506 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3507 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3508 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3509 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3511 case CANDIDATE_MB_TYPE_BACKWARD:
3512 s->mv_dir = MV_DIR_BACKWARD;
3514 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3515 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3517 case CANDIDATE_MB_TYPE_FORWARD:
3518 s->mv_dir = MV_DIR_FORWARD;
3520 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3521 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3523 case CANDIDATE_MB_TYPE_FORWARD_I:
3524 s->mv_dir = MV_DIR_FORWARD;
3525 s->mv_type = MV_TYPE_FIELD;
3528 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3529 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3530 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3533 case CANDIDATE_MB_TYPE_BACKWARD_I:
3534 s->mv_dir = MV_DIR_BACKWARD;
3535 s->mv_type = MV_TYPE_FIELD;
3538 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3539 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3540 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3543 case CANDIDATE_MB_TYPE_BIDIR_I:
3544 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3545 s->mv_type = MV_TYPE_FIELD;
3547 for(dir=0; dir<2; dir++){
3549 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3550 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3551 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3556 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3559 encode_mb(s, motion_x, motion_y);
3561 // RAL: Update last macroblock type
3562 s->last_mv_dir = s->mv_dir;
3564 if (CONFIG_H263_ENCODER &&
3565 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3566 ff_h263_update_motion_val(s);
3568 ff_mpv_decode_mb(s, s->block);
3571 /* clean the MV table in IPS frames for direct mode in B-frames */
3572 if(s->mb_intra /* && I,P,S_TYPE */){
3573 s->p_mv_table[xy][0]=0;
3574 s->p_mv_table[xy][1]=0;
3577 if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3581 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3582 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3584 s->current_picture.encoding_error[0] += sse(
3585 s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3586 s->dest[0], w, h, s->linesize);
3587 s->current_picture.encoding_error[1] += sse(
3588 s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3589 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3590 s->current_picture.encoding_error[2] += sse(
3591 s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3592 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3595 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3596 ff_h263_loop_filter(s);
3598 ff_dlog(s->avctx, "MB %d %d bits\n",
3599 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3603 //not beautiful here but we must write it before flushing so it has to be here
3604 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3605 ff_msmpeg4_encode_ext_header(s);
3609 #if FF_API_RTP_CALLBACK
3610 FF_DISABLE_DEPRECATION_WARNINGS
3611 /* Send the last GOB if RTP */
3612 if (s->avctx->rtp_callback) {
3613 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3614 int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3615 /* Call the RTP callback to send the last GOB */
3617 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3619 FF_ENABLE_DEPRECATION_WARNINGS
/* MERGE(field): add src->field into dst->field and zero the source copy,
 * so a slice-thread context can be folded into the main context exactly once. */
3625 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold the motion-estimation statistics gathered by a slice thread (src)
 * back into the main encoder context (dst) after the ME pass.
 * NOTE(review): this chunk elides lines (original numbering skips 3630+);
 * the closing brace of this function is not visible here. */
3626 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3627     MERGE(me.scene_change_score);
3628     MERGE(me.mc_mb_var_sum_temp);
3629     MERGE(me.mb_var_sum_temp);
/* Fold per-slice encoding state (bit counts, noise-reduction sums, error
 * statistics) and the slice's bitstream back into the main context after the
 * per-thread encode pass. The bitstream of src is appended to dst->pb, which
 * requires both to be byte aligned at this point (asserted below).
 * NOTE(review): several MERGE() lines (orig. 3637-3644) and the closing brace
 * are elided from this chunk. */
3632 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3635     MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3636     MERGE(dct_count[1]);
3645     MERGE(er.error_count);
3646     MERGE(padding_bug_score);
3647     MERGE(current_picture.encoding_error[0]);
3648     MERGE(current_picture.encoding_error[1]);
3649     MERGE(current_picture.encoding_error[2]);
     /* dct_error_sum only exists when noise reduction is enabled. */
3651     if (dst->noise_reduction){
3652         for(i=0; i<64; i++){
3653             MERGE(dct_error_sum[0][i]);
3654             MERGE(dct_error_sum[1][i]);
     /* Slice bitstreams are concatenated on byte boundaries only. */
3658     assert(put_bits_count(&src->pb) % 8 ==0);
3659     assert(put_bits_count(&dst->pb) % 8 ==0);
3660     avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3661     flush_put_bits(&dst->pb);
/* Estimate/select the quantizer (lambda / qscale) for the current picture.
 * Priority: an explicit next_lambda override, then the rate controller
 * (XviD-style or native, depending on rc_strategy), else the fixed qscale.
 * When dry_run is nonzero the estimate must not consume rate-control state
 * (next_lambda is only cleared on a real run).
 * Returns 0 on success, negative on rate-control failure (elided here).
 * NOTE(review): several lines (orig. 3670-3671, 3680-3682, braces) are
 * elided from this chunk. */
3664 static int estimate_qp(MpegEncContext *s, int dry_run){
3665     if (s->next_lambda){
3666         s->current_picture_ptr->f->quality =
3667         s->current_picture.f->quality = s->next_lambda;
3668         if(!dry_run) s->next_lambda= 0;
3669     } else if (!s->fixed_qscale) {
3672         if ((s->avctx->flags & AV_CODEC_FLAG_PASS2) && s->rc_strategy == MPV_RC_STRATEGY_XVID)
3673             quality = ff_xvid_rate_estimate_qscale(s, dry_run);
3676             quality = ff_rate_estimate_qscale(s, dry_run);
3677         s->current_picture_ptr->f->quality =
3678         s->current_picture.f->quality = quality;
3679         if (s->current_picture.f->quality < 0)
     /* With adaptive quantization the per-MB qscale tables need codec-specific
      * cleanup before being committed. */
3683     if(s->adaptive_quant){
3684         switch(s->codec_id){
3685         case AV_CODEC_ID_MPEG4:
3686             if (CONFIG_MPEG4_ENCODER)
3687                 ff_clean_mpeg4_qscales(s);
3689         case AV_CODEC_ID_H263:
3690         case AV_CODEC_ID_H263P:
3691         case AV_CODEC_ID_FLV1:
3692             if (CONFIG_H263_ENCODER)
3693                 ff_clean_h263_qscales(s);
3696         ff_init_qscale_tab(s);
3699         s->lambda= s->lambda_table[0];
     /* Non-adaptive path: single lambda for the whole picture. */
3702         s->lambda = s->current_picture.f->quality;
3707 /* must be called before writing the header */
/* Derive the temporal distances used by B-frame prediction:
 *   pp_time = distance between the two reference (non-B) frames,
 *   pb_time = distance from the previous reference to this B frame.
 * Requires a valid pts on the current picture (asserted). */
3708 static void set_frame_distances(MpegEncContext * s){
3709     av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3710     s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3712     if(s->pict_type==AV_PICTURE_TYPE_B){
3713         s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3714         assert(s->pb_time > 0 && s->pb_time < s->pp_time);
     /* Non-B frame: advance the reference-frame clock. */
3716         s->pp_time= s->time - s->last_non_b_time;
3717         s->last_non_b_time= s->time;
3718         assert(s->picture_number==0 || s->pp_time > 0);
/* Encode one picture: set up timing and rounding state, run (threaded)
 * motion estimation, optionally retype the picture on scene change, pick
 * f_code/b_code, finalize quantization matrices, write the codec-specific
 * picture header, then run the per-slice encode threads and merge their
 * output. Returns 0 on success, negative on error (error paths elided here).
 * NOTE(review): this chunk elides many lines (original numbering skips);
 * comments below describe only the visible code. */
3722 static int encode_picture(MpegEncContext *s, int picture_number)
3726 int context_count = s->slice_context_count;
3728 s->picture_number = picture_number;
3730 /* Reset the average MB variance */
3731 s->me.mb_var_sum_temp =
3732 s->me.mc_mb_var_sum_temp = 0;
3734 /* we need to initialize some time vars before we can encode B-frames */
3735 // RAL: Condition added for MPEG1VIDEO
3736 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3737 set_frame_distances(s);
3738 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3739 ff_set_mpeg4_time(s);
3741 s->me.scene_change_score=0;
3743 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* Rounding control: I frames reset it; P/S frames toggle it when flip-flop
 * rounding is in use (H.263+/MPEG-4 style). */
3745 if(s->pict_type==AV_PICTURE_TYPE_I){
3746 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3747 else s->no_rounding=0;
3748 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3749 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3750 s->no_rounding ^= 1;
/* Pass-2 rate control: dry-run QP estimate + fcode from the stats file;
 * otherwise reuse the last lambda of the matching picture type. */
3753 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3754 if (estimate_qp(s,1) < 0)
3756 ff_get_2pass_fcode(s);
3757 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3758 if(s->pict_type==AV_PICTURE_TYPE_B)
3759 s->lambda= s->last_lambda_for[s->pict_type];
3761 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* For non-(A)MJPEG codecs chroma shares the luma intra matrices; free any
 * separately allocated chroma matrices before aliasing the pointers. */
3765 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3766 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3767 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3768 s->q_chroma_intra_matrix = s->q_intra_matrix;
3769 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3772 s->mb_intra=0; //for the rate distortion & bit compare functions
/* Clone the main context into each slice-thread context. */
3773 for(i=1; i<context_count; i++){
3774 ret = ff_update_duplicate_context(s->thread_context[i], s);
3782 /* Estimate motion for every MB */
3783 if(s->pict_type != AV_PICTURE_TYPE_I){
/* me_penalty_compensation scales lambda in Q8 fixed point. */
3784 s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3785 s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3786 if (s->pict_type != AV_PICTURE_TYPE_B) {
3787 if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3789 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3793 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3794 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
/* I frame: every MB is intra by definition. */
3796 for(i=0; i<s->mb_stride*s->mb_height; i++)
3797 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3799 if(!s->fixed_qscale){
3800 /* finding spatial complexity for I-frame rate control */
3801 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3804 for(i=1; i<context_count; i++){
3805 merge_context_after_me(s, s->thread_context[i]);
3807 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3808 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* Scene-change detection: promote P to I and mark all MBs intra. */
3811 if (s->me.scene_change_score > s->scenechange_threshold &&
3812 s->pict_type == AV_PICTURE_TYPE_P) {
3813 s->pict_type= AV_PICTURE_TYPE_I;
3814 for(i=0; i<s->mb_stride*s->mb_height; i++)
3815 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3816 if(s->msmpeg4_version >= 3)
3818 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3819 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* P/S frames: choose f_code from the MV tables and clamp out-of-range MVs. */
3823 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3824 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3826 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3828 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3829 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3830 s->f_code= FFMAX3(s->f_code, a, b);
3833 ff_fix_long_p_mvs(s);
3834 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3835 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3839 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3840 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
/* B frames: pick f_code (forward) and b_code (backward) separately. */
3845 if(s->pict_type==AV_PICTURE_TYPE_B){
3848 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3849 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3850 s->f_code = FFMAX(a, b);
3852 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3853 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3854 s->b_code = FFMAX(a, b);
3856 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3857 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3858 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3859 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3860 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3862 for(dir=0; dir<2; dir++){
3865 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3866 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3867 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3868 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* Real (non-dry-run) QP estimation for this picture. */
3876 if (estimate_qp(s, 0) < 0)
3879 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3880 s->pict_type == AV_PICTURE_TYPE_I &&
3881 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3882 s->qscale= 3; //reduce clipping problems
/* MJPEG bakes qscale directly into the quant matrices. */
3884 if (s->out_format == FMT_MJPEG) {
3885 const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3886 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3888 if (s->avctx->intra_matrix) {
3890 luma_matrix = s->avctx->intra_matrix;
3892 if (s->avctx->chroma_intra_matrix)
3893 chroma_matrix = s->avctx->chroma_intra_matrix;
3895 /* for mjpeg, we do include qscale in the matrix */
3897 int j = s->idsp.idct_permutation[i];
3899 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3900 s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3902 s->y_dc_scale_table=
3903 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3904 s->chroma_intra_matrix[0] =
3905 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3906 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3907 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3908 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3909 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV uses fixed SP5X quant tables and constant DC scales (13/14). */
3912 if(s->codec_id == AV_CODEC_ID_AMV){
3913 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3914 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3916 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3918 s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3919 s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3921 s->y_dc_scale_table= y;
3922 s->c_dc_scale_table= c;
3923 s->intra_matrix[0] = 13;
3924 s->chroma_intra_matrix[0] = 14;
3925 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3926 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3927 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3928 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3932 //FIXME var duplication
3933 s->current_picture_ptr->f->key_frame =
3934 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3935 s->current_picture_ptr->f->pict_type =
3936 s->current_picture.f->pict_type = s->pict_type;
3938 if (s->current_picture.f->key_frame)
3939 s->picture_in_gop_number=0;
/* Write the codec-specific picture header and account its bit cost. */
3941 s->mb_x = s->mb_y = 0;
3942 s->last_bits= put_bits_count(&s->pb);
3943 switch(s->out_format) {
3945 if (CONFIG_MJPEG_ENCODER && s->huffman != HUFFMAN_TABLE_OPTIMAL)
3946 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3947 s->pred, s->intra_matrix, s->chroma_intra_matrix);
3950 if (CONFIG_H261_ENCODER)
3951 ff_h261_encode_picture_header(s, picture_number);
3954 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3955 ff_wmv2_encode_picture_header(s, picture_number);
3956 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3957 ff_msmpeg4_encode_picture_header(s, picture_number);
3958 else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3959 ret = ff_mpeg4_encode_picture_header(s, picture_number);
3962 } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3963 ret = ff_rv10_encode_picture_header(s, picture_number);
3967 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3968 ff_rv20_encode_picture_header(s, picture_number);
3969 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3970 ff_flv_encode_picture_header(s, picture_number);
3971 else if (CONFIG_H263_ENCODER)
3972 ff_h263_encode_picture_header(s, picture_number);
3975 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3976 ff_mpeg1_encode_picture_header(s, picture_number);
3981 bits= put_bits_count(&s->pb);
3982 s->header_bits= bits - s->last_bits;
/* Run the per-slice encode threads, then merge their state and bitstreams;
 * if a slice buffer directly follows the main buffer, the main PutBitContext
 * is grown to cover it before merging. */
3984 for(i=1; i<context_count; i++){
3985 update_duplicate_context_after_me(s->thread_context[i], s);
3987 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3988 for(i=1; i<context_count; i++){
3989 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3990 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
3991 merge_context_after_encode(s, s->thread_context[i]);
/* DCT-domain noise reduction (C reference implementation): for each
 * coefficient, accumulate its magnitude into dct_error_sum and shrink it
 * toward zero by the running dct_offset, clamping at zero so the sign never
 * flips. Statistics are kept separately for intra and inter blocks.
 * NOTE(review): the positive/negative branch structure is partially elided
 * in this chunk; the two halves visible below are the >0 and <0 paths. */
3997 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3998     const int intra= s->mb_intra;
4001     s->dct_count[intra]++;
4003     for(i=0; i<64; i++){
4004         int level= block[i];
     /* level > 0: subtract the offset, clamp at 0. */
4008             s->dct_error_sum[intra][i] += level;
4009             level -= s->dct_offset[intra][i];
4010             if(level<0) level=0;
     /* level < 0: mirror of the above. */
4012             s->dct_error_sum[intra][i] -= level;
4013             level += s->dct_offset[intra][i];
4014             if(level>0) level=0;
/* Trellis (rate-distortion optimal) quantization of one 8x8 block.
 * Runs the forward DCT, builds up to two candidate quantized levels per
 * coefficient, then dynamic-programming over (run, level) pairs using the
 * codec's VLC length tables and lambda to minimize distortion + lambda*rate.
 * Returns the index of the last non-zero coefficient (or a value < start_i
 * if the block quantizes to nothing); *overflow is set when a level exceeds
 * max_qcoeff. n is the block index (n>=4 selects chroma tables for intra).
 * NOTE(review): this chunk elides many lines (declarations around orig.
 * 4030-4040, intra/inter branch heads, several loop bodies); comments
 * describe only the visible code. */
4021 static int dct_quantize_trellis_c(MpegEncContext *s,
4022                                   int16_t *block, int n,
4023                                   int qscale, int *overflow){
4025     const uint16_t *matrix;
4026     const uint8_t *scantable= s->intra_scantable.scantable;
4027     const uint8_t *perm_scantable= s->intra_scantable.permutated;
4029     unsigned int threshold1, threshold2;
4041     int coeff_count[64];
4042     int qmul, qadd, start_i, last_non_zero, i, dc;
4043     const int esc_length= s->ac_esc_length;
4045     uint8_t * last_length;
4046     const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
4049     s->fdsp.fdct(block);
4051     if(s->dct_error_sum)
4052         s->denoise_dct(s, block);
4054     qadd= ((qscale-1)|1)*8;
     /* MPEG-2 optionally uses a non-linear qscale mapping. */
4056     if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
4057     else                 mpeg2_qscale = qscale << 1;
4068         /* For AIC we skip quant/dequant of INTRADC */
4073         /* note: block[0] is assumed to be positive */
4074         block[0] = (block[0] + (q >> 1)) / q;
     /* Intra path: chroma (n>3) may use its own matrix and VLC tables. */
4077         qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4078         matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4079         if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4080             bias= 1<<(QMAT_SHIFT-1);
4082         if (n > 3 && s->intra_chroma_ac_vlc_length) {
4083             length     = s->intra_chroma_ac_vlc_length;
4084             last_length= s->intra_chroma_ac_vlc_last_length;
4086             length     = s->intra_ac_vlc_length;
4087             last_length= s->intra_ac_vlc_last_length;
     /* Inter path. */
4092         qmat = s->q_inter_matrix[qscale];
4093         matrix = s->inter_matrix;
4094         length     = s->inter_ac_vlc_length;
4095         last_length= s->inter_ac_vlc_last_length;
4099     threshold1= (1<<QMAT_SHIFT) - bias - 1;
4100     threshold2= (threshold1<<1);
     /* Scan from the end to find the last coefficient that survives
      * quantization (the unsigned compare folds the +/- threshold test). */
4102     for(i=63; i>=start_i; i--) {
4103         const int j = scantable[i];
4104         int level = block[j] * qmat[j];
4106         if(((unsigned)(level+threshold1))>threshold2){
     /* Build per-coefficient candidates: level and level-1 (two rungs of the
      * trellis); tiny coefficients get only the +/-1 candidate. */
4112     for(i=start_i; i<=last_non_zero; i++) {
4113         const int j = scantable[i];
4114         int level = block[j] * qmat[j];
4116 //        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
4117 //           || bias-level >= (1<<(QMAT_SHIFT - 3))){
4118         if(((unsigned)(level+threshold1))>threshold2){
4120                 level= (bias + level)>>QMAT_SHIFT;
4122                 coeff[1][i]= level-1;
4123 //                coeff[2][k]= level-2;
4125                 level= (bias - level)>>QMAT_SHIFT;
4126                 coeff[0][i]= -level;
4127                 coeff[1][i]= -level+1;
4128 //                coeff[2][k]= -level+2;
4130             coeff_count[i]= FFMIN(level, 2);
4131             av_assert2(coeff_count[i]);
4134             coeff[0][i]= (level>>31)|1;
4139     *overflow= s->max_qcoeff < max; //overflow might have happened
     /* Nothing survived quantization: clear and bail out. */
4141     if(last_non_zero < start_i){
4142         memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4143         return last_non_zero;
4146     score_tab[start_i]= 0;
4147     survivor[0]= start_i;
     /* Viterbi over coefficient positions: for each position and candidate
      * level, try extending every surviving predecessor path. */
4150     for(i=start_i; i<=last_non_zero; i++){
4151         int level_index, j, zero_distortion;
4152         int dct_coeff= FFABS(block[ scantable[i] ]);
4153         int best_score=256*256*256*120;
     /* ifast FDCT output is scaled by the AAN factors; undo for distortion. */
4155         if (s->fdsp.fdct == ff_fdct_ifast)
4156             dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4157         zero_distortion= dct_coeff*dct_coeff;
4159         for(level_index=0; level_index < coeff_count[i]; level_index++){
4161             int level= coeff[level_index][i];
4162             const int alevel= FFABS(level);
     /* Reconstruct the dequantized value exactly as the decoder would,
      * per output format (H.263/H.261, MJPEG, MPEG-1/2 with mismatch
      * control via the "| 1" oddification). */
4167             if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4168                 unquant_coeff= alevel*qmul + qadd;
4169             } else if(s->out_format == FMT_MJPEG) {
4170                 j = s->idsp.idct_permutation[scantable[i]];
4171                 unquant_coeff = alevel * matrix[j] * 8;
4173                 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4175                     unquant_coeff = (int)(  alevel  * mpeg2_qscale * matrix[j]) >> 4;
4176                     unquant_coeff =   (unquant_coeff - 1) | 1;
4178                     unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4179                     unquant_coeff =   (unquant_coeff - 1) | 1;
4184             distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
     /* Non-escape level: price with the regular VLC length table. */
4186             if((level&(~127)) == 0){
4187                 for(j=survivor_count-1; j>=0; j--){
4188                     int run= i - survivor[j];
4189                     int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4190                     score += score_tab[i-run];
4192                     if(score < best_score){
4195                         level_tab[i+1]= level-64;
     /* H.263-family codes "last" in the VLC, so track a separate best for
      * ending the block here. */
4199                 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4200                     for(j=survivor_count-1; j>=0; j--){
4201                         int run= i - survivor[j];
4202                         int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4203                         score += score_tab[i-run];
4204                         if(score < last_score){
4207                             last_level= level-64;
     /* Escape-coded level: fixed escape length instead of the VLC table. */
4213                 distortion += esc_length*lambda;
4214                 for(j=survivor_count-1; j>=0; j--){
4215                     int run= i - survivor[j];
4216                     int score= distortion + score_tab[i-run];
4218                     if(score < best_score){
4221                         level_tab[i+1]= level-64;
4225                 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4226                     for(j=survivor_count-1; j>=0; j--){
4227                         int run= i - survivor[j];
4228                         int score= distortion + score_tab[i-run];
4229                         if(score < last_score){
4232                             last_level= level-64;
4240         score_tab[i+1]= best_score;
4242         // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
     /* Prune dominated survivors; the +lambda slack accounts for the above
      * MPEG-4 VLC anomaly on longer blocks. */
4243         if(last_non_zero <= 27){
4244             for(; survivor_count; survivor_count--){
4245                 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4249             for(; survivor_count; survivor_count--){
4250                 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4255         survivor[ survivor_count++ ]= i+1;
     /* Formats without a "last" VLC: pick the best truncation point, adding
      * the (approximate) cost of the end-of-block marker. */
4258     if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4259         last_score= 256*256*256*120;
4260         for(i= survivor[0]; i<=last_non_zero + 1; i++){
4261             int score= score_tab[i];
4263                 score += lambda * 2; // FIXME more exact?
4265             if(score < last_score){
4268                 last_level= level_tab[i];
4269                 last_run= run_tab[i];
4274     s->coded_score[n] = last_score;
4276     dc= FFABS(block[0]);
4277     last_non_zero= last_i - 1;
4278     memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4280     if(last_non_zero < start_i)
4281         return last_non_zero;
     /* Special case: only the DC-position coefficient remains — redo the
      * decision for it alone, comparing against dropping it entirely. */
4283     if(last_non_zero == 0 && start_i == 0){
4285         int best_score= dc * dc;
4287         for(i=0; i<coeff_count[0]; i++){
4288             int level= coeff[i][0];
4289             int alevel= FFABS(level);
4290             int unquant_coeff, score, distortion;
4292             if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4293                     unquant_coeff= (alevel*qmul + qadd)>>3;
4295                     unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4296                     unquant_coeff =   (unquant_coeff - 1) | 1;
4298                 unquant_coeff = (unquant_coeff + 4) >> 3;
4299                 unquant_coeff<<= 3 + 3;
4301             distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4303             if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4304             else                    score= distortion + esc_length*lambda;
4306             if(score < best_score){
4308                 best_level= level - 64;
4311         block[0]= best_level;
4312         s->coded_score[n] = best_score - dc*dc;
4313         if(best_level == 0) return -1;
4314         else                return last_non_zero;
     /* Back-track the winning path, writing levels into the (permuted)
      * block positions. */
4318     av_assert2(last_level);
4320     block[ perm_scantable[last_non_zero] ]= last_level;
4323     for(; i>start_i; i -= run_tab[i] + 1){
4324         block[ perm_scantable[i-1] ]= level_tab[i];
4327     return last_non_zero;
4330 //#define REFINE_STATS 1
/* 64 IDCT basis functions (one 8x8 spatial pattern per DCT coefficient),
 * stored in the codec's IDCT permutation order; filled lazily by
 * build_basis() and used by dct_quantize_refine(). */
4331 static int16_t basis[64][64];
/* Precompute the (scaled, permuted) 2-D DCT-III basis. The sqrt(0.5)
 * factors are the standard DC normalization of the separable cosine basis.
 * NOTE(review): the surrounding loop headers (orig. 4334-4341) are elided
 * from this chunk; i,j index the frequency, x,y the spatial position. */
4333 static void build_basis(uint8_t *perm){
4340 double s= 0.25*(1<<BASIS_SHIFT);
4342 int perm_index= perm[index];
4343 if(i==0) s*= sqrt(0.5);
4344 if(j==0) s*= sqrt(0.5);
4345 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Iterative spatial-domain refinement of an already-quantized block
 * (quantizer_noise_shaping). Maintains the weighted reconstruction error
 * rem[] in the pixel domain (via the precomputed IDCT basis) and greedily
 * tries +/-1 changes to each coefficient, accepting the change that most
 * reduces distortion + lambda * (bit-cost delta from the VLC tables).
 * Returns the new last-non-zero index. weight[] are per-pixel noise-shaping
 * weights; orig[] is the source block.
 * NOTE(review): this chunk elides many lines (loop heads, REFINE_STATS
 * counters, break/accept logic); comments describe only the visible code. */
4352 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4353                         int16_t *block, int16_t *weight, int16_t *orig,
4356     LOCAL_ALIGNED_16(int16_t, d1, [64]);
4357     const uint8_t *scantable= s->intra_scantable.scantable;
4358     const uint8_t *perm_scantable= s->intra_scantable.permutated;
4359 //    unsigned int threshold1, threshold2;
4364     int qmul, qadd, start_i, last_non_zero, i, dc;
4366     uint8_t * last_length;
4368     int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
     /* REFINE_STATS counters (debug only). */
4371 static int after_last=0;
4372 static int to_zero=0;
4373 static int from_zero=0;
4376 static int messed_sign=0;
     /* Lazily build the IDCT basis on first use. */
4379     if(basis[0][0] == 0)
4380         build_basis(s->idsp.idct_permutation);
4391         /* For AIC we skip quant/dequant of INTRADC */
4395         q <<= RECON_SHIFT-3;
4396         /* note: block[0] is assumed to be positive */
4398 //        block[0] = (block[0] + (q >> 1)) / q;
4400 //        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4401 //            bias= 1<<(QMAT_SHIFT-1);
     /* Intra: chroma (n>3) may use dedicated AC VLC tables. */
4402         if (n > 3 && s->intra_chroma_ac_vlc_length) {
4403             length     = s->intra_chroma_ac_vlc_length;
4404             last_length= s->intra_chroma_ac_vlc_last_length;
4406             length     = s->intra_ac_vlc_length;
4407             last_length= s->intra_ac_vlc_last_length;
     /* Inter tables. */
4412         length     = s->inter_ac_vlc_length;
4413         last_length= s->inter_ac_vlc_last_length;
4415     last_non_zero = s->block_last_index[n];
     /* Initialize rem[] with (dc - orig) in RECON_SHIFT fixed point. */
4420 dc += (1<<(RECON_SHIFT-1));
4421 for(i=0; i<64; i++){
4422     rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME  use   orig  directly  instead of copying to rem[]
4425 STOP_TIMER("memset rem[]")}
     /* Map the caller's weights into the 16..63 range used by the basis
      * projection; qns is the noise-shaping strength. */
4428     for(i=0; i<64; i++){
4433         w= FFABS(weight[i]) + qns*one;
4434         w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4437 //        w=weight[i] = (63*qns + (w/2)) / w;
4440         av_assert2(w<(1<<6));
4443     lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
     /* Build the run-length list of current coefficients and subtract each
      * dequantized coefficient's basis contribution from rem[]. */
4449     for(i=start_i; i<=last_non_zero; i++){
4450         int j= perm_scantable[i];
4451         const int level= block[j];
4455             if(level<0) coeff= qmul*level - qadd;
4456             else        coeff= qmul*level + qadd;
4457             run_tab[rle_index++]=run;
4460             s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
4466 if(last_non_zero>0){
4467 STOP_TIMER("init rem[]")
     /* Main refinement loop: baseline score is the current error energy. */
4474         int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4477         int run2, best_unquant_change=0, analyze_gradient;
4481         analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
     /* Optional gradient: project rem back to the DCT domain (d1) to cheaply
      * reject sign-inconsistent +/-1 candidates below. */
4483         if(analyze_gradient){
4487             for(i=0; i<64; i++){
4490                 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
4493 STOP_TIMER("rem*w*w")}
     /* Intra DC gets its own +/-1 trial (quantized with q, range-checked). */
4503             const int level= block[0];
4504             int change, old_coeff;
4506             av_assert2(s->mb_intra);
4510             for(change=-1; change<=1; change+=2){
4511                 int new_level= level + change;
4512                 int score, new_coeff;
4514                 new_coeff= q*new_level;
4515                 if(new_coeff >= 2048 || new_coeff < 0)
4518                 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4519                                                   new_coeff - old_coeff);
4520                 if(score<best_score){
4523                     best_change= change;
4524                     best_unquant_change= new_coeff - old_coeff;
4531             run2= run_tab[rle_index++];
     /* AC coefficients: walk every scan position and try +/-1 on each. */
4535         for(i=start_i; i<64; i++){
4536             int j= perm_scantable[i];
4537             const int level= block[j];
4538             int change, old_coeff;
4540             if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4544                 if(level<0) old_coeff= qmul*level - qadd;
4545                 else        old_coeff= qmul*level + qadd;
4546                 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4550             av_assert2(run2>=0 || i >= last_non_zero );
4553             for(change=-1; change<=1; change+=2){
4554                 int new_level= level + change;
4555                 int score, new_coeff, unquant_change;
     /* Mild shaping (<2) never grows a coefficient's magnitude. */
4558                 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4562                     if(new_level<0) new_coeff= qmul*new_level - qadd;
4563                     else            new_coeff= qmul*new_level + qadd;
4564                     if(new_coeff >= 2048 || new_coeff <= -2048)
4566                     //FIXME check for overflow
     /* Case 1: coefficient stays non-zero — bit-cost delta is the VLC
      * length difference at the same run. */
4569                     if(level < 63 && level > -63){
4570                         if(i < last_non_zero)
4571                             score +=   length[UNI_AC_ENC_INDEX(run, new_level+64)]
4572                                      - length[UNI_AC_ENC_INDEX(run, level+64)];
4574                             score +=   last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4575                                      - last_length[UNI_AC_ENC_INDEX(run, level+64)];
     /* Case 2: a zero becomes +/-1 — splits the following run in two. */
4578                     av_assert2(FFABS(new_level)==1);
4580                     if(analyze_gradient){
4581                         int g= d1[ scantable[i] ];
4582                         if(g && (g^new_level) >= 0)
4586                     if(i < last_non_zero){
4587                         int next_i= i + run2 + 1;
4588                         int next_level= block[ perm_scantable[next_i] ] + 64;
4590                         if(next_level&(~127))
4593                         if(next_i < last_non_zero)
4594                             score +=   length[UNI_AC_ENC_INDEX(run, 65)]
4595                                      + length[UNI_AC_ENC_INDEX(run2, next_level)]
4596                                      - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4598                             score +=   length[UNI_AC_ENC_INDEX(run, 65)]
4599                                      + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4600                                      - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4602                         score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4604                             score +=   length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4605                                      - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
     /* Case 3: a +/-1 becomes zero — the two adjacent runs merge. */
4611                     av_assert2(FFABS(level)==1);
4613                     if(i < last_non_zero){
4614                         int next_i= i + run2 + 1;
4615                         int next_level= block[ perm_scantable[next_i] ] + 64;
4617                         if(next_level&(~127))
4620                         if(next_i < last_non_zero)
4621                             score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4622                                      - length[UNI_AC_ENC_INDEX(run2, next_level)]
4623                                      - length[UNI_AC_ENC_INDEX(run, 65)];
4625                             score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4626                                      - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4627                                      - length[UNI_AC_ENC_INDEX(run, 65)];
4629                         score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4631                             score +=   last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4632                                      - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4639                 unquant_change= new_coeff - old_coeff;
4640                 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4642                 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4644                 if(score<best_score){
4647                     best_change= change;
4648                     best_unquant_change= unquant_change;
4652                 prev_level= level + 64;
4653                 if(prev_level&(~127))
4662 STOP_TIMER("iterative step")}
     /* Apply the best change found in this pass and update bookkeeping. */
4666             int j= perm_scantable[ best_coeff ];
4668             block[j] += best_change;
4670             if(best_coeff > last_non_zero){
4671                 last_non_zero= best_coeff;
4672                 av_assert2(block[j]);
4679                 if(block[j] - best_change){
4680                     if(FFABS(block[j]) > FFABS(block[j] - best_change)){
     /* A coefficient was zeroed: re-scan for the new last-non-zero index. */
4692                 for(; last_non_zero>=start_i; last_non_zero--){
4693                     if(block[perm_scantable[last_non_zero]])
4699 if(256*256*256*64 % count == 0){
4700     av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
     /* Rebuild the run-length list and fold the accepted change into rem[]. */
4705             for(i=start_i; i<=last_non_zero; i++){
4706                 int j= perm_scantable[i];
4707                 const int level= block[j];
4710                     run_tab[rle_index++]=run;
4717             s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4723 if(last_non_zero>0){
4724 STOP_TIMER("iterative search")
4729     return last_non_zero;
 /**
4733  * Permute an 8x8 block according to permutation.
4734  * @param block the block which will be permuted according to
4735  *              the given permutation vector
4736  * @param permutation the permutation vector
4737  * @param last the last non zero coefficient in scantable order, used to
4738  *             speed the permutation up
4739  * @param scantable the used scantable, this is only used to speed the
4740  *                  permutation up, the block is not (inverse) permutated
4741  *                  to scantable order!
  */
4743 void ff_block_permute(int16_t *block, uint8_t *permutation,
4744                       const uint8_t *scantable, int last)
4751     //FIXME it is ok but not clean and might fail for some permutations
4752     // if (permutation[1] == 1)
     /* Copy the non-zero coefficients out (via a temp elided from this
      * chunk), then write them back at their permuted positions. */
4755     for (i = 0; i <= last; i++) {
4756         const int j = scantable[i];
4761     for (i = 0; i <= last; i++) {
4762         const int j = scantable[i];
4763         const int perm_j = permutation[j];
4764         block[perm_j] = temp[j];
/* Plain (non-trellis) quantization of one 8x8 block: forward DCT, optional
 * DCT-domain denoising, rounding quantization with the codec's bias, then a
 * re-permutation of the non-zero coefficients to match the IDCT's ordering.
 * Returns the index of the last non-zero coefficient; *overflow is set when
 * a quantized level exceeds max_qcoeff. n>=4 selects the chroma intra matrix.
 * NOTE(review): intra/inter branch heads and some declarations are elided
 * from this chunk. */
4768 int ff_dct_quantize_c(MpegEncContext *s,
4769                         int16_t *block, int n,
4770                         int qscale, int *overflow)
4772     int i, j, level, last_non_zero, q, start_i;
4774     const uint8_t *scantable= s->intra_scantable.scantable;
4777     unsigned int threshold1, threshold2;
4779     s->fdsp.fdct(block);
4781     if(s->dct_error_sum)
4782         s->denoise_dct(s, block);
4792         /* For AIC we skip quant/dequant of INTRADC */
4795         /* note: block[0] is assumed to be positive */
4796         block[0] = (block[0] + (q >> 1)) / q;
     /* Intra path: chroma blocks (n>3) may use their own matrix. */
4799         qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4800         bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
     /* Inter path. */
4804         qmat = s->q_inter_matrix[qscale];
4805         bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4807     threshold1= (1<<QMAT_SHIFT) - bias - 1;
4808     threshold2= (threshold1<<1);
     /* Scan backwards for the last coefficient that survives quantization
      * (the unsigned compare tests both signs against the threshold). */
4809     for(i=63;i>=start_i;i--) {
4811         level = block[j] * qmat[j];
4813         if(((unsigned)(level+threshold1))>threshold2){
     /* Quantize forward up to that point. */
4820     for(i=start_i; i<=last_non_zero; i++) {
4822         level = block[j] * qmat[j];
4824 //        if(   bias+level >= (1<<QMAT_SHIFT)
4825 //           || bias-level >= (1<<QMAT_SHIFT)){
4826         if(((unsigned)(level+threshold1))>threshold2){
4828                 level= (bias + level)>>QMAT_SHIFT;
4831                 level= (bias - level)>>QMAT_SHIFT;
4839     *overflow= s->max_qcoeff < max; //overflow might have happened
4841     /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4842     if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4843         ff_block_permute(block, s->idsp.idct_permutation,
4844                       scantable, last_non_zero);
4846     return last_non_zero;
/* Shorthand used by the AVOption tables below. */
#define OFFSET(x) offsetof(MpegEncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
4851 static const AVOption h263_options[] = {
4852 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4853 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4858 static const AVClass h263_class = {
4859 .class_name = "H.263 encoder",
4860 .item_name = av_default_item_name,
4861 .option = h263_options,
4862 .version = LIBAVUTIL_VERSION_INT,
4865 AVCodec ff_h263_encoder = {
4867 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4868 .type = AVMEDIA_TYPE_VIDEO,
4869 .id = AV_CODEC_ID_H263,
4870 .priv_data_size = sizeof(MpegEncContext),
4871 .init = ff_mpv_encode_init,
4872 .encode2 = ff_mpv_encode_picture,
4873 .close = ff_mpv_encode_end,
4874 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4875 .priv_class = &h263_class,
4878 static const AVOption h263p_options[] = {
4879 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4880 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4881 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4882 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
4886 static const AVClass h263p_class = {
4887 .class_name = "H.263p encoder",
4888 .item_name = av_default_item_name,
4889 .option = h263p_options,
4890 .version = LIBAVUTIL_VERSION_INT,
4893 AVCodec ff_h263p_encoder = {
4895 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4896 .type = AVMEDIA_TYPE_VIDEO,
4897 .id = AV_CODEC_ID_H263P,
4898 .priv_data_size = sizeof(MpegEncContext),
4899 .init = ff_mpv_encode_init,
4900 .encode2 = ff_mpv_encode_picture,
4901 .close = ff_mpv_encode_end,
4902 .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4903 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4904 .priv_class = &h263p_class,
4907 static const AVClass msmpeg4v2_class = {
4908 .class_name = "msmpeg4v2 encoder",
4909 .item_name = av_default_item_name,
4910 .option = ff_mpv_generic_options,
4911 .version = LIBAVUTIL_VERSION_INT,
4914 AVCodec ff_msmpeg4v2_encoder = {
4915 .name = "msmpeg4v2",
4916 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4917 .type = AVMEDIA_TYPE_VIDEO,
4918 .id = AV_CODEC_ID_MSMPEG4V2,
4919 .priv_data_size = sizeof(MpegEncContext),
4920 .init = ff_mpv_encode_init,
4921 .encode2 = ff_mpv_encode_picture,
4922 .close = ff_mpv_encode_end,
4923 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4924 .priv_class = &msmpeg4v2_class,
4927 static const AVClass msmpeg4v3_class = {
4928 .class_name = "msmpeg4v3 encoder",
4929 .item_name = av_default_item_name,
4930 .option = ff_mpv_generic_options,
4931 .version = LIBAVUTIL_VERSION_INT,
4934 AVCodec ff_msmpeg4v3_encoder = {
4936 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4937 .type = AVMEDIA_TYPE_VIDEO,
4938 .id = AV_CODEC_ID_MSMPEG4V3,
4939 .priv_data_size = sizeof(MpegEncContext),
4940 .init = ff_mpv_encode_init,
4941 .encode2 = ff_mpv_encode_picture,
4942 .close = ff_mpv_encode_end,
4943 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4944 .priv_class = &msmpeg4v3_class,
4947 static const AVClass wmv1_class = {
4948 .class_name = "wmv1 encoder",
4949 .item_name = av_default_item_name,
4950 .option = ff_mpv_generic_options,
4951 .version = LIBAVUTIL_VERSION_INT,
4954 AVCodec ff_wmv1_encoder = {
4956 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4957 .type = AVMEDIA_TYPE_VIDEO,
4958 .id = AV_CODEC_ID_WMV1,
4959 .priv_data_size = sizeof(MpegEncContext),
4960 .init = ff_mpv_encode_init,
4961 .encode2 = ff_mpv_encode_picture,
4962 .close = ff_mpv_encode_end,
4963 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4964 .priv_class = &wmv1_class,