2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/pixdesc.h"
40 #include "libavutil/opt.h"
41 #include "libavutil/timer.h"
46 #include "mpegvideo.h"
47 #include "mpegvideodata.h"
51 #include "mjpegenc_common.h"
53 #include "mpegutils.h"
56 #include "pixblockdsp.h"
60 #include "aandcttab.h"
62 #include "mpeg4video.h"
64 #include "bytestream.h"
70 #define QUANT_BIAS_SHIFT 8
72 #define QMAT_SHIFT_MMX 16
/* Forward declarations for encoder-internal routines defined later in this
 * file (quantization, trellis quantization, denoising, SSE metric). */
75 static int encode_picture(MpegEncContext *s, int picture_number);
76 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
77 static int sse_mb(MpegEncContext *s);
78 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
79 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
/* Shared default tables for motion estimation. default_fcode_tab is filled in
 * by mpv_encode_defaults() below; the initialization of default_mv_penalty is
 * not visible in this chunk — presumably done in elided code. */
81 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
82 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
/* Generic AVOption table shared by the mpegvideo-based encoders.
 * NOTE(review): the option entries are elided in this chunk of the file. */
84 const AVOption ff_mpv_generic_options[] = {
/**
 * Precompute forward-quantization reciprocal tables for a quant matrix,
 * one table row per qscale in [qmin, qmax].
 *
 * @param qmat         output: scaled integer reciprocals used by the C quantizer
 * @param qmat16       output: 16-bit reciprocal ([0]) and bias ([1]) tables for
 *                     the SIMD quantizer path
 * @param quant_matrix source quantization matrix (indexed via the IDCT
 *                     permutation, see `j` below)
 * @param bias         rounding bias in QUANT_BIAS_SHIFT fixed point
 * @param qmin, qmax   inclusive qscale range to precompute
 * @param intra        nonzero for an intra matrix; the overflow-check loop
 *                     below starts at `i = intra`, skipping the DC coefficient
 *
 * NOTE(review): several interior lines of this function are elided in this
 * chunk (loop variable declarations, some closing braces, the else-branch
 * openings); comments below describe only the visible code.
 */
89 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
90 uint16_t (*qmat16)[2][64],
91 const uint16_t *quant_matrix,
92 int bias, int qmin, int qmax, int intra)
94 FDCTDSPContext *fdsp = &s->fdsp;
98 for (qscale = qmin; qscale <= qmax; qscale++) {
/* MPEG-2 style non-linear qscale maps through a lookup table;
 * otherwise the effective scale is simply qscale * 2. */
102 if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
103 else qscale2 = qscale << 1;
/* Path 1: accurate integer fdct variants — reciprocal without AAN scales. */
105 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
107 fdsp->fdct == ff_faandct ||
108 #endif /* CONFIG_FAANDCT */
109 fdsp->fdct == ff_jpeg_fdct_islow_10) {
110 for (i = 0; i < 64; i++) {
111 const int j = s->idsp.idct_permutation[i];
112 int64_t den = (int64_t) qscale2 * quant_matrix[j];
113 /* 16 <= qscale * quant_matrix[i] <= 7905
114 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
115 * 19952 <= x <= 249205026
116 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
117 * 3444240 >= (1 << 36) / (x) >= 275 */
119 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
/* Path 2: AAN-style fast fdct — denominator folds in ff_aanscales[],
 * compensated by the extra +14 in the shift. */
121 } else if (fdsp->fdct == ff_fdct_ifast) {
122 for (i = 0; i < 64; i++) {
123 const int j = s->idsp.idct_permutation[i];
124 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
125 /* 16 <= qscale * quant_matrix[i] <= 7905
126 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
127 * 19952 <= x <= 249205026
128 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
129 * 3444240 >= (1 << 36) / (x) >= 275 */
131 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
/* Path 3 (else branch, opening elided): generic fdct — also fills the
 * 16-bit tables used by the SIMD quantizer. */
134 for (i = 0; i < 64; i++) {
135 const int j = s->idsp.idct_permutation[i];
136 int64_t den = (int64_t) qscale2 * quant_matrix[j];
137 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
138 * Assume x = qscale * quant_matrix[i]
140 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
141 * so 32768 >= (1 << 19) / (x) >= 67 */
142 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
143 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
144 // (qscale * quant_matrix[i]);
145 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
/* Clamp so the 16-bit reciprocal is never 0 and never exactly 2^15
 * (which would not fit the SIMD multiplier's value range). */
147 if (qmat16[qscale][0][i] == 0 ||
148 qmat16[qscale][0][i] == 128 * 256)
149 qmat16[qscale][0][i] = 128 * 256 - 1;
150 qmat16[qscale][1][i] =
151 ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
152 qmat16[qscale][0][i]);
/* Overflow check: shrink qmat (via a shift, adjustment lines elided) while
 * max * qmat would exceed INT_MAX. Starts at i = intra to skip DC for
 * intra matrices. */
156 for (i = intra; i < 64; i++) {
158 if (fdsp->fdct == ff_fdct_ifast) {
159 max = (8191LL * ff_aanscales[i]) >> 14;
161 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
/* If a corrective shift was needed, warn that QMAT_SHIFT is too large
 * for this matrix/qscale combination. */
167 av_log(NULL, AV_LOG_INFO,
168 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/* Derive s->qscale (and s->lambda2) from the current rate-control lambda.
 * NOTE(review): interior lines are elided in this chunk; comments describe
 * only the visible code. */
173 static inline void update_qscale(MpegEncContext *s)
/* NOTE(review): the `&& 0` makes this non-linear-qscale search branch dead
 * code — presumably disabled intentionally; confirm against upstream history
 * before removing. */
175 if (s->q_scale_type == 1 && 0) {
177 int bestdiff=INT_MAX;
/* Search the non-linear qscale table for the entry closest to
 * lambda * 139 (the same lambda->qscale constant used below). */
180 for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
181 int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
182 if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
183 (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
185 if (diff < bestdiff) {
/* Linear path: qscale ~= lambda * 139 / 2^(FF_LAMBDA_SHIFT+7), rounded,
 * then clipped to [qmin, qmax] (qmax relaxed to 31 when VBV forces it). */
192 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
193 (FF_LAMBDA_SHIFT + 7);
194 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
/* Keep lambda2 consistent with lambda (rounded square). */
197 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/* Write a 64-entry quantization matrix to the bitstream in zigzag order,
 * 8 bits per coefficient. (Closing brace elided in this chunk.) */
201 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
207 for (i = 0; i < 64; i++) {
208 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
215 * init s->current_picture.qscale_table from s->lambda_table
217 void ff_init_qscale_tab(MpegEncContext *s)
219 int8_t * const qscale_table = s->current_picture.qscale_table;
/* Convert each macroblock's lambda to a qscale (same *139 constant as
 * update_qscale()) and clip; upper clip bound is on an elided line. */
222 for (i = 0; i < s->mb_num; i++) {
223 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
224 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
225 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/* Copy the fields that motion estimation may have changed from src to dst
 * (used to re-sync duplicated slice contexts). Some COPY lines are elided
 * in this chunk. */
230 static void update_duplicate_context_after_me(MpegEncContext *dst,
233 #define COPY(a) dst->a= src->a
235 COPY(current_picture);
241 COPY(picture_in_gop_number);
242 COPY(gop_picture_number);
243 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
244 COPY(progressive_frame); // FIXME don't set in encode_header
245 COPY(partitioned_frame); // FIXME don't set in encode_header
250 * Set the given MpegEncContext to defaults for encoding.
251 * the changed fields will not depend upon the prior state of the MpegEncContext.
253 static void mpv_encode_defaults(MpegEncContext *s)
256 ff_mpv_common_defaults(s);
/* Default fcode table: 1 for small motion vectors around zero.
 * (Additional initialization lines are elided in this chunk.) */
258 for (i = -16; i < 16; i++) {
259 default_fcode_tab[i + MAX_MV] = 1;
261 s->me.mv_penalty = default_mv_penalty;
262 s->fcode_tab = default_fcode_tab;
/* Reset per-stream counters. */
264 s->input_picture_number = 0;
265 s->picture_in_gop_number = 0;
/* Select the DCT quantization implementations: x86-optimized hooks first,
 * then C fallbacks; trellis quantization replaces the default quantizer when
 * requested. (Return statement / closing brace elided in this chunk.) */
268 av_cold int ff_dct_encode_init(MpegEncContext *s) {
270 ff_dct_encode_init_x86(s);
272 if (CONFIG_H263_ENCODER)
273 ff_h263dsp_init(&s->h263dsp);
274 if (!s->dct_quantize)
275 s->dct_quantize = ff_dct_quantize_c;
277 s->denoise_dct = denoise_dct_c;
/* Keep the plain quantizer reachable even when trellis replaces
 * s->dct_quantize below. */
278 s->fast_dct_quantize = s->dct_quantize;
279 if (s->avctx->trellis)
280 s->dct_quantize = dct_quantize_trellis_c;
285 /* init video encoder */
/**
 * Initialize an mpegvideo-family encoder: validate user parameters, select
 * the per-codec output format and feature flags, allocate the quantization
 * and picture bookkeeping tables, and set up rate control.
 *
 * @return 0 on success (return path elided in this chunk), a negative
 *         AVERROR on invalid parameters, AVERROR_UNKNOWN via the fail: path.
 *
 * NOTE(review): many interior lines of this function (closing braces, some
 * returns, else branches, `#if` terminators) are elided in this chunk;
 * section comments below describe only the visible code.
 */
286 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
288 MpegEncContext *s = avctx->priv_data;
289 AVCPBProperties *cpb_props;
290 int i, ret, format_supported;
292 mpv_encode_defaults(s);
/* ---- pixel format validation, per codec ---- */
294 switch (avctx->codec_id) {
295 case AV_CODEC_ID_MPEG2VIDEO:
296 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
297 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
298 av_log(avctx, AV_LOG_ERROR,
299 "only YUV420 and YUV422 are supported\n");
303 case AV_CODEC_ID_MJPEG:
304 case AV_CODEC_ID_AMV:
305 format_supported = 0;
306 /* JPEG color space */
307 if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
308 avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
309 avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
310 (avctx->color_range == AVCOL_RANGE_JPEG &&
311 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
312 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
313 avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
314 format_supported = 1;
315 /* MPEG color space */
316 else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
317 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
318 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
319 avctx->pix_fmt == AV_PIX_FMT_YUV444P))
320 format_supported = 1;
322 if (!format_supported) {
323 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
/* default case (label elided): everything else is YUV420-only */
328 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
329 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
/* ---- derive chroma subsampling mode from the pixel format ---- */
334 switch (avctx->pix_fmt) {
335 case AV_PIX_FMT_YUVJ444P:
336 case AV_PIX_FMT_YUV444P:
337 s->chroma_format = CHROMA_444;
339 case AV_PIX_FMT_YUVJ422P:
340 case AV_PIX_FMT_YUV422P:
341 s->chroma_format = CHROMA_422;
343 case AV_PIX_FMT_YUVJ420P:
344 case AV_PIX_FMT_YUV420P:
346 s->chroma_format = CHROMA_420;
350 avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
/* ---- import deprecated public options into private fields ---- */
352 #if FF_API_PRIVATE_OPT
353 FF_DISABLE_DEPRECATION_WARNINGS
354 if (avctx->rtp_payload_size)
355 s->rtp_payload_size = avctx->rtp_payload_size;
356 FF_ENABLE_DEPRECATION_WARNINGS
/* ---- copy basic stream parameters ---- */
359 s->bit_rate = avctx->bit_rate;
360 s->width = avctx->width;
361 s->height = avctx->height;
362 if (avctx->gop_size > 600 &&
363 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
364 av_log(avctx, AV_LOG_WARNING,
365 "keyframe interval too large!, reducing it from %d to %d\n",
366 avctx->gop_size, 600);
367 avctx->gop_size = 600;
369 s->gop_size = avctx->gop_size;
371 if (avctx->max_b_frames > MAX_B_FRAMES) {
372 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
373 "is %d.\n", MAX_B_FRAMES);
374 avctx->max_b_frames = MAX_B_FRAMES;
376 s->max_b_frames = avctx->max_b_frames;
377 s->codec_id = avctx->codec->id;
378 s->strict_std_compliance = avctx->strict_std_compliance;
379 s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
380 s->rtp_mode = !!s->rtp_payload_size;
381 s->intra_dc_precision = avctx->intra_dc_precision;
383 // workaround some differences between how applications specify dc precision
384 if (s->intra_dc_precision < 0) {
385 s->intra_dc_precision += 8;
386 } else if (s->intra_dc_precision >= 8)
387 s->intra_dc_precision -= 8;
389 if (s->intra_dc_precision < 0) {
390 av_log(avctx, AV_LOG_ERROR,
391 "intra dc precision must be positive, note some applications use"
392 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
393 return AVERROR(EINVAL);
/* MPEG-2 allows dc precision 0..3; everything else only 0. */
396 if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
397 av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
398 return AVERROR(EINVAL);
400 s->user_specified_pts = AV_NOPTS_VALUE;
402 if (s->gop_size <= 1) {
409 #if FF_API_MOTION_EST
410 FF_DISABLE_DEPRECATION_WARNINGS
411 s->me_method = avctx->me_method;
412 FF_ENABLE_DEPRECATION_WARNINGS
416 s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
419 FF_DISABLE_DEPRECATION_WARNINGS
420 if (avctx->border_masking != 0.0)
421 s->border_masking = avctx->border_masking;
422 FF_ENABLE_DEPRECATION_WARNINGS
/* Adaptive quantization is enabled when any masking/RD option needs it
 * (the trailing condition is on an elided line). */
425 s->adaptive_quant = (s->avctx->lumi_masking ||
426 s->avctx->dark_masking ||
427 s->avctx->temporal_cplx_masking ||
428 s->avctx->spatial_cplx_masking ||
429 s->avctx->p_masking ||
431 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
434 s->loop_filter = !!(s->avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
/* ---- rate control / VBV parameter sanity ---- */
/* Pick a default VBV buffer size from max_rate when the user gave none. */
436 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
437 switch(avctx->codec_id) {
438 case AV_CODEC_ID_MPEG1VIDEO:
439 case AV_CODEC_ID_MPEG2VIDEO:
440 avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
442 case AV_CODEC_ID_MPEG4:
443 case AV_CODEC_ID_MSMPEG4V1:
444 case AV_CODEC_ID_MSMPEG4V2:
445 case AV_CODEC_ID_MSMPEG4V3:
/* Piecewise-linear interpolation of the buffer size (in units of
 * 16384 bits) between rate breakpoints. */
446 if (avctx->rc_max_rate >= 15000000) {
447 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
448 } else if(avctx->rc_max_rate >= 2000000) {
449 avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
450 } else if(avctx->rc_max_rate >= 384000) {
451 avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
453 avctx->rc_buffer_size = 40;
454 avctx->rc_buffer_size *= 16384;
457 if (avctx->rc_buffer_size) {
458 av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
462 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
463 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
467 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
468 av_log(avctx, AV_LOG_INFO,
469 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
472 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
473 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
477 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
478 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
482 if (avctx->rc_max_rate &&
483 avctx->rc_max_rate == avctx->bit_rate &&
484 avctx->rc_max_rate != avctx->rc_min_rate) {
485 av_log(avctx, AV_LOG_INFO,
486 "impossible bitrate constraints, this will fail\n");
/* Buffer must hold at least one frame's worth of bits. */
489 if (avctx->rc_buffer_size &&
490 avctx->bit_rate * (int64_t)avctx->time_base.num >
491 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
492 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
496 if (!s->fixed_qscale &&
497 avctx->bit_rate * av_q2d(avctx->time_base) >
498 avctx->bit_rate_tolerance) {
499 av_log(avctx, AV_LOG_WARNING,
500 "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, (int64_t)avctx->bit_rate);
501 avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
/* CBR MPEG-1/2: warn when the 16-bit vbv_delay field cannot represent
 * the configured buffer at this rate. */
504 if (s->avctx->rc_max_rate &&
505 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
506 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
507 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
508 90000LL * (avctx->rc_buffer_size - 1) >
509 s->avctx->rc_max_rate * 0xFFFFLL) {
510 av_log(avctx, AV_LOG_INFO,
511 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
512 "specified vbv buffer is too large for the given bitrate!\n");
/* ---- per-codec feature/capability checks ---- */
515 if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
516 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
517 s->codec_id != AV_CODEC_ID_FLV1) {
518 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
522 if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
523 av_log(avctx, AV_LOG_ERROR,
524 "OBMC is only supported with simple mb decision\n");
528 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
529 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
533 if (s->max_b_frames &&
534 s->codec_id != AV_CODEC_ID_MPEG4 &&
535 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
536 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
537 av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
540 if (s->max_b_frames < 0) {
541 av_log(avctx, AV_LOG_ERROR,
542 "max b frames must be 0 or positive for mpegvideo based encoders\n");
546 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
547 s->codec_id == AV_CODEC_ID_H263 ||
548 s->codec_id == AV_CODEC_ID_H263P) &&
549 (avctx->sample_aspect_ratio.num > 255 ||
550 avctx->sample_aspect_ratio.den > 255)) {
551 av_log(avctx, AV_LOG_WARNING,
552 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
553 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
554 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
555 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
/* ---- per-codec resolution constraints ---- */
558 if ((s->codec_id == AV_CODEC_ID_H263 ||
559 s->codec_id == AV_CODEC_ID_H263P) &&
560 (avctx->width > 2048 ||
561 avctx->height > 1152 )) {
562 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
565 if ((s->codec_id == AV_CODEC_ID_H263 ||
566 s->codec_id == AV_CODEC_ID_H263P) &&
567 ((avctx->width &3) ||
568 (avctx->height&3) )) {
569 av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
573 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
574 (avctx->width > 4095 ||
575 avctx->height > 4095 )) {
576 av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
580 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
581 (avctx->width > 16383 ||
582 avctx->height > 16383 )) {
583 av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
/* RV10 (part of the width condition is on an elided line). */
587 if (s->codec_id == AV_CODEC_ID_RV10 &&
589 avctx->height&15 )) {
590 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
591 return AVERROR(EINVAL);
594 if (s->codec_id == AV_CODEC_ID_RV20 &&
597 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
598 return AVERROR(EINVAL);
601 if ((s->codec_id == AV_CODEC_ID_WMV1 ||
602 s->codec_id == AV_CODEC_ID_WMV2) &&
604 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
608 if ((s->avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
609 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
610 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
614 #if FF_API_PRIVATE_OPT
615 FF_DISABLE_DEPRECATION_WARNINGS
616 if (avctx->mpeg_quant)
617 s->mpeg_quant = avctx->mpeg_quant;
618 FF_ENABLE_DEPRECATION_WARNINGS
621 // FIXME mpeg2 uses that too
622 if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
623 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
624 av_log(avctx, AV_LOG_ERROR,
625 "mpeg2 style quantization not supported by codec\n");
629 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
630 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
634 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
635 s->avctx->mb_decision != FF_MB_DECISION_RD) {
636 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
640 #if FF_API_PRIVATE_OPT
641 FF_DISABLE_DEPRECATION_WARNINGS
642 if (avctx->scenechange_threshold)
643 s->scenechange_threshold = avctx->scenechange_threshold;
644 FF_ENABLE_DEPRECATION_WARNINGS
647 if (s->scenechange_threshold < 1000000000 &&
648 (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
649 av_log(avctx, AV_LOG_ERROR,
650 "closed gop with scene change detection are not supported yet, "
651 "set threshold to 1000000000\n");
655 if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
656 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
657 av_log(avctx, AV_LOG_ERROR,
658 "low delay forcing is only available for mpeg2\n");
661 if (s->max_b_frames != 0) {
662 av_log(avctx, AV_LOG_ERROR,
663 "b frames cannot be used with low delay\n");
668 if (s->q_scale_type == 1) {
669 if (avctx->qmax > 28) {
670 av_log(avctx, AV_LOG_ERROR,
671 "non linear quant only supports qmax <= 28 currently\n");
676 if (avctx->slices > 1 &&
677 (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
678 av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
679 return AVERROR(EINVAL);
682 if (s->avctx->thread_count > 1 &&
683 s->codec_id != AV_CODEC_ID_MPEG4 &&
684 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
685 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
686 s->codec_id != AV_CODEC_ID_MJPEG &&
687 (s->codec_id != AV_CODEC_ID_H263P)) {
688 av_log(avctx, AV_LOG_ERROR,
689 "multi threaded encoding not supported by codec\n");
693 if (s->avctx->thread_count < 1) {
694 av_log(avctx, AV_LOG_ERROR,
695 "automatic thread number detection not supported by codec, "
700 if (!avctx->time_base.den || !avctx->time_base.num) {
701 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
705 #if FF_API_PRIVATE_OPT
706 FF_DISABLE_DEPRECATION_WARNINGS
707 if (avctx->b_frame_strategy)
708 s->b_frame_strategy = avctx->b_frame_strategy;
/* 40 is presumably the documented default of b_sensitivity — only a
 * non-default value is imported. TODO confirm against AVCodecContext docs. */
709 if (avctx->b_sensitivity != 40)
710 s->b_sensitivity = avctx->b_sensitivity;
711 FF_ENABLE_DEPRECATION_WARNINGS
714 if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
715 av_log(avctx, AV_LOG_INFO,
716 "notice: b_frame_strategy only affects the first pass\n");
717 s->b_frame_strategy = 0;
/* Normalize the timebase (the surrounding if is partially elided). */
720 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
722 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
723 avctx->time_base.den /= i;
724 avctx->time_base.num /= i;
/* ---- default quantizer biases; MPEG/MJPEG use a 3/8 intra bias ---- */
728 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) {
729 // (a + x * 3 / 8) / x
730 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
731 s->inter_quant_bias = 0;
/* else branch (opening elided): H.263-style -1/4 inter bias. */
733 s->intra_quant_bias = 0;
735 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
738 if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
739 av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
740 return AVERROR(EINVAL);
743 #if FF_API_QUANT_BIAS
744 FF_DISABLE_DEPRECATION_WARNINGS
745 if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
746 s->intra_quant_bias = avctx->intra_quant_bias;
747 if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
748 s->inter_quant_bias = avctx->inter_quant_bias;
749 FF_ENABLE_DEPRECATION_WARNINGS
752 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
/* MPEG-4 stores the timebase denominator in a 16-bit field. */
754 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
755 s->avctx->time_base.den > (1 << 16) - 1) {
756 av_log(avctx, AV_LOG_ERROR,
757 "timebase %d/%d not supported by MPEG 4 standard, "
758 "the maximum admitted value for the timebase denominator "
759 "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
763 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
/* ---- per-codec output format, delay and feature flags ---- */
765 switch (avctx->codec->id) {
766 case AV_CODEC_ID_MPEG1VIDEO:
767 s->out_format = FMT_MPEG1;
768 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
769 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
771 case AV_CODEC_ID_MPEG2VIDEO:
772 s->out_format = FMT_MPEG1;
773 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
774 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
777 case AV_CODEC_ID_MJPEG:
778 case AV_CODEC_ID_AMV:
779 s->out_format = FMT_MJPEG;
780 s->intra_only = 1; /* force intra only for jpeg */
781 if (!CONFIG_MJPEG_ENCODER ||
782 ff_mjpeg_encode_init(s) < 0)
787 case AV_CODEC_ID_H261:
788 if (!CONFIG_H261_ENCODER)
790 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
791 av_log(avctx, AV_LOG_ERROR,
792 "The specified picture size of %dx%d is not valid for the "
793 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
794 s->width, s->height);
797 s->out_format = FMT_H261;
800 s->rtp_mode = 0; /* Sliced encoding not supported */
802 case AV_CODEC_ID_H263:
803 if (!CONFIG_H263_ENCODER)
805 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
806 s->width, s->height) == 8) {
807 av_log(avctx, AV_LOG_ERROR,
808 "The specified picture size of %dx%d is not valid for "
809 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
810 "352x288, 704x576, and 1408x1152. "
811 "Try H.263+.\n", s->width, s->height);
814 s->out_format = FMT_H263;
818 case AV_CODEC_ID_H263P:
819 s->out_format = FMT_H263;
822 s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
823 s->modified_quant = s->h263_aic;
824 s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
825 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
828 /* These are just to be sure */
832 case AV_CODEC_ID_FLV1:
833 s->out_format = FMT_H263;
834 s->h263_flv = 2; /* format = 1; 11-bit codes */
835 s->unrestricted_mv = 1;
836 s->rtp_mode = 0; /* don't allow GOB */
840 case AV_CODEC_ID_RV10:
841 s->out_format = FMT_H263;
845 case AV_CODEC_ID_RV20:
846 s->out_format = FMT_H263;
849 s->modified_quant = 1;
853 s->unrestricted_mv = 0;
855 case AV_CODEC_ID_MPEG4:
856 s->out_format = FMT_H263;
858 s->unrestricted_mv = 1;
859 s->low_delay = s->max_b_frames ? 0 : 1;
860 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
862 case AV_CODEC_ID_MSMPEG4V2:
863 s->out_format = FMT_H263;
865 s->unrestricted_mv = 1;
866 s->msmpeg4_version = 2;
870 case AV_CODEC_ID_MSMPEG4V3:
871 s->out_format = FMT_H263;
873 s->unrestricted_mv = 1;
874 s->msmpeg4_version = 3;
875 s->flipflop_rounding = 1;
879 case AV_CODEC_ID_WMV1:
880 s->out_format = FMT_H263;
882 s->unrestricted_mv = 1;
883 s->msmpeg4_version = 4;
884 s->flipflop_rounding = 1;
888 case AV_CODEC_ID_WMV2:
889 s->out_format = FMT_H263;
891 s->unrestricted_mv = 1;
892 s->msmpeg4_version = 5;
893 s->flipflop_rounding = 1;
901 #if FF_API_PRIVATE_OPT
902 FF_DISABLE_DEPRECATION_WARNINGS
903 if (avctx->noise_reduction)
904 s->noise_reduction = avctx->noise_reduction;
905 FF_ENABLE_DEPRECATION_WARNINGS
908 avctx->has_b_frames = !s->low_delay;
/* The closing term of this condition is on an elided line. */
912 s->progressive_frame =
913 s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
914 AV_CODEC_FLAG_INTERLACED_ME) ||
/* ---- context/DSP initialization and table allocation ---- */
919 if (ff_mpv_common_init(s) < 0)
922 ff_fdctdsp_init(&s->fdsp, avctx);
923 ff_me_cmp_init(&s->mecc, avctx);
924 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
925 ff_pixblockdsp_init(&s->pdsp, avctx);
926 ff_qpeldsp_init(&s->qdsp);
928 if (s->msmpeg4_version) {
929 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
930 2 * 2 * (MAX_LEVEL + 1) *
931 (MAX_RUN + 1) * 2 * sizeof(int), fail);
933 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
/* Quantization tables: 32 qscale rows of 64 coefficients each. */
935 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
936 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail);
937 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
938 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
939 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
940 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
941 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
942 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
943 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
944 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
947 if (s->noise_reduction) {
948 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
949 2 * 64 * sizeof(uint16_t), fail);
952 ff_dct_encode_init(s);
954 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
955 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
957 if (s->slice_context_count > 1) {
960 if (avctx->codec_id == AV_CODEC_ID_H263P)
961 s->h263_slice_structured = 1;
964 s->quant_precision = 5;
966 #if FF_API_PRIVATE_OPT
967 FF_DISABLE_DEPRECATION_WARNINGS
968 if (avctx->frame_skip_threshold)
969 s->frame_skip_threshold = avctx->frame_skip_threshold;
970 if (avctx->frame_skip_factor)
971 s->frame_skip_factor = avctx->frame_skip_factor;
972 if (avctx->frame_skip_exp)
973 s->frame_skip_exp = avctx->frame_skip_exp;
974 if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
975 s->frame_skip_cmp = avctx->frame_skip_cmp;
976 FF_ENABLE_DEPRECATION_WARNINGS
979 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, s->avctx->ildct_cmp);
980 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
/* ---- per-format encoder sub-initialization ---- */
982 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
983 ff_h261_encode_init(s);
984 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
985 ff_h263_encode_init(s);
986 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
987 if ((ret = ff_msmpeg4_encode_init(s)) < 0)
989 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
990 && s->out_format == FMT_MPEG1)
991 ff_mpeg1_encode_init(s);
/* ---- pick default quant matrices (IDCT-permuted), then apply any
 * user-supplied matrices ---- */
994 for (i = 0; i < 64; i++) {
995 int j = s->idsp.idct_permutation[i];
996 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
998 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
999 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
1000 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1001 s->intra_matrix[j] =
1002 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
/* else branch (opening elided): MPEG-1 style defaults. */
1005 s->chroma_intra_matrix[j] =
1006 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1007 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1009 if (s->avctx->intra_matrix)
1010 s->intra_matrix[j] = s->avctx->intra_matrix[i];
1011 if (s->avctx->inter_matrix)
1012 s->inter_matrix[j] = s->avctx->inter_matrix[i];
1015 /* precompute matrix */
1016 /* for mjpeg, we do include qscale in the matrix */
1017 if (s->out_format != FMT_MJPEG) {
1018 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
1019 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1021 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
1022 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1026 if (ff_rate_control_init(s) < 0)
/* ---- more deprecated-option imports into the private context ---- */
1029 #if FF_API_ERROR_RATE
1030 FF_DISABLE_DEPRECATION_WARNINGS
1031 if (avctx->error_rate)
1032 s->error_rate = avctx->error_rate;
1033 FF_ENABLE_DEPRECATION_WARNINGS;
1036 #if FF_API_NORMALIZE_AQP
1037 FF_DISABLE_DEPRECATION_WARNINGS
1038 if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP)
1039 s->mpv_flags |= FF_MPV_FLAG_NAQ;
1040 FF_ENABLE_DEPRECATION_WARNINGS;
1044 FF_DISABLE_DEPRECATION_WARNINGS
1045 if (avctx->flags & CODEC_FLAG_MV0)
1046 s->mpv_flags |= FF_MPV_FLAG_MV0;
1047 FF_ENABLE_DEPRECATION_WARNINGS
1051 FF_DISABLE_DEPRECATION_WARNINGS
1052 if (avctx->rc_qsquish != 0.0)
1053 s->rc_qsquish = avctx->rc_qsquish;
1054 if (avctx->rc_qmod_amp != 0.0)
1055 s->rc_qmod_amp = avctx->rc_qmod_amp;
1056 if (avctx->rc_qmod_freq)
1057 s->rc_qmod_freq = avctx->rc_qmod_freq;
1058 if (avctx->rc_buffer_aggressivity != 1.0)
1059 s->rc_buffer_aggressivity = avctx->rc_buffer_aggressivity;
1060 if (avctx->rc_initial_cplx != 0.0)
1061 s->rc_initial_cplx = avctx->rc_initial_cplx;
1063 s->lmin = avctx->lmin;
1065 s->lmax = avctx->lmax;
/* Replace the rc_eq string (NULL check around the ENOMEM return elided). */
1068 av_freep(&s->rc_eq);
1069 s->rc_eq = av_strdup(avctx->rc_eq);
1071 return AVERROR(ENOMEM);
1073 FF_ENABLE_DEPRECATION_WARNINGS
1076 #if FF_API_PRIVATE_OPT
1077 FF_DISABLE_DEPRECATION_WARNINGS
1078 if (avctx->brd_scale)
1079 s->brd_scale = avctx->brd_scale;
1081 if (avctx->prediction_method)
1082 s->pred = avctx->prediction_method + 1;
1083 FF_ENABLE_DEPRECATION_WARNINGS
/* ---- B-frame strategy 2 needs downscaled scratch frames ---- */
1086 if (s->b_frame_strategy == 2) {
1087 for (i = 0; i < s->max_b_frames + 2; i++) {
1088 s->tmp_frames[i] = av_frame_alloc();
1089 if (!s->tmp_frames[i])
1090 return AVERROR(ENOMEM);
1092 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1093 s->tmp_frames[i]->width = s->width >> s->brd_scale;
1094 s->tmp_frames[i]->height = s->height >> s->brd_scale;
1096 ret = av_frame_get_buffer(s->tmp_frames[i], 32);
/* ---- export CPB properties as stream side data ---- */
1102 cpb_props = ff_add_cpb_side_data(avctx);
1104 return AVERROR(ENOMEM);
1105 cpb_props->max_bitrate = avctx->rc_max_rate;
1106 cpb_props->min_bitrate = avctx->rc_min_rate;
1107 cpb_props->avg_bitrate = avctx->bit_rate;
1108 cpb_props->buffer_size = avctx->rc_buffer_size;
/* fail: label (elided) — tear down everything allocated above. */
1112 ff_mpv_encode_end(avctx);
1113 return AVERROR_UNKNOWN;
/* Free everything allocated by ff_mpv_encode_init(). Safe to call from the
 * init fail path: av_freep/av_frame_free tolerate NULL, and the chroma
 * tables are only freed separately when they do not alias the luma tables.
 * (Return statement / closing brace elided in this chunk.) */
1116 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1118 MpegEncContext *s = avctx->priv_data;
1121 ff_rate_control_uninit(s);
1123 ff_mpv_common_end(s);
1124 if (CONFIG_MJPEG_ENCODER &&
1125 s->out_format == FMT_MJPEG)
1126 ff_mjpeg_encode_close(s);
1128 av_freep(&avctx->extradata);
1130 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1131 av_frame_free(&s->tmp_frames[i]);
1133 ff_free_picture_tables(&s->new_picture);
1134 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1136 av_freep(&s->avctx->stats_out);
1137 av_freep(&s->ac_stats);
/* Chroma tables may alias the luma tables; only free them when distinct
 * to avoid a double free. */
1139 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1140 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1141 s->q_chroma_intra_matrix= NULL;
1142 s->q_chroma_intra_matrix16= NULL;
1143 av_freep(&s->q_intra_matrix);
1144 av_freep(&s->q_inter_matrix);
1145 av_freep(&s->q_intra_matrix16);
1146 av_freep(&s->q_inter_matrix16);
1147 av_freep(&s->input_picture);
1148 av_freep(&s->reordered_input_picture);
1149 av_freep(&s->dct_offset);
/* Sum of absolute differences of a 16x16 block against a constant reference
 * value `ref` (used as a flatness measure). Return / closing braces elided
 * in this chunk. */
1154 static int get_sae(uint8_t *src, int ref, int stride)
1159 for (y = 0; y < 16; y++) {
1160 for (x = 0; x < 16; x++) {
1161 acc += FFABS(src[x + y * stride] - ref);
/* Count 16x16 macroblocks for which intra coding looks cheaper than
 * inter coding: a block is counted when its deviation from its own mean
 * (SAE + 500 bias) is still below the SAD against the reference frame.
 * Width/height are rounded down to whole macroblocks. */
1168 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1169 uint8_t *ref, int stride)
1175 h = s->height & ~15;
1177 for (y = 0; y < h; y += 16) {
1178 for (x = 0; x < w; x += 16) {
/* SAD vs. reference = inter cost proxy; SAE vs. block mean = intra cost proxy. */
1179 int offset = x + y * stride;
1180 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
/* pix_sum over 256 pixels, +128 for rounding, >>8 gives the block mean. */
1182 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1183 int sae = get_sae(src + offset, mean, stride);
/* Boolean add: 1 when intra (sae+500) beats inter (sad). */
1185 acc += sae + 500 < sad;
/* Thin wrapper: allocate (or adopt, when 'shared') the buffers of a
 * Picture with the encoder's geometry; forwards to ff_alloc_picture(). */
1191 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1193 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1194 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1195 s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1196 &s->linesize, &s->uvlinesize);
/* Take one user-supplied input frame into the encoder's input queue:
 * validate/derive its pts, either reference it directly ("direct" path)
 * or copy it into an internal Picture (padding edges as needed), and
 * append it to s->input_picture[] honoring the B-frame encoding delay.
 * NOTE(review): several lines (declarations, returns, else branches)
 * are elided by extraction; comments describe only the visible code. */
1199 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1201 Picture *pic = NULL;
1203 int i, display_picture_number = 0, ret;
/* Delay between input and coded order: max_b_frames, or 1 frame unless
 * low_delay is set. */
1204 int encoding_delay = s->max_b_frames ? s->max_b_frames
1205 : (s->low_delay ? 0 : 1);
1206 int flush_offset = 1;
1211 display_picture_number = s->input_picture_number++;
1213 if (pts != AV_NOPTS_VALUE) {
/* User gave a pts: enforce monotonicity against the previous one. */
1214 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1215 int64_t last = s->user_specified_pts;
1218 av_log(s->avctx, AV_LOG_ERROR,
1219 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1221 return AVERROR(EINVAL);
/* Remember pts->dts shift once the first delayed frame arrives. */
1224 if (!s->low_delay && display_picture_number == 1)
1225 s->dts_delta = pts - last;
1227 s->user_specified_pts = pts;
/* No pts supplied: guess last+1, or fall back to the frame counter. */
1229 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1230 s->user_specified_pts =
1231 pts = s->user_specified_pts + 1;
1232 av_log(s->avctx, AV_LOG_INFO,
1233 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1236 pts = display_picture_number;
/* The zero-copy "direct" path requires matching strides and alignment. */
1240 if (!pic_arg->buf[0] ||
1241 pic_arg->linesize[0] != s->linesize ||
1242 pic_arg->linesize[1] != s->uvlinesize ||
1243 pic_arg->linesize[2] != s->uvlinesize)
1245 if ((s->width & 15) || (s->height & 15))
1247 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1249 if (s->linesize & (STRIDE_ALIGN-1))
1252 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1253 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1255 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1259 pic = &s->picture[i];
1263 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1266 ret = alloc_picture(s, pic, direct);
/* If the caller's data already sits at the in-place offset of our own
 * buffer, no copy is needed at all. */
1271 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1272 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1273 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1276 int h_chroma_shift, v_chroma_shift;
1277 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
/* Copy each plane into the internal buffer, plane-by-plane. */
1281 for (i = 0; i < 3; i++) {
1282 int src_stride = pic_arg->linesize[i];
1283 int dst_stride = i ? s->uvlinesize : s->linesize;
1284 int h_shift = i ? h_chroma_shift : 0;
1285 int v_shift = i ? v_chroma_shift : 0;
1286 int w = s->width >> h_shift;
1287 int h = s->height >> v_shift;
1288 uint8_t *src = pic_arg->data[i];
1289 uint8_t *dst = pic->f->data[i];
/* Interlaced MPEG-2 with 32-aligned height may need extra bottom pad
 * (condition visible here; the consequence line is elided). */
1292 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1293 && !s->progressive_sequence
1294 && FFALIGN(s->height, 32) - s->height > 16)
1297 if (!s->avctx->rc_buffer_size)
1298 dst += INPLACE_OFFSET;
/* Equal strides: one bulk memcpy; otherwise row-by-row (loop partly elided). */
1300 if (src_stride == dst_stride)
1301 memcpy(dst, src, src_stride * h);
1304 uint8_t *dst2 = dst;
1306 memcpy(dst2, src, w);
/* Replicate edge pixels when dimensions are not MB-aligned. */
1311 if ((s->width & 15) || (s->height & (vpad-1))) {
1312 s->mpvencdsp.draw_edges(dst, dst_stride,
1321 ret = av_frame_copy_props(pic->f, pic_arg);
1325 pic->f->display_picture_number = display_picture_number;
1326 pic->f->pts = pts; // we set this here to avoid modifiying pic_arg
1328 /* Flushing: When we have not received enough input frames,
1329 * ensure s->input_picture[0] contains the first picture */
1330 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1331 if (s->input_picture[flush_offset])
1334 if (flush_offset <= 1)
1337 encoding_delay = encoding_delay - flush_offset + 1;
1340 /* shift buffer entries */
1341 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1342 s->input_picture[i - flush_offset] = s->input_picture[i];
1344 s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether picture 'p' is similar enough to reference 'ref' to be
 * skipped entirely. Accumulates a per-8x8-block comparison score over
 * all three planes using the metric selected by |frame_skip_exp|, then
 * tests it against frame_skip_threshold and a lambda-scaled factor. */
1349 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1353 int64_t score64 = 0;
1355 for (plane = 0; plane < 3; plane++) {
1356 const int stride = p->f->linesize[plane];
/* bw: blocks-per-MB in each direction — 2 for luma, 1 for chroma. */
1357 const int bw = plane ? 1 : 2;
1358 for (y = 0; y < s->mb_height * bw; y++) {
1359 for (x = 0; x < s->mb_width * bw; x++) {
/* Non-shared pictures carry a 16-byte in-place offset. */
1360 int off = p->shared ? 0 : 16;
1361 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1362 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1363 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
/* Accumulation mode: max, L1, L2, L3, L4 of the block scores. */
1365 switch (FFABS(s->frame_skip_exp)) {
1366 case 0: score = FFMAX(score, v); break;
1367 case 1: score += FFABS(v); break;
1368 case 2: score64 += v * (int64_t)v; break;
1369 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1370 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
/* Negative exponent: normalize back via the matching root. */
1379 if (s->frame_skip_exp < 0)
1380 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1381 -1.0/s->frame_skip_exp);
1383 if (score64 < s->frame_skip_threshold)
1385 if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
/* Encode one frame with a scratch encoder context (used by the B-frame
 * strategy estimation) and return information about the produced packet.
 * NOTE(review): the lines between encode and unref (size extraction /
 * return) are elided by extraction. */
1390 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1392 AVPacket pkt = { 0 };
1393 int ret, got_output;
1395 av_init_packet(&pkt);
1396 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1401 av_packet_unref(&pkt);
/* b_frame_strategy==2: brute-force search for the best number of
 * B-frames. Encodes the queued input pictures, downscaled by brd_scale,
 * with every candidate B-count using a scratch encoder, and picks the
 * count with the lowest rate-distortion cost (bits*lambda2 + SSE). */
1407 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1408 AVCodecContext *c = avcodec_alloc_context3(NULL);
1409 const int scale = s->brd_scale;
1410 int i, j, out_size, p_lambda, b_lambda, lambda2;
1411 int64_t best_rd = INT64_MAX;
1412 int best_b_count = -1;
1415 return AVERROR(ENOMEM);
1416 av_assert0(scale >= 0 && scale <= 3);
1419 //s->next_picture_ptr->quality;
1420 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1421 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1422 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1423 if (!b_lambda) // FIXME we should do this somewhere else
1424 b_lambda = p_lambda;
1425 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* Configure the scratch encoder to mirror the real one, at reduced size. */
1428 c->width = s->width >> scale;
1429 c->height = s->height >> scale;
1430 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1431 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1432 c->mb_decision = s->avctx->mb_decision;
1433 c->me_cmp = s->avctx->me_cmp;
1434 c->mb_cmp = s->avctx->mb_cmp;
1435 c->me_sub_cmp = s->avctx->me_sub_cmp;
1436 c->pix_fmt = AV_PIX_FMT_YUV420P;
1437 c->time_base = s->avctx->time_base;
1438 c->max_b_frames = s->max_b_frames;
1440 if (avcodec_open2(c, codec, NULL) < 0)
/* Downscale the candidate pictures (index 0 = last coded picture). */
1443 for (i = 0; i < s->max_b_frames + 2; i++) {
1444 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1445 s->next_picture_ptr;
1448 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1449 pre_input = *pre_input_ptr;
1450 memcpy(data, pre_input_ptr->f->data, sizeof(data));
/* Non-shared input pictures store pixels at INPLACE_OFFSET. */
1452 if (!pre_input.shared && i) {
1453 data[0] += INPLACE_OFFSET;
1454 data[1] += INPLACE_OFFSET;
1455 data[2] += INPLACE_OFFSET;
1458 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1459 s->tmp_frames[i]->linesize[0],
1461 pre_input.f->linesize[0],
1462 c->width, c->height);
1463 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1464 s->tmp_frames[i]->linesize[1],
1466 pre_input.f->linesize[1],
1467 c->width >> 1, c->height >> 1);
1468 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1469 s->tmp_frames[i]->linesize[2],
1471 pre_input.f->linesize[2],
1472 c->width >> 1, c->height >> 1);
/* Try each B-frame run length j and accumulate its RD cost. */
1476 for (j = 0; j < s->max_b_frames + 1; j++) {
1479 if (!s->input_picture[j])
1482 c->error[0] = c->error[1] = c->error[2] = 0;
/* Frame 0 is encoded as I to seed the scratch encoder. */
1484 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1485 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1487 out_size = encode_frame(c, s->tmp_frames[0]);
1489 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1491 for (i = 0; i < s->max_b_frames + 1; i++) {
/* Every (j+1)-th frame (and the final one) becomes a P, the rest B. */
1492 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1494 s->tmp_frames[i + 1]->pict_type = is_p ?
1495 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1496 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1498 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1500 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1503 /* get the delayed frames */
1505 out_size = encode_frame(c, NULL);
1506 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* Add distortion (per-plane SSE reported by the scratch encoder). */
1509 rd += c->error[0] + c->error[1] + c->error[2];
1520 return best_b_count;
/* Choose the next picture to code and establish coding order: applies
 * frame skipping, decides I/P/B types per b_frame_strategy, reorders
 * input pictures into reordered_input_picture[], and sets up
 * s->new_picture / s->current_picture(_ptr) for the encode step.
 * NOTE(review): extraction has elided a number of lines (else branches,
 * returns); comments describe only the visible statements. */
1523 static int select_input_picture(MpegEncContext *s)
/* Advance the reorder queue by one slot. */
1527 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1528 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1529 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1531 /* set next picture type & ordering */
1532 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1533 if (s->frame_skip_threshold || s->frame_skip_factor) {
1534 if (s->picture_in_gop_number < s->gop_size &&
1535 s->next_picture_ptr &&
1536 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1537 // FIXME check that te gop check above is +-1 correct
1538 av_frame_unref(s->input_picture[0]->f);
/* Tell rate control a zero-size frame was emitted. */
1540 ff_vbv_update(s, 0);
/* No reference available or intra-only stream: force an I frame. */
1546 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1547 !s->next_picture_ptr || s->intra_only) {
1548 s->reordered_input_picture[0] = s->input_picture[0];
1549 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1550 s->reordered_input_picture[0]->f->coded_picture_number =
1551 s->coded_picture_number++;
/* Two-pass mode: take picture types from the first-pass log. */
1555 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1556 for (i = 0; i < s->max_b_frames + 1; i++) {
1557 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1559 if (pict_num >= s->rc_context.num_entries)
1561 if (!s->input_picture[i]) {
1562 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1566 s->input_picture[i]->f->pict_type =
1567 s->rc_context.entry[pict_num].new_pict_type;
/* Strategy 0: always use the maximum available run of B frames. */
1571 if (s->b_frame_strategy == 0) {
1572 b_frames = s->max_b_frames;
1573 while (b_frames && !s->input_picture[b_frames])
/* Strategy 1: heuristic based on the intra-block count between
 * consecutive inputs (b_frame_score). */
1575 } else if (s->b_frame_strategy == 1) {
1576 for (i = 1; i < s->max_b_frames + 1; i++) {
1577 if (s->input_picture[i] &&
1578 s->input_picture[i]->b_frame_score == 0) {
1579 s->input_picture[i]->b_frame_score =
1581 s->input_picture[i ]->f->data[0],
1582 s->input_picture[i - 1]->f->data[0],
1586 for (i = 0; i < s->max_b_frames + 1; i++) {
1587 if (!s->input_picture[i] ||
1588 s->input_picture[i]->b_frame_score - 1 >
1589 s->mb_num / s->b_sensitivity)
1593 b_frames = FFMAX(0, i - 1);
/* Reset scores of the frames that will be consumed. */
1596 for (i = 0; i < b_frames + 1; i++) {
1597 s->input_picture[i]->b_frame_score = 0;
/* Strategy 2: exhaustive RD search (see estimate_best_b_count). */
1599 } else if (s->b_frame_strategy == 2) {
1600 b_frames = estimate_best_b_count(s);
/* A user-forced non-B type inside the run cuts the B run short. */
1605 for (i = b_frames - 1; i >= 0; i--) {
1606 int type = s->input_picture[i]->f->pict_type;
1607 if (type && type != AV_PICTURE_TYPE_B)
1610 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1611 b_frames == s->max_b_frames) {
1612 av_log(s->avctx, AV_LOG_ERROR,
1613 "warning, too many b frames in a row\n");
/* GOP boundary handling: shorten the run (strict GOP) or force an I. */
1616 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1617 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1618 s->gop_size > s->picture_in_gop_number) {
1619 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1621 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1623 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1627 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1628 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
/* The anchor picture is coded first, then the B frames behind it. */
1631 s->reordered_input_picture[0] = s->input_picture[b_frames];
1632 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1633 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1634 s->reordered_input_picture[0]->f->coded_picture_number =
1635 s->coded_picture_number++;
1636 for (i = 0; i < b_frames; i++) {
1637 s->reordered_input_picture[i + 1] = s->input_picture[i];
1638 s->reordered_input_picture[i + 1]->f->pict_type =
1640 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1641 s->coded_picture_number++;
1646 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1648 if (s->reordered_input_picture[0]) {
/* B frames are never referenced; others get full reference marking. */
1649 s->reordered_input_picture[0]->reference =
1650 s->reordered_input_picture[0]->f->pict_type !=
1651 AV_PICTURE_TYPE_B ? 3 : 0;
1653 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1656 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1657 // input is a shared pix, so we can't modifiy it -> alloc a new
1658 // one & ensure that the shared one is reuseable
1661 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1664 pic = &s->picture[i];
1666 pic->reference = s->reordered_input_picture[0]->reference;
1667 if (alloc_picture(s, pic, 0) < 0) {
1671 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1675 /* mark us unused / free shared pic */
1676 av_frame_unref(s->reordered_input_picture[0]->f);
1677 s->reordered_input_picture[0]->shared = 0;
1679 s->current_picture_ptr = pic;
1681 // input is not a shared pix -> reuse buffer for current_pix
1682 s->current_picture_ptr = s->reordered_input_picture[0];
1683 for (i = 0; i < 4; i++) {
1684 s->new_picture.f->data[i] += INPLACE_OFFSET;
1687 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1688 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1689 s->current_picture_ptr)) < 0)
1692 s->picture_number = s->new_picture.f->display_picture_number;
/* Post-encode bookkeeping for the just-coded frame: pad reference
 * frames' borders (for unrestricted MV prediction), remember the last
 * picture type / lambda, and mirror state into the deprecated
 * coded_frame / error fields while those APIs still exist. */
1697 static void frame_end(MpegEncContext *s)
/* Only reference pictures need edge padding. */
1699 if (s->unrestricted_mv &&
1700 s->current_picture.reference &&
1702 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1703 int hshift = desc->log2_chroma_w;
1704 int vshift = desc->log2_chroma_h;
1705 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1706 s->current_picture.f->linesize[0],
1707 s->h_edge_pos, s->v_edge_pos,
1708 EDGE_WIDTH, EDGE_WIDTH,
1709 EDGE_TOP | EDGE_BOTTOM);
/* Chroma planes use subsampled sizes and edge widths. */
1710 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1711 s->current_picture.f->linesize[1],
1712 s->h_edge_pos >> hshift,
1713 s->v_edge_pos >> vshift,
1714 EDGE_WIDTH >> hshift,
1715 EDGE_WIDTH >> vshift,
1716 EDGE_TOP | EDGE_BOTTOM);
1717 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1718 s->current_picture.f->linesize[2],
1719 s->h_edge_pos >> hshift,
1720 s->v_edge_pos >> vshift,
1721 EDGE_WIDTH >> hshift,
1722 EDGE_WIDTH >> vshift,
1723 EDGE_TOP | EDGE_BOTTOM);
1728 s->last_pict_type = s->pict_type;
1729 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1730 if (s->pict_type!= AV_PICTURE_TYPE_B)
1731 s->last_non_b_pict_type = s->pict_type;
/* Legacy API mirrors, compiled out once the deprecation period ends. */
1733 #if FF_API_CODED_FRAME
1734 FF_DISABLE_DEPRECATION_WARNINGS
1735 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1736 FF_ENABLE_DEPRECATION_WARNINGS
1738 #if FF_API_ERROR_FRAME
1739 FF_DISABLE_DEPRECATION_WARNINGS
1740 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1741 sizeof(s->current_picture.encoding_error));
1742 FF_ENABLE_DEPRECATION_WARNINGS
/* Refresh the per-coefficient DCT noise-reduction offsets from the
 * accumulated error statistics, separately for intra and inter blocks.
 * Counters are halved once they exceed 2^16 to keep a moving average. */
1746 static void update_noise_reduction(MpegEncContext *s)
1750 for (intra = 0; intra < 2; intra++) {
1751 if (s->dct_count[intra] > (1 << 16)) {
1752 for (i = 0; i < 64; i++) {
1753 s->dct_error_sum[intra][i] >>= 1;
1755 s->dct_count[intra] >>= 1;
/* offset = noise_reduction * count / error_sum (rounded), per coeff. */
1758 for (i = 0; i < 64; i++) {
1759 s->dct_offset[intra][i] = (s->noise_reduction *
1760 s->dct_count[intra] +
1761 s->dct_error_sum[intra][i] / 2) /
1762 (s->dct_error_sum[intra][i] + 1);
/* Per-frame setup before encoding: rotate last/next/current reference
 * pictures, adjust data pointers for field pictures, select the
 * dequantizer functions for the output format, and refresh the noise
 * reduction tables when enabled. */
1767 static int frame_start(MpegEncContext *s)
1771 /* mark & release old frames */
1772 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1773 s->last_picture_ptr != s->next_picture_ptr &&
1774 s->last_picture_ptr->f->buf[0]) {
1775 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1778 s->current_picture_ptr->f->pict_type = s->pict_type;
1779 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1781 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1782 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1783 s->current_picture_ptr)) < 0)
/* Non-B pictures become the new forward/backward references. */
1786 if (s->pict_type != AV_PICTURE_TYPE_B) {
1787 s->last_picture_ptr = s->next_picture_ptr;
1789 s->next_picture_ptr = s->current_picture_ptr;
1792 if (s->last_picture_ptr) {
1793 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1794 if (s->last_picture_ptr->f->buf[0] &&
1795 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1796 s->last_picture_ptr)) < 0)
1799 if (s->next_picture_ptr) {
1800 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1801 if (s->next_picture_ptr->f->buf[0] &&
1802 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1803 s->next_picture_ptr)) < 0)
/* Field coding: double the strides and offset the bottom field. */
1807 if (s->picture_structure!= PICT_FRAME) {
1809 for (i = 0; i < 4; i++) {
1810 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1811 s->current_picture.f->data[i] +=
1812 s->current_picture.f->linesize[i];
1814 s->current_picture.f->linesize[i] *= 2;
1815 s->last_picture.f->linesize[i] *= 2;
1816 s->next_picture.f->linesize[i] *= 2;
/* Pick dequantizers matching the bitstream's quantization scheme. */
1820 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1821 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1822 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1823 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1824 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1825 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1827 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1828 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1831 if (s->dct_error_sum) {
1832 av_assert2(s->noise_reduction && s->encoding);
1833 update_noise_reduction(s);
/* Top-level encode entry point: queue the input frame, select/reorder
 * the picture to code, allocate the output packet, run the per-slice
 * encoders, apply VBV rate control (possibly re-encoding at a higher
 * lambda), write stuffing/vbv_delay, and finalize packet metadata.
 * NOTE(review): extraction has elided various lines (returns, braces,
 * else branches); comments cover only the visible statements. */
1839 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1840 const AVFrame *pic_arg, int *got_packet)
1842 MpegEncContext *s = avctx->priv_data;
1843 int i, stuffing_count, ret;
1844 int context_count = s->slice_context_count;
1846 s->vbv_ignore_qmax = 0;
1848 s->picture_in_gop_number++;
1850 if (load_input_picture(s, pic_arg) < 0)
1853 if (select_input_picture(s) < 0) {
/* A picture was selected for coding: emit one packet. */
1858 if (s->new_picture.f->data[0]) {
/* With a single slice context and no caller buffer we can let the
 * internal byte buffer grow instead of worst-case allocating. */
1859 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1860 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1862 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1863 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1866 s->mb_info_ptr = av_packet_new_side_data(pkt,
1867 AV_PKT_DATA_H263_MB_INFO,
1868 s->mb_width*s->mb_height*12);
1869 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* Give each slice thread a proportional slice of the packet buffer. */
1872 for (i = 0; i < context_count; i++) {
1873 int start_y = s->thread_context[i]->start_mb_y;
1874 int end_y = s->thread_context[i]-> end_mb_y;
1875 int h = s->mb_height;
1876 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1877 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1879 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1882 s->pict_type = s->new_picture.f->pict_type;
1884 ret = frame_start(s);
1888 ret = encode_picture(s, s->picture_number);
1889 if (growing_buffer) {
1890 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1891 pkt->data = s->pb.buf;
1892 pkt->size = avctx->internal->byte_buffer_size;
/* Mirror bit statistics into deprecated AVCodecContext fields. */
1897 #if FF_API_STAT_BITS
1898 FF_DISABLE_DEPRECATION_WARNINGS
1899 avctx->header_bits = s->header_bits;
1900 avctx->mv_bits = s->mv_bits;
1901 avctx->misc_bits = s->misc_bits;
1902 avctx->i_tex_bits = s->i_tex_bits;
1903 avctx->p_tex_bits = s->p_tex_bits;
1904 avctx->i_count = s->i_count;
1905 // FIXME f/b_count in avctx
1906 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1907 avctx->skip_count = s->skip_count;
1908 FF_ENABLE_DEPRECATION_WARNINGS
1913 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1914 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
/* VBV check: if the frame exceeds its budget, raise lambda and retry. */
1916 if (avctx->rc_buffer_size) {
1917 RateControlContext *rcc = &s->rc_context;
1918 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1919 int hq = (s->avctx->mb_decision == FF_MB_DECISION_RD || s->avctx->trellis);
1920 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1922 if (put_bits_count(&s->pb) > max_size &&
1923 s->lambda < s->lmax) {
1924 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1925 (s->qscale + 1) / s->qscale);
1926 if (s->adaptive_quant) {
1928 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1929 s->lambda_table[i] =
1930 FFMAX(s->lambda_table[i] + min_step,
1931 s->lambda_table[i] * (s->qscale + 1) /
1934 s->mb_skipped = 0; // done in frame_start()
1935 // done in encode_picture() so we must undo it
1936 if (s->pict_type == AV_PICTURE_TYPE_P) {
1937 if (s->flipflop_rounding ||
1938 s->codec_id == AV_CODEC_ID_H263P ||
1939 s->codec_id == AV_CODEC_ID_MPEG4)
1940 s->no_rounding ^= 1;
1942 if (s->pict_type != AV_PICTURE_TYPE_B) {
1943 s->time_base = s->last_time_base;
1944 s->last_non_b_time = s->time - s->pp_time;
/* Rewind all slice bit writers before re-encoding. */
1946 for (i = 0; i < context_count; i++) {
1947 PutBitContext *pb = &s->thread_context[i]->pb;
1948 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1950 s->vbv_ignore_qmax = 1;
1951 av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1955 av_assert0(s->avctx->rc_max_rate);
1958 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1959 ff_write_pass1_stats(s);
1961 for (i = 0; i < 4; i++) {
1962 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1963 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1965 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1966 s->current_picture_ptr->encoding_error,
1967 (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1970 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1971 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1972 s->misc_bits + s->i_tex_bits +
1974 flush_put_bits(&s->pb);
1975 s->frame_bits = put_bits_count(&s->pb);
1977 stuffing_count = ff_vbv_update(s, s->frame_bits);
1978 s->stuffing_bits = 8*stuffing_count;
/* Emit VBV stuffing as required (format-specific byte patterns). */
1979 if (stuffing_count) {
1980 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1981 stuffing_count + 50) {
1982 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1986 switch (s->codec_id) {
1987 case AV_CODEC_ID_MPEG1VIDEO:
1988 case AV_CODEC_ID_MPEG2VIDEO:
1989 while (stuffing_count--) {
1990 put_bits(&s->pb, 8, 0);
1993 case AV_CODEC_ID_MPEG4:
/* MPEG-4 stuffing: start code 0x000001C3 then 0xFF filler bytes. */
1994 put_bits(&s->pb, 16, 0);
1995 put_bits(&s->pb, 16, 0x1C3);
1996 stuffing_count -= 4;
1997 while (stuffing_count--) {
1998 put_bits(&s->pb, 8, 0xFF);
2002 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2004 flush_put_bits(&s->pb);
2005 s->frame_bits = put_bits_count(&s->pb);
2008 /* update mpeg1/2 vbv_delay for CBR */
2009 if (s->avctx->rc_max_rate &&
2010 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
2011 s->out_format == FMT_MPEG1 &&
2012 90000LL * (avctx->rc_buffer_size - 1) <=
2013 s->avctx->rc_max_rate * 0xFFFFLL) {
2014 AVCPBProperties *props;
2017 int vbv_delay, min_delay;
2018 double inbits = s->avctx->rc_max_rate *
2019 av_q2d(s->avctx->time_base);
2020 int minbits = s->frame_bits - 8 *
2021 (s->vbv_delay_ptr - s->pb.buf - 1);
2022 double bits = s->rc_context.buffer_index + minbits - inbits;
2025 av_log(s->avctx, AV_LOG_ERROR,
2026 "Internal error, negative bits\n");
2028 assert(s->repeat_first_field == 0);
/* vbv_delay in 90 kHz units, clamped to the 16-bit header field. */
2030 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
2031 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
2032 s->avctx->rc_max_rate;
2034 vbv_delay = FFMAX(vbv_delay, min_delay);
2036 av_assert0(vbv_delay < 0xFFFF);
/* Patch the 16-bit vbv_delay field in-place in the picture header
 * (it straddles byte boundaries: 3+8+5 bits). */
2038 s->vbv_delay_ptr[0] &= 0xF8;
2039 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2040 s->vbv_delay_ptr[1] = vbv_delay >> 5;
2041 s->vbv_delay_ptr[2] &= 0x07;
2042 s->vbv_delay_ptr[2] |= vbv_delay << 3;
2044 props = av_cpb_properties_alloc(&props_size);
2046 return AVERROR(ENOMEM);
/* CPB props use 27 MHz units: 90 kHz * 300. */
2047 props->vbv_delay = vbv_delay * 300;
2049 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
2050 (uint8_t*)props, props_size);
2056 #if FF_API_VBV_DELAY
2057 FF_DISABLE_DEPRECATION_WARNINGS
2058 avctx->vbv_delay = vbv_delay * 300;
2059 FF_ENABLE_DEPRECATION_WARNINGS
2062 s->total_bits += s->frame_bits;
2063 #if FF_API_STAT_BITS
2064 FF_DISABLE_DEPRECATION_WARNINGS
2065 avctx->frame_bits = s->frame_bits;
2066 FF_ENABLE_DEPRECATION_WARNINGS
/* Packet timestamps: dts lags pts by one reorder step for B frames. */
2070 pkt->pts = s->current_picture.f->pts;
2071 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2072 if (!s->current_picture.f->coded_picture_number)
2073 pkt->dts = pkt->pts - s->dts_delta;
2075 pkt->dts = s->reordered_pts;
2076 s->reordered_pts = pkt->pts;
2078 pkt->dts = pkt->pts;
2079 if (s->current_picture.f->key_frame)
2080 pkt->flags |= AV_PKT_FLAG_KEY;
2082 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2087 /* release non-reference frames */
2088 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2089 if (!s->picture[i].reference)
2090 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2093 av_assert1((s->frame_bits & 7) == 0);
2095 pkt->size = s->frame_bits / 8;
2096 *got_packet = !!pkt->size;
/* Zero out a block whose few remaining quantized coefficients are so
 * cheap-looking that coding them is not worth the bits: each |level|==1
 * coefficient scores per the zigzag-position table below, any larger
 * level disqualifies the block. A negative threshold also allows DC
 * elimination ('skip_dc', assignment partly elided by extraction). */
2100 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2101 int n, int threshold)
/* Score weights by scan position: early (low-frequency) coefficients
 * cost more to drop; positions past 24 score zero. */
2103 static const char tab[64] = {
2104 3, 2, 2, 1, 1, 1, 1, 1,
2105 1, 1, 1, 1, 1, 1, 1, 1,
2106 1, 1, 1, 1, 1, 1, 1, 1,
2107 0, 0, 0, 0, 0, 0, 0, 0,
2108 0, 0, 0, 0, 0, 0, 0, 0,
2109 0, 0, 0, 0, 0, 0, 0, 0,
2110 0, 0, 0, 0, 0, 0, 0, 0,
2111 0, 0, 0, 0, 0, 0, 0, 0
2116 int16_t *block = s->block[n];
2117 const int last_index = s->block_last_index[n];
2120 if (threshold < 0) {
2122 threshold = -threshold;
2126 /* Are all we could set to zero already zero? */
2127 if (last_index <= skip_dc - 1)
/* Accumulate the elimination score over all coded coefficients. */
2130 for (i = 0; i <= last_index; i++) {
2131 const int j = s->intra_scantable.permutated[i];
2132 const int level = FFABS(block[j]);
2134 if (skip_dc && i == 0)
2138 } else if (level > 1) {
/* Block deemed worth coding: keep it. */
2144 if (score >= threshold)
2146 for (i = skip_dc; i <= last_index; i++) {
2147 const int j = s->intra_scantable.permutated[i];
/* last_index 0 keeps only DC; -1 marks a completely empty block. */
2151 s->block_last_index[n] = 0;
2153 s->block_last_index[n] = -1;
/* Clamp quantized coefficients into the codec's representable range
 * [min_qcoeff, max_qcoeff]; intra DC is exempt. Logs a warning (in
 * simple MB-decision mode) when clipping actually occurred. */
2156 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2160 const int maxlevel = s->max_qcoeff;
2161 const int minlevel = s->min_qcoeff;
2165 i = 1; // skip clipping of intra dc
2169 for (; i <= last_index; i++) {
2170 const int j = s->intra_scantable.permutated[i];
2171 int level = block[j];
2173 if (level > maxlevel) {
2176 } else if (level < minlevel) {
/* Only warn in the cheap decision mode; RD modes handle it themselves. */
2184 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2185 av_log(s->avctx, AV_LOG_INFO,
2186 "warning, clipping %d dct coefficients to %d..%d\n",
2187 overflow, minlevel, maxlevel);
/* Compute a perceptual weight for each pixel of an 8x8 block from the
 * local variance of its 3x3 neighborhood (clipped at the block edges):
 * weight = 36*sqrt(count*sum(v^2) - sum(v)^2)/count. Flat areas get
 * small weights, textured areas large ones. */
2190 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2194 for (y = 0; y < 8; y++) {
2195 for (x = 0; x < 8; x++) {
/* 3x3 window clamped to [0,8) in both directions. */
2201 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2202 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2203 int v = ptr[x2 + y2 * stride];
2209 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2214 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2215 int motion_x, int motion_y,
2216 int mb_block_height,
2220 int16_t weight[12][64];
2221 int16_t orig[12][64];
2222 const int mb_x = s->mb_x;
2223 const int mb_y = s->mb_y;
2226 int dct_offset = s->linesize * 8; // default for progressive frames
2227 int uv_dct_offset = s->uvlinesize * 8;
2228 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2229 ptrdiff_t wrap_y, wrap_c;
2231 for (i = 0; i < mb_block_count; i++)
2232 skip_dct[i] = s->skipdct;
2234 if (s->adaptive_quant) {
2235 const int last_qp = s->qscale;
2236 const int mb_xy = mb_x + mb_y * s->mb_stride;
2238 s->lambda = s->lambda_table[mb_xy];
2241 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2242 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2243 s->dquant = s->qscale - last_qp;
2245 if (s->out_format == FMT_H263) {
2246 s->dquant = av_clip(s->dquant, -2, 2);
2248 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2250 if (s->pict_type == AV_PICTURE_TYPE_B) {
2251 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2254 if (s->mv_type == MV_TYPE_8X8)
2260 ff_set_qscale(s, last_qp + s->dquant);
2261 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2262 ff_set_qscale(s, s->qscale + s->dquant);
2264 wrap_y = s->linesize;
2265 wrap_c = s->uvlinesize;
2266 ptr_y = s->new_picture.f->data[0] +
2267 (mb_y * 16 * wrap_y) + mb_x * 16;
2268 ptr_cb = s->new_picture.f->data[1] +
2269 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2270 ptr_cr = s->new_picture.f->data[2] +
2271 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2273 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2274 uint8_t *ebuf = s->sc.edge_emu_buffer + 36 * wrap_y;
2275 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2276 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2277 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2279 16, 16, mb_x * 16, mb_y * 16,
2280 s->width, s->height);
2282 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2284 mb_block_width, mb_block_height,
2285 mb_x * mb_block_width, mb_y * mb_block_height,
2287 ptr_cb = ebuf + 16 * wrap_y;
2288 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2290 mb_block_width, mb_block_height,
2291 mb_x * mb_block_width, mb_y * mb_block_height,
2293 ptr_cr = ebuf + 16 * wrap_y + 16;
2297 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2298 int progressive_score, interlaced_score;
2300 s->interlaced_dct = 0;
2301 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2302 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2303 NULL, wrap_y, 8) - 400;
2305 if (progressive_score > 0) {
2306 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2307 NULL, wrap_y * 2, 8) +
2308 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2309 NULL, wrap_y * 2, 8);
2310 if (progressive_score > interlaced_score) {
2311 s->interlaced_dct = 1;
2313 dct_offset = wrap_y;
2314 uv_dct_offset = wrap_c;
2316 if (s->chroma_format == CHROMA_422 ||
2317 s->chroma_format == CHROMA_444)
2323 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2324 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2325 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2326 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2328 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2332 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2333 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2334 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2335 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2336 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2337 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2338 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2339 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2340 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2341 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2342 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2343 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2347 op_pixels_func (*op_pix)[4];
2348 qpel_mc_func (*op_qpix)[16];
2349 uint8_t *dest_y, *dest_cb, *dest_cr;
2351 dest_y = s->dest[0];
2352 dest_cb = s->dest[1];
2353 dest_cr = s->dest[2];
2355 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2356 op_pix = s->hdsp.put_pixels_tab;
2357 op_qpix = s->qdsp.put_qpel_pixels_tab;
2359 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2360 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2363 if (s->mv_dir & MV_DIR_FORWARD) {
2364 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2365 s->last_picture.f->data,
2367 op_pix = s->hdsp.avg_pixels_tab;
2368 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2370 if (s->mv_dir & MV_DIR_BACKWARD) {
2371 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2372 s->next_picture.f->data,
2376 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2377 int progressive_score, interlaced_score;
2379 s->interlaced_dct = 0;
2380 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2381 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2385 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2386 progressive_score -= 400;
2388 if (progressive_score > 0) {
2389 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2391 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2395 if (progressive_score > interlaced_score) {
2396 s->interlaced_dct = 1;
2398 dct_offset = wrap_y;
2399 uv_dct_offset = wrap_c;
2401 if (s->chroma_format == CHROMA_422)
2407 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2408 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2409 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2410 dest_y + dct_offset, wrap_y);
2411 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2412 dest_y + dct_offset + 8, wrap_y);
2414 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2418 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2419 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2420 if (!s->chroma_y_shift) { /* 422 */
2421 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2422 dest_cb + uv_dct_offset, wrap_c);
2423 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2424 dest_cr + uv_dct_offset, wrap_c);
2427 /* pre quantization */
2428 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2429 2 * s->qscale * s->qscale) {
2431 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2433 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2435 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2436 wrap_y, 8) < 20 * s->qscale)
2438 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2439 wrap_y, 8) < 20 * s->qscale)
2441 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2443 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2445 if (!s->chroma_y_shift) { /* 422 */
2446 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2447 dest_cb + uv_dct_offset,
2448 wrap_c, 8) < 20 * s->qscale)
2450 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2451 dest_cr + uv_dct_offset,
2452 wrap_c, 8) < 20 * s->qscale)
2458 if (s->quantizer_noise_shaping) {
2460 get_visual_weight(weight[0], ptr_y , wrap_y);
2462 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2464 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2466 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2468 get_visual_weight(weight[4], ptr_cb , wrap_c);
2470 get_visual_weight(weight[5], ptr_cr , wrap_c);
2471 if (!s->chroma_y_shift) { /* 422 */
2473 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2476 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2479 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2482 /* DCT & quantize */
2483 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2485 for (i = 0; i < mb_block_count; i++) {
2488 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2489 // FIXME we could decide to change to quantizer instead of
2491 // JS: I don't think that would be a good idea it could lower
2492 // quality instead of improve it. Just INTRADC clipping
2493 // deserves changes in quantizer
2495 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2497 s->block_last_index[i] = -1;
2499 if (s->quantizer_noise_shaping) {
2500 for (i = 0; i < mb_block_count; i++) {
2502 s->block_last_index[i] =
2503 dct_quantize_refine(s, s->block[i], weight[i],
2504 orig[i], i, s->qscale);
2509 if (s->luma_elim_threshold && !s->mb_intra)
2510 for (i = 0; i < 4; i++)
2511 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2512 if (s->chroma_elim_threshold && !s->mb_intra)
2513 for (i = 4; i < mb_block_count; i++)
2514 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2516 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2517 for (i = 0; i < mb_block_count; i++) {
2518 if (s->block_last_index[i] == -1)
2519 s->coded_score[i] = INT_MAX / 256;
2524 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2525 s->block_last_index[4] =
2526 s->block_last_index[5] = 0;
2528 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2529 if (!s->chroma_y_shift) { /* 422 / 444 */
2530 for (i=6; i<12; i++) {
2531 s->block_last_index[i] = 0;
2532 s->block[i][0] = s->block[4][0];
2537 // non c quantize code returns incorrect block_last_index FIXME
2538 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2539 for (i = 0; i < mb_block_count; i++) {
2541 if (s->block_last_index[i] > 0) {
2542 for (j = 63; j > 0; j--) {
2543 if (s->block[i][s->intra_scantable.permutated[j]])
2546 s->block_last_index[i] = j;
2551 /* huffman encode */
2552 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2553 case AV_CODEC_ID_MPEG1VIDEO:
2554 case AV_CODEC_ID_MPEG2VIDEO:
2555 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2556 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2558 case AV_CODEC_ID_MPEG4:
2559 if (CONFIG_MPEG4_ENCODER)
2560 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2562 case AV_CODEC_ID_MSMPEG4V2:
2563 case AV_CODEC_ID_MSMPEG4V3:
2564 case AV_CODEC_ID_WMV1:
2565 if (CONFIG_MSMPEG4_ENCODER)
2566 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2568 case AV_CODEC_ID_WMV2:
2569 if (CONFIG_WMV2_ENCODER)
2570 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2572 case AV_CODEC_ID_H261:
2573 if (CONFIG_H261_ENCODER)
2574 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2576 case AV_CODEC_ID_H263:
2577 case AV_CODEC_ID_H263P:
2578 case AV_CODEC_ID_FLV1:
2579 case AV_CODEC_ID_RV10:
2580 case AV_CODEC_ID_RV20:
2581 if (CONFIG_H263_ENCODER)
2582 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2584 case AV_CODEC_ID_MJPEG:
2585 case AV_CODEC_ID_AMV:
2586 if (CONFIG_MJPEG_ENCODER)
2587 ff_mjpeg_encode_mb(s, s->block);
2594 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2596 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2597 else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2598 else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
2601 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2604 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2607 d->mb_skip_run= s->mb_skip_run;
2609 d->last_dc[i] = s->last_dc[i];
2612 d->mv_bits= s->mv_bits;
2613 d->i_tex_bits= s->i_tex_bits;
2614 d->p_tex_bits= s->p_tex_bits;
2615 d->i_count= s->i_count;
2616 d->f_count= s->f_count;
2617 d->b_count= s->b_count;
2618 d->skip_count= s->skip_count;
2619 d->misc_bits= s->misc_bits;
2623 d->qscale= s->qscale;
2624 d->dquant= s->dquant;
2626 d->esc3_level_length= s->esc3_level_length;
2629 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2632 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2633 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2636 d->mb_skip_run= s->mb_skip_run;
2638 d->last_dc[i] = s->last_dc[i];
2641 d->mv_bits= s->mv_bits;
2642 d->i_tex_bits= s->i_tex_bits;
2643 d->p_tex_bits= s->p_tex_bits;
2644 d->i_count= s->i_count;
2645 d->f_count= s->f_count;
2646 d->b_count= s->b_count;
2647 d->skip_count= s->skip_count;
2648 d->misc_bits= s->misc_bits;
2650 d->mb_intra= s->mb_intra;
2651 d->mb_skipped= s->mb_skipped;
2652 d->mv_type= s->mv_type;
2653 d->mv_dir= s->mv_dir;
2655 if(s->data_partitioning){
2657 d->tex_pb= s->tex_pb;
2661 d->block_last_index[i]= s->block_last_index[i];
2662 d->interlaced_dct= s->interlaced_dct;
2663 d->qscale= s->qscale;
2665 d->esc3_level_length= s->esc3_level_length;
2668 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2669 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2670 int *dmin, int *next_block, int motion_x, int motion_y)
2673 uint8_t *dest_backup[3];
2675 copy_context_before_encode(s, backup, type);
2677 s->block= s->blocks[*next_block];
2678 s->pb= pb[*next_block];
2679 if(s->data_partitioning){
2680 s->pb2 = pb2 [*next_block];
2681 s->tex_pb= tex_pb[*next_block];
2685 memcpy(dest_backup, s->dest, sizeof(s->dest));
2686 s->dest[0] = s->sc.rd_scratchpad;
2687 s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2688 s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2689 av_assert0(s->linesize >= 32); //FIXME
2692 encode_mb(s, motion_x, motion_y);
2694 score= put_bits_count(&s->pb);
2695 if(s->data_partitioning){
2696 score+= put_bits_count(&s->pb2);
2697 score+= put_bits_count(&s->tex_pb);
2700 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2701 ff_mpv_decode_mb(s, s->block);
2703 score *= s->lambda2;
2704 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2708 memcpy(s->dest, dest_backup, sizeof(s->dest));
2715 copy_context_after_encode(best, s, type);
2719 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2720 uint32_t *sq = ff_square_tab + 256;
2725 return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2726 else if(w==8 && h==8)
2727 return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2731 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2740 static int sse_mb(MpegEncContext *s){
2744 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2745 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2748 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2749 return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2750 s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2751 s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2753 return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2754 s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2755 s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2758 return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2759 +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2760 +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2763 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2764 MpegEncContext *s= *(void**)arg;
2768 s->me.dia_size= s->avctx->pre_dia_size;
2769 s->first_slice_line=1;
2770 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2771 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2772 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2774 s->first_slice_line=0;
2782 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2783 MpegEncContext *s= *(void**)arg;
2785 ff_check_alignment();
2787 s->me.dia_size= s->avctx->dia_size;
2788 s->first_slice_line=1;
2789 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2790 s->mb_x=0; //for block init below
2791 ff_init_block_index(s);
2792 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2793 s->block_index[0]+=2;
2794 s->block_index[1]+=2;
2795 s->block_index[2]+=2;
2796 s->block_index[3]+=2;
2798 /* compute motion vector & mb_type and store in context */
2799 if(s->pict_type==AV_PICTURE_TYPE_B)
2800 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2802 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2804 s->first_slice_line=0;
2809 static int mb_var_thread(AVCodecContext *c, void *arg){
2810 MpegEncContext *s= *(void**)arg;
2813 ff_check_alignment();
2815 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2816 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2819 uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2821 int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2823 varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2824 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2826 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2827 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2828 s->me.mb_var_sum_temp += varc;
2834 static void write_slice_end(MpegEncContext *s){
2835 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2836 if(s->partitioned_frame){
2837 ff_mpeg4_merge_partitions(s);
2840 ff_mpeg4_stuffing(&s->pb);
2841 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2842 ff_mjpeg_encode_stuffing(s);
2845 avpriv_align_put_bits(&s->pb);
2846 flush_put_bits(&s->pb);
2848 if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2849 s->misc_bits+= get_bits_diff(s);
2852 static void write_mb_info(MpegEncContext *s)
2854 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2855 int offset = put_bits_count(&s->pb);
2856 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2857 int gobn = s->mb_y / s->gob_index;
2859 if (CONFIG_H263_ENCODER)
2860 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2861 bytestream_put_le32(&ptr, offset);
2862 bytestream_put_byte(&ptr, s->qscale);
2863 bytestream_put_byte(&ptr, gobn);
2864 bytestream_put_le16(&ptr, mba);
2865 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2866 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2867 /* 4MV not implemented */
2868 bytestream_put_byte(&ptr, 0); /* hmv2 */
2869 bytestream_put_byte(&ptr, 0); /* vmv2 */
2872 static void update_mb_info(MpegEncContext *s, int startcode)
2876 if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2877 s->mb_info_size += 12;
2878 s->prev_mb_info = s->last_mb_info;
2881 s->prev_mb_info = put_bits_count(&s->pb)/8;
2882 /* This might have incremented mb_info_size above, and we return without
2883 * actually writing any info into that slot yet. But in that case,
2884 * this will be called again at the start of the after writing the
2885 * start code, actually writing the mb info. */
2889 s->last_mb_info = put_bits_count(&s->pb)/8;
2890 if (!s->mb_info_size)
2891 s->mb_info_size += 12;
2895 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2897 if ( s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2898 && s->slice_context_count == 1
2899 && s->pb.buf == s->avctx->internal->byte_buffer) {
2900 int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2901 int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2903 uint8_t *new_buffer = NULL;
2904 int new_buffer_size = 0;
2906 if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2907 av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2908 return AVERROR(ENOMEM);
2911 av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2912 s->avctx->internal->byte_buffer_size + size_increase);
2914 return AVERROR(ENOMEM);
2916 memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2917 av_free(s->avctx->internal->byte_buffer);
2918 s->avctx->internal->byte_buffer = new_buffer;
2919 s->avctx->internal->byte_buffer_size = new_buffer_size;
2920 rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2921 s->ptr_lastgob = s->pb.buf + lastgob_pos;
2922 s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2924 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2925 return AVERROR(EINVAL);
2929 static int encode_thread(AVCodecContext *c, void *arg){
2930 MpegEncContext *s= *(void**)arg;
2931 int mb_x, mb_y, pdif = 0;
2932 int chr_h= 16>>s->chroma_y_shift;
2934 MpegEncContext best_s = { 0 }, backup_s;
2935 uint8_t bit_buf[2][MAX_MB_BYTES];
2936 uint8_t bit_buf2[2][MAX_MB_BYTES];
2937 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2938 PutBitContext pb[2], pb2[2], tex_pb[2];
2940 ff_check_alignment();
2943 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2944 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2945 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2948 s->last_bits= put_bits_count(&s->pb);
2959 /* init last dc values */
2960 /* note: quant matrix value (8) is implied here */
2961 s->last_dc[i] = 128 << s->intra_dc_precision;
2963 s->current_picture.encoding_error[i] = 0;
2965 if(s->codec_id==AV_CODEC_ID_AMV){
2966 s->last_dc[0] = 128*8/13;
2967 s->last_dc[1] = 128*8/14;
2968 s->last_dc[2] = 128*8/14;
2971 memset(s->last_mv, 0, sizeof(s->last_mv));
2975 switch(s->codec_id){
2976 case AV_CODEC_ID_H263:
2977 case AV_CODEC_ID_H263P:
2978 case AV_CODEC_ID_FLV1:
2979 if (CONFIG_H263_ENCODER)
2980 s->gob_index = H263_GOB_HEIGHT(s->height);
2982 case AV_CODEC_ID_MPEG4:
2983 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2984 ff_mpeg4_init_partitions(s);
2990 s->first_slice_line = 1;
2991 s->ptr_lastgob = s->pb.buf;
2992 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2996 ff_set_qscale(s, s->qscale);
2997 ff_init_block_index(s);
2999 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3000 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
3001 int mb_type= s->mb_type[xy];
3005 int size_increase = s->avctx->internal->byte_buffer_size/4
3006 + s->mb_width*MAX_MB_BYTES;
3008 ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
3009 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
3010 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
3013 if(s->data_partitioning){
3014 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
3015 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
3016 av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3022 s->mb_y = mb_y; // moved into loop, can get changed by H.261
3023 ff_update_block_index(s);
3025 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3026 ff_h261_reorder_mb_index(s);
3027 xy= s->mb_y*s->mb_stride + s->mb_x;
3028 mb_type= s->mb_type[xy];
3031 /* write gob / video packet header */
3033 int current_packet_size, is_gob_start;
3035 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
3037 is_gob_start = s->rtp_payload_size &&
3038 current_packet_size >= s->rtp_payload_size &&
3041 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3043 switch(s->codec_id){
3044 case AV_CODEC_ID_H263:
3045 case AV_CODEC_ID_H263P:
3046 if(!s->h263_slice_structured)
3047 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3049 case AV_CODEC_ID_MPEG2VIDEO:
3050 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3051 case AV_CODEC_ID_MPEG1VIDEO:
3052 if(s->mb_skip_run) is_gob_start=0;
3054 case AV_CODEC_ID_MJPEG:
3055 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3060 if(s->start_mb_y != mb_y || mb_x!=0){
3063 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3064 ff_mpeg4_init_partitions(s);
3068 av_assert2((put_bits_count(&s->pb)&7) == 0);
3069 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3071 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3072 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3073 int d = 100 / s->error_rate;
3075 current_packet_size=0;
3076 s->pb.buf_ptr= s->ptr_lastgob;
3077 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3081 #if FF_API_RTP_CALLBACK
3082 FF_DISABLE_DEPRECATION_WARNINGS
3083 if (s->avctx->rtp_callback){
3084 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3085 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3087 FF_ENABLE_DEPRECATION_WARNINGS
3089 update_mb_info(s, 1);
3091 switch(s->codec_id){
3092 case AV_CODEC_ID_MPEG4:
3093 if (CONFIG_MPEG4_ENCODER) {
3094 ff_mpeg4_encode_video_packet_header(s);
3095 ff_mpeg4_clean_buffers(s);
3098 case AV_CODEC_ID_MPEG1VIDEO:
3099 case AV_CODEC_ID_MPEG2VIDEO:
3100 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3101 ff_mpeg1_encode_slice_header(s);
3102 ff_mpeg1_clean_buffers(s);
3105 case AV_CODEC_ID_H263:
3106 case AV_CODEC_ID_H263P:
3107 if (CONFIG_H263_ENCODER)
3108 ff_h263_encode_gob_header(s, mb_y);
3112 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3113 int bits= put_bits_count(&s->pb);
3114 s->misc_bits+= bits - s->last_bits;
3118 s->ptr_lastgob += current_packet_size;
3119 s->first_slice_line=1;
3120 s->resync_mb_x=mb_x;
3121 s->resync_mb_y=mb_y;
3125 if( (s->resync_mb_x == s->mb_x)
3126 && s->resync_mb_y+1 == s->mb_y){
3127 s->first_slice_line=0;
3131 s->dquant=0; //only for QP_RD
3133 update_mb_info(s, 0);
3135 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3137 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3139 copy_context_before_encode(&backup_s, s, -1);
3141 best_s.data_partitioning= s->data_partitioning;
3142 best_s.partitioned_frame= s->partitioned_frame;
3143 if(s->data_partitioning){
3144 backup_s.pb2= s->pb2;
3145 backup_s.tex_pb= s->tex_pb;
3148 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3149 s->mv_dir = MV_DIR_FORWARD;
3150 s->mv_type = MV_TYPE_16X16;
3152 s->mv[0][0][0] = s->p_mv_table[xy][0];
3153 s->mv[0][0][1] = s->p_mv_table[xy][1];
3154 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3155 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3157 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3158 s->mv_dir = MV_DIR_FORWARD;
3159 s->mv_type = MV_TYPE_FIELD;
3162 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3163 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3164 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3166 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3167 &dmin, &next_block, 0, 0);
3169 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3170 s->mv_dir = MV_DIR_FORWARD;
3171 s->mv_type = MV_TYPE_16X16;
3175 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3176 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3178 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3179 s->mv_dir = MV_DIR_FORWARD;
3180 s->mv_type = MV_TYPE_8X8;
3183 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3184 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3186 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3187 &dmin, &next_block, 0, 0);
3189 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3190 s->mv_dir = MV_DIR_FORWARD;
3191 s->mv_type = MV_TYPE_16X16;
3193 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3194 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3195 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3196 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3198 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3199 s->mv_dir = MV_DIR_BACKWARD;
3200 s->mv_type = MV_TYPE_16X16;
3202 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3203 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3204 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3205 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3207 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3208 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3209 s->mv_type = MV_TYPE_16X16;
3211 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3212 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3213 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3214 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3215 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3216 &dmin, &next_block, 0, 0);
3218 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3219 s->mv_dir = MV_DIR_FORWARD;
3220 s->mv_type = MV_TYPE_FIELD;
3223 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3224 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3225 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3227 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3228 &dmin, &next_block, 0, 0);
3230 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3231 s->mv_dir = MV_DIR_BACKWARD;
3232 s->mv_type = MV_TYPE_FIELD;
3235 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3236 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3237 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3239 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3240 &dmin, &next_block, 0, 0);
3242 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3243 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3244 s->mv_type = MV_TYPE_FIELD;
3246 for(dir=0; dir<2; dir++){
3248 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3249 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3250 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3253 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3254 &dmin, &next_block, 0, 0);
3256 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3258 s->mv_type = MV_TYPE_16X16;
3262 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3263 &dmin, &next_block, 0, 0);
3264 if(s->h263_pred || s->h263_aic){
3266 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3268 ff_clean_intra_table_entries(s); //old mode?
3272 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3273 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3274 const int last_qp= backup_s.qscale;
3277 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3278 static const int dquant_tab[4]={-1,1,-2,2};
3279 int storecoefs = s->mb_intra && s->dc_val[0];
3281 av_assert2(backup_s.dquant == 0);
3284 s->mv_dir= best_s.mv_dir;
3285 s->mv_type = MV_TYPE_16X16;
3286 s->mb_intra= best_s.mb_intra;
3287 s->mv[0][0][0] = best_s.mv[0][0][0];
3288 s->mv[0][0][1] = best_s.mv[0][0][1];
3289 s->mv[1][0][0] = best_s.mv[1][0][0];
3290 s->mv[1][0][1] = best_s.mv[1][0][1];
3292 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3293 for(; qpi<4; qpi++){
3294 int dquant= dquant_tab[qpi];
3295 qp= last_qp + dquant;
3296 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3298 backup_s.dquant= dquant;
3301 dc[i]= s->dc_val[0][ s->block_index[i] ];
3302 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3306 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3307 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3308 if(best_s.qscale != qp){
3311 s->dc_val[0][ s->block_index[i] ]= dc[i];
3312 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3319 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3320 int mx= s->b_direct_mv_table[xy][0];
3321 int my= s->b_direct_mv_table[xy][1];
3323 backup_s.dquant = 0;
3324 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3326 ff_mpeg4_set_direct_mv(s, mx, my);
3327 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3328 &dmin, &next_block, mx, my);
3330 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3331 backup_s.dquant = 0;
3332 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3334 ff_mpeg4_set_direct_mv(s, 0, 0);
3335 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3336 &dmin, &next_block, 0, 0);
3338 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3341 coded |= s->block_last_index[i];
3344 memcpy(s->mv, best_s.mv, sizeof(s->mv));
3345 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3346 mx=my=0; //FIXME find the one we actually used
3347 ff_mpeg4_set_direct_mv(s, mx, my);
3348 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3356 s->mv_dir= best_s.mv_dir;
3357 s->mv_type = best_s.mv_type;
3359 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3360 s->mv[0][0][1] = best_s.mv[0][0][1];
3361 s->mv[1][0][0] = best_s.mv[1][0][0];
3362 s->mv[1][0][1] = best_s.mv[1][0][1];*/
3365 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3366 &dmin, &next_block, mx, my);
3371 s->current_picture.qscale_table[xy] = best_s.qscale;
3373 copy_context_after_encode(s, &best_s, -1);
3375 pb_bits_count= put_bits_count(&s->pb);
3376 flush_put_bits(&s->pb);
3377 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3380 if(s->data_partitioning){
3381 pb2_bits_count= put_bits_count(&s->pb2);
3382 flush_put_bits(&s->pb2);
3383 avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3384 s->pb2= backup_s.pb2;
3386 tex_pb_bits_count= put_bits_count(&s->tex_pb);
3387 flush_put_bits(&s->tex_pb);
3388 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3389 s->tex_pb= backup_s.tex_pb;
3391 s->last_bits= put_bits_count(&s->pb);
3393 if (CONFIG_H263_ENCODER &&
3394 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3395 ff_h263_update_motion_val(s);
3397 if(next_block==0){ //FIXME 16 vs linesize16
3398 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3399 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3400 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3403 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3404 ff_mpv_decode_mb(s, s->block);
3406 int motion_x = 0, motion_y = 0;
3407 s->mv_type=MV_TYPE_16X16;
3408 // only one MB-Type possible
3411 case CANDIDATE_MB_TYPE_INTRA:
3414 motion_x= s->mv[0][0][0] = 0;
3415 motion_y= s->mv[0][0][1] = 0;
3417 case CANDIDATE_MB_TYPE_INTER:
3418 s->mv_dir = MV_DIR_FORWARD;
3420 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3421 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3423 case CANDIDATE_MB_TYPE_INTER_I:
3424 s->mv_dir = MV_DIR_FORWARD;
3425 s->mv_type = MV_TYPE_FIELD;
3428 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3429 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3430 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3433 case CANDIDATE_MB_TYPE_INTER4V:
3434 s->mv_dir = MV_DIR_FORWARD;
3435 s->mv_type = MV_TYPE_8X8;
3438 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3439 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3442 case CANDIDATE_MB_TYPE_DIRECT:
3443 if (CONFIG_MPEG4_ENCODER) {
3444 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3446 motion_x=s->b_direct_mv_table[xy][0];
3447 motion_y=s->b_direct_mv_table[xy][1];
3448 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3451 case CANDIDATE_MB_TYPE_DIRECT0:
3452 if (CONFIG_MPEG4_ENCODER) {
3453 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3455 ff_mpeg4_set_direct_mv(s, 0, 0);
3458 case CANDIDATE_MB_TYPE_BIDIR:
3459 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3461 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3462 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3463 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3464 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3466 case CANDIDATE_MB_TYPE_BACKWARD:
3467 s->mv_dir = MV_DIR_BACKWARD;
3469 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3470 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3472 case CANDIDATE_MB_TYPE_FORWARD:
3473 s->mv_dir = MV_DIR_FORWARD;
3475 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3476 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3478 case CANDIDATE_MB_TYPE_FORWARD_I:
3479 s->mv_dir = MV_DIR_FORWARD;
3480 s->mv_type = MV_TYPE_FIELD;
3483 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3484 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3485 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3488 case CANDIDATE_MB_TYPE_BACKWARD_I:
3489 s->mv_dir = MV_DIR_BACKWARD;
3490 s->mv_type = MV_TYPE_FIELD;
3493 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3494 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3495 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3498 case CANDIDATE_MB_TYPE_BIDIR_I:
3499 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3500 s->mv_type = MV_TYPE_FIELD;
3502 for(dir=0; dir<2; dir++){
3504 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3505 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3506 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3511 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3514 encode_mb(s, motion_x, motion_y);
3516 // RAL: Update last macroblock type
3517 s->last_mv_dir = s->mv_dir;
3519 if (CONFIG_H263_ENCODER &&
3520 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3521 ff_h263_update_motion_val(s);
3523 ff_mpv_decode_mb(s, s->block);
3526 /* clean the MV table in IPS frames for direct mode in B frames */
3527 if(s->mb_intra /* && I,P,S_TYPE */){
3528 s->p_mv_table[xy][0]=0;
3529 s->p_mv_table[xy][1]=0;
3532 if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3536 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3537 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3539 s->current_picture.encoding_error[0] += sse(
3540 s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3541 s->dest[0], w, h, s->linesize);
3542 s->current_picture.encoding_error[1] += sse(
3543 s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3544 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3545 s->current_picture.encoding_error[2] += sse(
3546 s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3547 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3550 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3551 ff_h263_loop_filter(s);
3553 ff_dlog(s->avctx, "MB %d %d bits\n",
3554 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3558 //not beautiful here but we must write it before flushing so it has to be here
3559 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3560 ff_msmpeg4_encode_ext_header(s);
3564 #if FF_API_RTP_CALLBACK
3565 FF_DISABLE_DEPRECATION_WARNINGS
3566 /* Send the last GOB if RTP */
3567 if (s->avctx->rtp_callback) {
3568 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3569 pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3570 /* Call the RTP callback to send the last GOB */
3572 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3574 FF_ENABLE_DEPRECATION_WARNINGS
/* MERGE(field): accumulate src->field into dst->field and zero the source,
 * so slice-thread contexts can be folded back into the main context exactly once.
 * NOTE: deliberately not do{}while(0)-wrapped in the original; callers use it
 * as a full statement only. */
3580 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold the motion-estimation statistics gathered by a slice-thread context
 * (src) back into the main encoder context (dst).
 * NOTE(review): this excerpt is truncated — the function's closing brace
 * (original line ~3585) is not visible here. */
3581 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3582     MERGE(me.scene_change_score);
3583     MERGE(me.mc_mb_var_sum_temp);
3584     MERGE(me.mb_var_sum_temp);
/* Fold per-slice-thread encoding state back into the main context after the
 * encode pass: DCT/noise-reduction statistics, error-resilience counters,
 * PSNR error sums, and finally the thread's bitstream, which is appended to
 * the main PutBitContext.
 * NOTE(review): excerpt is truncated — several MERGE() lines (original
 * 3592-3599) and closing braces are missing from this view. */
3587 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3590     MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3591     MERGE(dct_count[1]);
3600     MERGE(er.error_count);
3601     MERGE(padding_bug_score);
3602     MERGE(current_picture.encoding_error[0]);
3603     MERGE(current_picture.encoding_error[1]);
3604     MERGE(current_picture.encoding_error[2]);
// Noise-reduction DCT error sums are only meaningful when noise reduction is on.
3606     if (dst->noise_reduction){
3607         for(i=0; i<64; i++){
3608             MERGE(dct_error_sum[0][i]);
3609             MERGE(dct_error_sum[1][i]);
// Slice bitstreams must be byte-aligned before concatenation.
// NOTE(review): plain assert() here; av_assert1/2 is the usual FFmpeg idiom — confirm.
3613     assert(put_bits_count(&src->pb) % 8 ==0);
3614     assert(put_bits_count(&dst->pb) % 8 ==0);
3615     avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3616     flush_put_bits(&dst->pb);
/* Pick the quantizer / lambda for the current picture.
 * Priority: an explicitly queued next_lambda, else the rate-control estimate
 * (unless fixed qscale). With adaptive quantization, codec-specific qscale
 * cleanup is applied and the per-MB qscale table is initialized.
 * @param dry_run nonzero when called for estimation only (e.g. 2-pass stats);
 *                then next_lambda is not consumed.
 * @return presumably 0 on success, negative on rate-control failure — the
 *         error-return line after 3627 is missing from this excerpt; confirm.
 * NOTE(review): excerpt is truncated — break statements, default case and
 * closing braces of the switch (original 3628-3653) are not all visible. */
3619 static int estimate_qp(MpegEncContext *s, int dry_run){
3620     if (s->next_lambda){
3621         s->current_picture_ptr->f->quality =
3622         s->current_picture.f->quality = s->next_lambda;
3623         if(!dry_run) s->next_lambda= 0;
3624     } else if (!s->fixed_qscale) {
3625         s->current_picture_ptr->f->quality =
3626         s->current_picture.f->quality = ff_rate_estimate_qscale(s, dry_run);
3627         if (s->current_picture.f->quality < 0)
3631     if(s->adaptive_quant){
3632         switch(s->codec_id){
3633         case AV_CODEC_ID_MPEG4:
3634             if (CONFIG_MPEG4_ENCODER)
3635                 ff_clean_mpeg4_qscales(s);
3637         case AV_CODEC_ID_H263:
3638         case AV_CODEC_ID_H263P:
3639         case AV_CODEC_ID_FLV1:
3640             if (CONFIG_H263_ENCODER)
3641                 ff_clean_h263_qscales(s);
// Build the per-macroblock qscale table used by adaptive quantization.
3644         ff_init_qscale_tab(s);
3647         s->lambda= s->lambda_table[0];
// Non-adaptive path: lambda comes straight from the chosen picture quality.
3650         s->lambda = s->current_picture.f->quality;
3655 /* must be called before writing the header */
/* Derive temporal distances from the current picture's PTS:
 * for B-frames, pb_time (distance to the previous reference);
 * for reference frames, pp_time (distance between the two most recent
 * non-B pictures) and last_non_b_time are updated.
 * NOTE(review): plain assert() here; FFmpeg later uses av_assert1 — confirm.
 * NOTE(review): the else keyword between the B and non-B branches (original
 * line ~3663) is missing from this truncated excerpt. */
3656 static void set_frame_distances(MpegEncContext * s){
3657     av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3658     s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3660     if(s->pict_type==AV_PICTURE_TYPE_B){
3661         s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3662         assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3664         s->pp_time= s->time - s->last_non_b_time;
3665         s->last_non_b_time= s->time;
3666         assert(s->picture_number==0 || s->pp_time > 0);
/* Encode one picture: set up timing and lambdas, run motion estimation (or
 * mark all MBs intra for I-frames), merge per-thread ME stats, optionally
 * retype the picture on scene change, fix up motion-vector ranges/f_codes,
 * estimate the quantizer, build quantization matrices (MJPEG/AMV), write the
 * codec-specific picture header, then run the per-slice encode threads and
 * merge their output.
 * @param picture_number display/coding index stored into the context.
 * @return presumably 0 on success, negative on error (return statements are
 *         largely missing from this truncated excerpt — confirm against the
 *         full file).
 * NOTE(review): this excerpt is heavily decimated; many closing braces,
 * variable declarations (i, ret, bits, ...) and intervening statements are
 * not visible. Comments below only describe what the visible lines show. */
3670 static int encode_picture(MpegEncContext *s, int picture_number)
3674     int context_count = s->slice_context_count;
3676     s->picture_number = picture_number;
3678     /* Reset the average MB variance */
3679     s->me.mb_var_sum_temp =
3680     s->me.mc_mb_var_sum_temp = 0;
3682     /* we need to initialize some time vars before we can encode b-frames */
3683     // RAL: Condition added for MPEG1VIDEO
3684     if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3685         set_frame_distances(s);
3686     if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3687         ff_set_mpeg4_time(s);
3689     s->me.scene_change_score=0;
3691 //    s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
// Rounding mode: MSMPEG4v3+ I-frames force no_rounding; P-frames of
// flipflop-rounding codecs (H.263+, MPEG-4) toggle it per frame.
3693     if(s->pict_type==AV_PICTURE_TYPE_I){
3694         if(s->msmpeg4_version >= 3) s->no_rounding=1;
3695         else                        s->no_rounding=0;
3696     }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3697         if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3698             s->no_rounding ^= 1;
// Quantizer source: 2-pass stats, or last lambda of the same picture type
// when neither PASS2 nor fixed QSCALE is in effect.
3701     if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3702         if (estimate_qp(s,1) < 0)
3704         ff_get_2pass_fcode(s);
3705     } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3706         if(s->pict_type==AV_PICTURE_TYPE_B)
3707             s->lambda= s->last_lambda_for[s->pict_type];
3709             s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
// Non-(M)JPEG codecs share one intra matrix for luma and chroma; free any
// separate chroma matrices and alias them to the luma ones.
3713     if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3714         if(s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
3715         if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3716         s->q_chroma_intra_matrix   = s->q_intra_matrix;
3717         s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3720     s->mb_intra=0; //for the rate distortion & bit compare functions
// Propagate the updated main context into every slice-thread context.
3721     for(i=1; i<context_count; i++){
3722         ret = ff_update_duplicate_context(s->thread_context[i], s);
3730     /* Estimate motion for every MB */
3731     if(s->pict_type != AV_PICTURE_TYPE_I){
3732         s->lambda  = (s->lambda  * s->avctx->me_penalty_compensation + 128) >> 8;
3733         s->lambda2 = (s->lambda2 * (int64_t) s->avctx->me_penalty_compensation + 128) >> 8;
3734         if (s->pict_type != AV_PICTURE_TYPE_B) {
3735             if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3736                 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3740         s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3741     }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3743         for(i=0; i<s->mb_stride*s->mb_height; i++)
3744             s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3746         if(!s->fixed_qscale){
3747             /* finding spatial complexity for I-frame rate control */
3748             s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3751     for(i=1; i<context_count; i++){
3752         merge_context_after_me(s, s->thread_context[i]);
3754     s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3755     s->current_picture.   mb_var_sum= s->current_picture_ptr->   mb_var_sum= s->me.   mb_var_sum_temp;
// Scene-change handling: promote a P-frame to I and re-mark all MBs intra.
3758     if (s->me.scene_change_score > s->scenechange_threshold &&
3759         s->pict_type == AV_PICTURE_TYPE_P) {
3760         s->pict_type= AV_PICTURE_TYPE_I;
3761         for(i=0; i<s->mb_stride*s->mb_height; i++)
3762             s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3763         if(s->msmpeg4_version >= 3)
3765         ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3766                 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
// P/S pictures: pick f_code from the collected MV tables, then clamp
// out-of-range vectors.
3770     if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3771         s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3773         if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3775             a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3776             b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3777             s->f_code= FFMAX3(s->f_code, a, b);
3780         ff_fix_long_p_mvs(s);
3781         ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3782         if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3786                     ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3787                                     s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
// B pictures: pick forward (f_code) and backward (b_code) ranges, then clamp
// every directional/bidirectional/field MV table.
3792     if(s->pict_type==AV_PICTURE_TYPE_B){
3795         a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3796         b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3797         s->f_code = FFMAX(a, b);
3799         a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3800         b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3801         s->b_code = FFMAX(a, b);
3803         ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3804         ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3805         ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3806         ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3807         if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3809             for(dir=0; dir<2; dir++){
3812                     int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3813                               : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3814                     ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3815                                     s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3823     if (estimate_qp(s, 0) < 0)
// Very low qscale with small max coefficients clips badly on I frames;
// bump to 3 unless the user forced a qscale.
3826     if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3827         s->pict_type == AV_PICTURE_TYPE_I &&
3828         !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3829         s->qscale= 3; //reduce clipping problems
// MJPEG: bake qscale into the (possibly user-supplied) intra matrices and
// rebuild the quantizer tables.
3831     if (s->out_format == FMT_MJPEG) {
3832         const uint16_t *  luma_matrix = ff_mpeg1_default_intra_matrix;
3833         const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3835         if (s->avctx->intra_matrix) {
3837             luma_matrix = s->avctx->intra_matrix;
3839         if (s->avctx->chroma_intra_matrix)
3840             chroma_matrix = s->avctx->chroma_intra_matrix;
3842         /* for mjpeg, we do include qscale in the matrix */
3844             int j = s->idsp.idct_permutation[i];
3846             s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3847             s->       intra_matrix[j] = av_clip_uint8((  luma_matrix[i] * s->qscale) >> 3);
3849         s->y_dc_scale_table=
3850         s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3851         s->chroma_intra_matrix[0] =
3852         s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3853         ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3854                s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3855         ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3856                s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
// AMV: fixed quant tables (sp5x) with constant DC scales of 13/14.
3859     if(s->codec_id == AV_CODEC_ID_AMV){
3860         static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3861         static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3863             int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3865             s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3866             s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3868         s->y_dc_scale_table= y;
3869         s->c_dc_scale_table= c;
3870         s->intra_matrix[0] = 13;
3871         s->chroma_intra_matrix[0] = 14;
3872         ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3873                s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3874         ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3875                s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3879     //FIXME var duplication
3880     s->current_picture_ptr->f->key_frame =
3881     s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3882     s->current_picture_ptr->f->pict_type =
3883     s->current_picture.f->pict_type = s->pict_type;
3885     if (s->current_picture.f->key_frame)
3886         s->picture_in_gop_number=0;
3888     s->mb_x = s->mb_y = 0;
3889     s->last_bits= put_bits_count(&s->pb);
// Write the codec-family-specific picture header.
3890     switch(s->out_format) {
3892         if (CONFIG_MJPEG_ENCODER)
3893             ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3894                                            s->pred, s->intra_matrix, s->chroma_intra_matrix);
3897         if (CONFIG_H261_ENCODER)
3898             ff_h261_encode_picture_header(s, picture_number);
3901         if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3902             ff_wmv2_encode_picture_header(s, picture_number);
3903         else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3904             ff_msmpeg4_encode_picture_header(s, picture_number);
3905         else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3906             ret = ff_mpeg4_encode_picture_header(s, picture_number);
3909         } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3910             ret = ff_rv10_encode_picture_header(s, picture_number);
3914         else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3915             ff_rv20_encode_picture_header(s, picture_number);
3916         else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3917             ff_flv_encode_picture_header(s, picture_number);
3918         else if (CONFIG_H263_ENCODER)
3919             ff_h263_encode_picture_header(s, picture_number);
3922         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3923             ff_mpeg1_encode_picture_header(s, picture_number);
3928     bits= put_bits_count(&s->pb);
3929     s->header_bits= bits - s->last_bits;
// Run per-slice encode threads and merge their stats + bitstreams back.
3931     for(i=1; i<context_count; i++){
3932         update_duplicate_context_after_me(s->thread_context[i], s);
3934     s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3935     for(i=1; i<context_count; i++){
3936         if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3937             set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
3938         merge_context_after_encode(s, s->thread_context[i]);
/* Noise-reduction pass over one DCT block (C reference implementation):
 * accumulate each coefficient into dct_error_sum[intra][] and shrink it
 * toward zero by the running dct_offset, clamping at zero so no coefficient
 * changes sign.
 * NOTE(review): excerpt is truncated — the level>0/level<0 outer branches and
 * the final store back into block[i] (and closing braces) are missing here;
 * the two visible halves handle positive and negative coefficients
 * symmetrically. */
3944 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3945     const int intra= s->mb_intra;
// Count blocks per prediction mode so dct_offset can be derived from averages.
3948     s->dct_count[intra]++;
3950     for(i=0; i<64; i++){
3951         int level= block[i];
3955             s->dct_error_sum[intra][i] += level;
3956             level -= s->dct_offset[intra][i];
3957             if(level<0) level=0;
3959             s->dct_error_sum[intra][i] -= level;
3960             level += s->dct_offset[intra][i];
3961             if(level>0) level=0;
/* Rate-distortion optimal ("trellis") quantization of one 8x8 block.
 * Forward-DCTs the block, builds up to two candidate quantized levels per
 * coefficient, then runs a Viterbi-style survivor search over (run, level)
 * pairs minimizing distortion + lambda * bits, finally writing back the best
 * path in coefficient order.
 * @param n        block index; n<4 selects luma matrices, n>3 chroma.
 * @param overflow set to nonzero when a quantized level exceeds max_qcoeff.
 * @return index of the last nonzero coefficient (scan order), or -1 when the
 *         block quantizes to all zeros.
 * NOTE(review): excerpt is heavily decimated — declarations (score_tab,
 * survivor, run_tab, level_tab, max, bias, q, ...), the intra/inter setup
 * branches, and many closing braces are missing from this view. */
3968 static int dct_quantize_trellis_c(MpegEncContext *s,
3969                                   int16_t *block, int n,
3970                                   int qscale, int *overflow){
3972     const uint16_t *matrix;
3973     const uint8_t *scantable= s->intra_scantable.scantable;
3974     const uint8_t *perm_scantable= s->intra_scantable.permutated;
3976     unsigned int threshold1, threshold2;
3988     int coeff_count[64];
3989     int qmul, qadd, start_i, last_non_zero, i, dc;
3990     const int esc_length= s->ac_esc_length;
3992     uint8_t * last_length;
3993     const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3996     s->fdsp.fdct(block);
3998     if(s->dct_error_sum)
3999         s->denoise_dct(s, block);
4001     qadd= ((qscale-1)|1)*8;
// MPEG-2 supports a non-linear qscale mapping; otherwise qscale*2 is used.
4003     if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
4004     else                 mpeg2_qscale = qscale << 1;
4015         /* For AIC we skip quant/dequant of INTRADC */
4020             /* note: block[0] is assumed to be positive */
4021             block[0] = (block[0] + (q >> 1)) / q;
4024         qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4025         matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4026         if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4027             bias= 1<<(QMAT_SHIFT-1);
4029         if (n > 3 && s->intra_chroma_ac_vlc_length) {
4030             length     = s->intra_chroma_ac_vlc_length;
4031             last_length= s->intra_chroma_ac_vlc_last_length;
4033             length     = s->intra_ac_vlc_length;
4034             last_length= s->intra_ac_vlc_last_length;
4039         qmat = s->q_inter_matrix[qscale];
4040         matrix = s->inter_matrix;
4041         length     = s->inter_ac_vlc_length;
4042         last_length= s->inter_ac_vlc_last_length;
4046     threshold1= (1<<QMAT_SHIFT) - bias - 1;
4047     threshold2= (threshold1<<1);
// First pass (high to low frequency): find the last coefficient that
// survives the quantization threshold.
4049     for(i=63; i>=start_i; i--) {
4050         const int j = scantable[i];
4051         int level = block[j] * qmat[j];
4053         if(((unsigned)(level+threshold1))>threshold2){
// Second pass: for each surviving coefficient build 1-2 candidate levels
// (level and level-1, mirrored for negative input).
4059     for(i=start_i; i<=last_non_zero; i++) {
4060         const int j = scantable[i];
4061         int level = block[j] * qmat[j];
4063 //        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
4064 //           || bias-level >= (1<<(QMAT_SHIFT - 3))){
4065         if(((unsigned)(level+threshold1))>threshold2){
4067             level= (bias + level)>>QMAT_SHIFT;
4069             coeff[1][i]= level-1;
4070 //            coeff[2][k]= level-2;
4072             level= (bias - level)>>QMAT_SHIFT;
4073             coeff[0][i]= -level;
4074             coeff[1][i]= -level+1;
4075 //            coeff[2][k]= -level+2;
4077             coeff_count[i]= FFMIN(level, 2);
4078             av_assert2(coeff_count[i]);
4081             coeff[0][i]= (level>>31)|1;
4086     *overflow= s->max_qcoeff < max; //overflow might have happened
// All-zero AC: clear and bail out early.
4088     if(last_non_zero < start_i){
4089         memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4090         return last_non_zero;
4093     score_tab[start_i]= 0;
4094     survivor[0]= start_i;
// Main trellis: for every position and candidate level, score continuing
// each survivor path with a (run, level) code or an escape code.
4097     for(i=start_i; i<=last_non_zero; i++){
4098         int level_index, j, zero_distortion;
4099         int dct_coeff= FFABS(block[ scantable[i] ]);
4100         int best_score=256*256*256*120;
// The ifast FDCT has the AAN scale factors folded in; undo them so
// distortion is measured in a common domain.
4102         if (s->fdsp.fdct == ff_fdct_ifast)
4103             dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4104         zero_distortion= dct_coeff*dct_coeff;
4106         for(level_index=0; level_index < coeff_count[i]; level_index++){
4108             int level= coeff[level_index][i];
4109             const int alevel= FFABS(level);
// Reconstruct the dequantized value per output format to compute distortion.
4114                 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4115                     unquant_coeff= alevel*qmul + qadd;
4116                 } else if(s->out_format == FMT_MJPEG) {
4117                     j = s->idsp.idct_permutation[scantable[i]];
4118                     unquant_coeff = alevel * matrix[j] * 8;
4120                     j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4122                         unquant_coeff = (int)(  alevel  * mpeg2_qscale * matrix[j]) >> 4;
4123                         unquant_coeff =   (unquant_coeff - 1) | 1;
4125                         unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4126                         unquant_coeff =   (unquant_coeff - 1) | 1;
4131             distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
// Levels representable by the VLC table (|level+64| fits in 7 bits).
4133             if((level&(~127)) == 0){
4134                 for(j=survivor_count-1; j>=0; j--){
4135                     int run= i - survivor[j];
4136                     int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4137                     score += score_tab[i-run];
4139                     if(score < best_score){
4142                         level_tab[i+1]= level-64;
// H.261/H.263 use a distinct "last" VLC table; track the best terminal
// (run, level) separately.
4146                 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4147                     for(j=survivor_count-1; j>=0; j--){
4148                         int run= i - survivor[j];
4149                         int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4150                         score += score_tab[i-run];
4151                         if(score < last_score){
4154                             last_level= level-64;
// Escape coding path for levels outside the VLC range.
4160                 distortion += esc_length*lambda;
4161                 for(j=survivor_count-1; j>=0; j--){
4162                     int run= i - survivor[j];
4163                     int score= distortion + score_tab[i-run];
4165                     if(score < best_score){
4168                         level_tab[i+1]= level-64;
4172                 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4173                     for(j=survivor_count-1; j>=0; j--){
4174                         int run= i - survivor[j];
4175                         int score= distortion + score_tab[i-run];
4176                         if(score < last_score){
4179                             last_level= level-64;
4187         score_tab[i+1]= best_score;
4189         //Note: there is a vlc code in mpeg4 which is 1 bit shorter then another one with a shorter run and the same level
4190         if(last_non_zero <= 27){
4191             for(; survivor_count; survivor_count--){
4192                 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4196             for(; survivor_count; survivor_count--){
4197                 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4202         survivor[ survivor_count++ ]= i+1;
// Non-H.263 formats: choose the cheapest truncation point including the
// end-of-block cost.
4205     if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4206         last_score= 256*256*256*120;
4207         for(i= survivor[0]; i<=last_non_zero + 1; i++){
4208             int score= score_tab[i];
4209             if(i) score += lambda*2; //FIXME exacter?
4211             if(score < last_score){
4214                 last_level= level_tab[i];
4215                 last_run= run_tab[i];
4220     s->coded_score[n] = last_score;
4222     dc= FFABS(block[0]);
4223     last_non_zero= last_i - 1;
4224     memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4226     if(last_non_zero < start_i)
4227         return last_non_zero;
// Special case: only the DC-position coefficient remains; decide whether
// keeping it beats coding nothing at all.
4229     if(last_non_zero == 0 && start_i == 0){
4231         int best_score= dc * dc;
4233         for(i=0; i<coeff_count[0]; i++){
4234             int level= coeff[i][0];
4235             int alevel= FFABS(level);
4236             int unquant_coeff, score, distortion;
4238             if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4239                     unquant_coeff= (alevel*qmul + qadd)>>3;
4241                     unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4242                     unquant_coeff =   (unquant_coeff - 1) | 1;
4244                 unquant_coeff = (unquant_coeff + 4) >> 3;
4245                 unquant_coeff<<= 3 + 3;
4247             distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4249             if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4250             else                    score= distortion + esc_length*lambda;
4252             if(score < best_score){
4254                 best_level= level - 64;
4257         block[0]= best_level;
4258         s->coded_score[n] = best_score - dc*dc;
4259         if(best_level == 0) return -1;
4260         else                return last_non_zero;
// Backtrack the winning path and write levels back in permuted order.
4264     av_assert2(last_level);
4266     block[ perm_scantable[last_non_zero] ]= last_level;
4269     for(; i>start_i; i -= run_tab[i] + 1){
4270         block[ perm_scantable[i-1] ]= level_tab[i];
4273     return last_non_zero;
4276 //#define REFINE_STATS 1
/* 64x64 table of 8x8 IDCT basis functions (fixed point, BASIS_SHIFT scale),
 * indexed by [permuted coefficient][pixel]; filled lazily by build_basis()
 * and shared by dct_quantize_refine(). */
4277 static int16_t basis[64][64];
/* Precompute the DCT basis table, applying the IDCT coefficient permutation
 * so entries line up with permuted block storage.
 * NOTE(review): excerpt is truncated — the i/j/x/y loop headers and closing
 * braces (original 4280-4285, 4292-4296) are not visible here. */
4279 static void build_basis(uint8_t *perm){
// 0.25 * 2^BASIS_SHIFT is the 2-D DCT normalization in fixed point.
4286     double s= 0.25*(1<<BASIS_SHIFT);
4288     int perm_index= perm[index];
// DC rows/columns carry the extra 1/sqrt(2) orthonormalization factor.
4289     if(i==0) s*= sqrt(0.5);
4290     if(j==0) s*= sqrt(0.5);
4291     basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Quantizer-noise-shaping refinement of an already-quantized 8x8 block:
 * iteratively tries +/-1 changes to individual coefficients (including
 * inserting or removing coefficients) and keeps a change when it lowers
 * weighted reconstruction error plus lambda * VLC-bit cost, using the
 * precomputed basis[] table to update the residual incrementally.
 * @param weight per-coefficient perceptual weights.
 * @param orig   original (unquantized) spatial-domain reference.
 * @return index of the last nonzero coefficient after refinement.
 * NOTE(review): excerpt is heavily decimated — declarations (rem[], lambda,
 * prev_run/prev_level, best_coeff/best_change, the main do/while loop
 * structure) and many braces are missing; the REFINE_STATS counters below are
 * debug-only instrumentation. */
4298 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4299                         int16_t *block, int16_t *weight, int16_t *orig,
4302     LOCAL_ALIGNED_16(int16_t, d1, [64]);
4303     const uint8_t *scantable= s->intra_scantable.scantable;
4304     const uint8_t *perm_scantable= s->intra_scantable.permutated;
4305 //    unsigned int threshold1, threshold2;
4310     int qmul, qadd, start_i, last_non_zero, i, dc;
4312     uint8_t * last_length;
4314     int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
4317 static int after_last=0;
4318 static int to_zero=0;
4319 static int from_zero=0;
4322 static int messed_sign=0;
// Lazily build the shared DCT basis table on first use.
4325     if(basis[0][0] == 0)
4326         build_basis(s->idsp.idct_permutation);
4337         /* For AIC we skip quant/dequant of INTRADC */
4341         q <<= RECON_SHIFT-3;
4342         /* note: block[0] is assumed to be positive */
4344 //            block[0] = (block[0] + (q >> 1)) / q;
4346 //        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4347 //            bias= 1<<(QMAT_SHIFT-1);
4348         if (n > 3 && s->intra_chroma_ac_vlc_length) {
4349             length     = s->intra_chroma_ac_vlc_length;
4350             last_length= s->intra_chroma_ac_vlc_last_length;
4352             length     = s->intra_ac_vlc_length;
4353             last_length= s->intra_ac_vlc_last_length;
4358         length     = s->inter_ac_vlc_length;
4359         last_length= s->inter_ac_vlc_last_length;
4361     last_non_zero = s->block_last_index[n];
// Build the initial residual rem[] = reconstruction - original (RECON_SHIFT
// fixed point), rounding via the added half.
4366     dc += (1<<(RECON_SHIFT-1));
4367     for(i=0; i<64; i++){
4368         rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME  use orig dirrectly instead of copying to rem[]
4371 STOP_TIMER("memset rem[]")}
// Normalize perceptual weights into the 16..63 range controlled by qns.
4374     for(i=0; i<64; i++){
4379         w= FFABS(weight[i]) + qns*one;
4380         w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4383 //        w=weight[i] = (63*qns + (w/2)) / w;
4386         av_assert2(w<(1<<6));
4389     lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
// Subtract each existing coefficient's contribution from the residual and
// record the run-length structure for bit-cost accounting.
4395     for(i=start_i; i<=last_non_zero; i++){
4396         int j= perm_scantable[i];
4397         const int level= block[j];
4401             if(level<0) coeff= qmul*level - qadd;
4402             else        coeff= qmul*level + qadd;
4403             run_tab[rle_index++]=run;
4406             s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
4412 if(last_non_zero>0){
4413 STOP_TIMER("init rem[]")
// Start of the improvement loop: current cost of leaving everything as-is.
4420         int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4423         int run2, best_unquant_change=0, analyze_gradient;
4427         analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
// Gradient d1[] guides which direction a new coefficient should take.
4429         if(analyze_gradient){
4433             for(i=0; i<64; i++){
4436                 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
4439 STOP_TIMER("rem*w*w")}
// Intra DC gets its own +/-1 trial (quantized with plain q, range-limited).
4449             const int level= block[0];
4450             int change, old_coeff;
4452             av_assert2(s->mb_intra);
4456             for(change=-1; change<=1; change+=2){
4457                 int new_level= level + change;
4458                 int score, new_coeff;
4460                 new_coeff= q*new_level;
4461                 if(new_coeff >= 2048 || new_coeff < 0)
4464                 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4465                                                   new_coeff - old_coeff);
4466                 if(score<best_score){
4469                     best_change= change;
4470                     best_unquant_change= new_coeff - old_coeff;
4477             run2= run_tab[rle_index++];
// Try +/-1 on every AC position (also positions past last_non_zero when
// aggressive noise shaping is enabled).
4481         for(i=start_i; i<64; i++){
4482             int j= perm_scantable[i];
4483             const int level= block[j];
4484             int change, old_coeff;
4486             if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4490                 if(level<0) old_coeff= qmul*level - qadd;
4491                 else        old_coeff= qmul*level + qadd;
4492                 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4496             av_assert2(run2>=0 || i >= last_non_zero );
4499             for(change=-1; change<=1; change+=2){
4500                 int new_level= level + change;
4501                 int score, new_coeff, unquant_change;
// Mild noise shaping only allows shrinking magnitudes.
4504                 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4508                     if(new_level<0) new_coeff= qmul*new_level - qadd;
4509                     else            new_coeff= qmul*new_level + qadd;
4510                     if(new_coeff >= 2048 || new_coeff <= -2048)
4512                     //FIXME check for overflow
// Case 1: coefficient stays nonzero — bit cost is the VLC length delta.
4515                     if(level < 63 && level > -63){
4516                         if(i < last_non_zero)
4517                             score +=  length[UNI_AC_ENC_INDEX(run, new_level+64)]
4518                                      -length[UNI_AC_ENC_INDEX(run, level+64)];
4520                             score +=  last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4521                                      -last_length[UNI_AC_ENC_INDEX(run, level+64)];
// Case 2: a zero becomes +/-1 — splitting a run changes neighbors' codes too.
4524                     av_assert2(FFABS(new_level)==1);
4526                     if(analyze_gradient){
4527                         int g= d1[ scantable[i] ];
4528                         if(g && (g^new_level) >= 0)
4532                     if(i < last_non_zero){
4533                         int next_i= i + run2 + 1;
4534                         int next_level= block[ perm_scantable[next_i] ] + 64;
4536                         if(next_level&(~127))
4539                         if(next_i < last_non_zero)
4540                             score +=   length[UNI_AC_ENC_INDEX(run, 65)]
4541                                      + length[UNI_AC_ENC_INDEX(run2, next_level)]
4542                                      - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4544                             score +=  length[UNI_AC_ENC_INDEX(run, 65)]
4545                                     + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4546                                     - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4548                         score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4550                             score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4551                                     - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
// Case 3: a +/-1 becomes zero — runs merge; neighbors' codes change.
4557                     av_assert2(FFABS(level)==1);
4559                     if(i < last_non_zero){
4560                         int next_i= i + run2 + 1;
4561                         int next_level= block[ perm_scantable[next_i] ] + 64;
4563                         if(next_level&(~127))
4566                         if(next_i < last_non_zero)
4567                             score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4568                                      - length[UNI_AC_ENC_INDEX(run2, next_level)]
4569                                      - length[UNI_AC_ENC_INDEX(run, 65)];
4571                             score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4572                                      - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4573                                      - length[UNI_AC_ENC_INDEX(run, 65)];
4575                         score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4577                             score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4578                                     - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
// Combine bit-cost delta with weighted distortion delta from try_8x8basis.
4585                 unquant_change= new_coeff - old_coeff;
4586                 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4588                 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4590                 if(score<best_score){
4593                     best_change= change;
4594                     best_unquant_change= unquant_change;
4598                 prev_level= level + 64;
4599                 if(prev_level&(~127))
4608 STOP_TIMER("iterative step")}
// Apply the single best change found this iteration to the block and to
// last_non_zero bookkeeping.
4612             int j= perm_scantable[ best_coeff ];
4614             block[j] += best_change;
4616             if(best_coeff > last_non_zero){
4617                 last_non_zero= best_coeff;
4618                 av_assert2(block[j]);
4625                 if(block[j] - best_change){
4626                     if(FFABS(block[j]) > FFABS(block[j] - best_change)){
4638                 for(; last_non_zero>=start_i; last_non_zero--){
4639                     if(block[perm_scantable[last_non_zero]])
4645 if(256*256*256*64 % count == 0){
4646     av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
// Rebuild run_tab and fold the accepted change into the residual, then loop.
4651             for(i=start_i; i<=last_non_zero; i++){
4652                 int j= perm_scantable[i];
4653                 const int level= block[j];
4656                     run_tab[rle_index++]=run;
4663             s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4669 if(last_non_zero>0){
4670 STOP_TIMER("iterative search")
4675     return last_non_zero;
4679  * Permute an 8x8 block according to permutation.
4680  * @param block the block which will be permuted according to
4681  *              the given permutation vector
4682  * @param permutation the permutation vector
4683  * @param last the last non zero coefficient in scantable order, used to
4684  *             speed the permutation up
4685  * @param scantable the used scantable, this is only used to speed the
4686  *                  permutation up, the block is not (inverse) permuted
4687  *                  to scantable order!
/* NOTE(review): excerpt is truncated — the declarations of temp[]/i, the
 * early-out for last<0, and the first loop's copy into temp[] (original
 * 4691-4705) are not fully visible; the two visible loops gather the nonzero
 * coefficients and scatter them back at their permuted positions. */
4689 void ff_block_permute(int16_t *block, uint8_t *permutation,
4690                       const uint8_t *scantable, int last)
4697     //FIXME it is ok but not clean and might fail for some permutations
4698     // if (permutation[1] == 1)
// First pass: copy (presumably into temp[] and zero block[] — the body lines
// are missing from this excerpt).
4701     for (i = 0; i <= last; i++) {
4702         const int j = scantable[i];
// Second pass: write each saved coefficient to its permuted slot.
4707     for (i = 0; i <= last; i++) {
4708         const int j = scantable[i];
4709         const int perm_j = permutation[j];
4710         block[perm_j] = temp[j];
/* Plain (non-trellis) C quantizer for one 8x8 block: forward DCT, optional
 * denoising, separate intra-DC handling, then threshold-based quantization of
 * the remaining coefficients in scan order, and finally a coefficient
 * permutation so the block matches the IDCT's expected layout.
 * @param n        block index; n<4 selects luma, n>3 chroma intra matrices.
 * @param overflow set to nonzero if any level exceeded max_qcoeff.
 * @return index of the last nonzero coefficient in scan order.
 * NOTE(review): excerpt is truncated — the intra/inter branch heads, max/bias
 * initialization, the level store and sign handling inside the main loop, and
 * closing braces are missing from this view. */
4714 int ff_dct_quantize_c(MpegEncContext *s,
4715                         int16_t *block, int n,
4716                         int qscale, int *overflow)
4718     int i, j, level, last_non_zero, q, start_i;
4720     const uint8_t *scantable= s->intra_scantable.scantable;
4723     unsigned int threshold1, threshold2;
4725     s->fdsp.fdct(block);
4727     if(s->dct_error_sum)
4728         s->denoise_dct(s, block);
4738         /* For AIC we skip quant/dequant of INTRADC */
4741         /* note: block[0] is assumed to be positive */
4742         block[0] = (block[0] + (q >> 1)) / q;
4745         qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
// Bias is rescaled from QUANT_BIAS_SHIFT to the QMAT_SHIFT fixed point.
4746         bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4750         qmat = s->q_inter_matrix[qscale];
4751         bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4753     threshold1= (1<<QMAT_SHIFT) - bias - 1;
4754     threshold2= (threshold1<<1);
// Scan from high frequency down to find the last surviving coefficient.
4755     for(i=63;i>=start_i;i--) {
4757         level = block[j] * qmat[j];
4759         if(((unsigned)(level+threshold1))>threshold2){
// Quantize everything up to that coefficient; the unsigned-range test
// rejects |level| below threshold in a single branch-friendly compare.
4766     for(i=start_i; i<=last_non_zero; i++) {
4768         level = block[j] * qmat[j];
4770 //        if(   bias+level >= (1<<QMAT_SHIFT)
4771 //           || bias-level >= (1<<QMAT_SHIFT)){
4772         if(((unsigned)(level+threshold1))>threshold2){
4774             level= (bias + level)>>QMAT_SHIFT;
4777             level= (bias - level)>>QMAT_SHIFT;
4785     *overflow= s->max_qcoeff < max; //overflow might have happened
4787     /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4788     if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4789         ff_block_permute(block, s->idsp.idct_permutation,
4790                       scantable, last_non_zero);
4792     return last_non_zero;
/* AVOption plumbing: OFFSET maps an option to its MpegEncContext field;
 * VE marks options as video + encoding parameters. */
4795 #define OFFSET(x) offsetof(MpegEncContext, x)
4796 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options of the H.263 encoder.
 * NOTE(review): the FF_MPV_COMMON_OPTS line and the terminating { NULL }
 * entry appear to be missing from this truncated excerpt. */
4797 static const AVOption h263_options[] = {
4798     { "obmc",         "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4799     { "mb_info",      "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass tying h263_options to the H.263 encoder's private context. */
4804 static const AVClass h263_class = {
4805     .class_name = "H.263 encoder",
4806     .item_name  = av_default_item_name,
4807     .option     = h263_options,
4808     .version    = LIBAVUTIL_VERSION_INT,
/* H.263 encoder registration: uses the shared mpegvideo encode entry
 * points (init/encode2/close) and YUV420P input only. */
4811 AVCodec ff_h263_encoder = {
4813     .long_name      = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4814     .type           = AVMEDIA_TYPE_VIDEO,
4815     .id             = AV_CODEC_ID_H263,
4816     .priv_data_size = sizeof(MpegEncContext),
4817     .init           = ff_mpv_encode_init,
4818     .encode2        = ff_mpv_encode_picture,
4819     .close          = ff_mpv_encode_end,
4820     .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4821     .priv_class     = &h263_class,
/* Private options of the H.263+ encoder: unrestricted MVs, alternative
 * inter VLC, OBMC and structured slices (terminator not visible here). */
4824 static const AVOption h263p_options[] = {
4825 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4826 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4827 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4828 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
/* AVClass tying h263p_options to the H.263+ encoder's private context. */
4832 static const AVClass h263p_class = {
4833     .class_name = "H.263p encoder",
4834     .item_name  = av_default_item_name,
4835     .option     = h263p_options,
4836     .version    = LIBAVUTIL_VERSION_INT,
/* H.263+ (H.263 version 2) encoder registration; same shared mpegvideo
 * entry points as the others, plus slice-threading capability. */
4839 AVCodec ff_h263p_encoder = {
4841     .long_name      = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4842     .type           = AVMEDIA_TYPE_VIDEO,
4843     .id             = AV_CODEC_ID_H263P,
4844     .priv_data_size = sizeof(MpegEncContext),
4845     .init           = ff_mpv_encode_init,
4846     .encode2        = ff_mpv_encode_picture,
4847     .close          = ff_mpv_encode_end,
4848     .capabilities   = AV_CODEC_CAP_SLICE_THREADS,
4849     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4850     .priv_class     = &h263p_class,
/* AVClass for msmpeg4v2; exposes only the generic mpegvideo options. */
4853 static const AVClass msmpeg4v2_class = {
4854     .class_name = "msmpeg4v2 encoder",
4855     .item_name  = av_default_item_name,
4856     .option     = ff_mpv_generic_options,
4857     .version    = LIBAVUTIL_VERSION_INT,
/* MS-MPEG4 v2 encoder registration, built on the shared mpegvideo
 * encode entry points. */
4860 AVCodec ff_msmpeg4v2_encoder = {
4861     .name           = "msmpeg4v2",
4862     .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4863     .type           = AVMEDIA_TYPE_VIDEO,
4864     .id             = AV_CODEC_ID_MSMPEG4V2,
4865     .priv_data_size = sizeof(MpegEncContext),
4866     .init           = ff_mpv_encode_init,
4867     .encode2        = ff_mpv_encode_picture,
4868     .close          = ff_mpv_encode_end,
4869     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4870     .priv_class     = &msmpeg4v2_class,
/* AVClass for msmpeg4v3; exposes only the generic mpegvideo options. */
4873 static const AVClass msmpeg4v3_class = {
4874     .class_name = "msmpeg4v3 encoder",
4875     .item_name  = av_default_item_name,
4876     .option     = ff_mpv_generic_options,
4877     .version    = LIBAVUTIL_VERSION_INT,
/* MS-MPEG4 v3 encoder registration, built on the shared mpegvideo
 * encode entry points. */
4880 AVCodec ff_msmpeg4v3_encoder = {
4882     .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4883     .type           = AVMEDIA_TYPE_VIDEO,
4884     .id             = AV_CODEC_ID_MSMPEG4V3,
4885     .priv_data_size = sizeof(MpegEncContext),
4886     .init           = ff_mpv_encode_init,
4887     .encode2        = ff_mpv_encode_picture,
4888     .close          = ff_mpv_encode_end,
4889     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4890     .priv_class     = &msmpeg4v3_class,
/* AVClass for WMV1; exposes only the generic mpegvideo options. */
4893 static const AVClass wmv1_class = {
4894     .class_name = "wmv1 encoder",
4895     .item_name  = av_default_item_name,
4896     .option     = ff_mpv_generic_options,
4897     .version    = LIBAVUTIL_VERSION_INT,
4900 AVCodec ff_wmv1_encoder = {
4902 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4903 .type = AVMEDIA_TYPE_VIDEO,
4904 .id = AV_CODEC_ID_WMV1,
4905 .priv_data_size = sizeof(MpegEncContext),
4906 .init = ff_mpv_encode_init,
4907 .encode2 = ff_mpv_encode_picture,
4908 .close = ff_mpv_encode_end,
4909 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4910 .priv_class = &wmv1_class,