2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/mem_internal.h"
40 #include "libavutil/pixdesc.h"
41 #include "libavutil/opt.h"
42 #include "libavutil/thread.h"
47 #include "mpegvideo.h"
48 #include "mpegvideodata.h"
52 #include "mjpegenc_common.h"
54 #include "mpegutils.h"
56 #include "speedhqenc.h"
58 #include "pixblockdsp.h"
62 #include "aandcttab.h"
64 #include "mpeg4video.h"
66 #include "bytestream.h"
69 #include "packet_internal.h"
/* Fixed-point precision constants for the quantizer matrix setup below.
 * NOTE(review): this extract appears to be missing at least one line here
 * (the plain QMAT_SHIFT define used by ff_convert_matrix) -- confirm
 * against the complete file. */
#define QUANT_BIAS_SHIFT 8
#define QMAT_SHIFT_MMX 16

/* Forward declarations for encoder-local helpers defined later in this file. */
static int encode_picture(MpegEncContext *s, int picture_number);
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
static int sse_mb(MpegEncContext *s);
static void denoise_dct_c(MpegEncContext *s, int16_t *block);
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);

/* Static tables shared by all encoder instances; filled once under
 * ff_thread_once() by mpv_encode_init_static(). */
static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
/* Generic AVOption table shared by the mpegvideo-based encoders.
 * NOTE(review): the table is not terminated in this extract -- the
 * remaining entries and the NULL sentinel lie outside the visible span. */
const AVOption ff_mpv_generic_options[] = {
#if FF_API_MPEGVIDEO_OPTS
    /* Deprecated options kept only while the compatibility layer exists. */
    FF_MPV_DEPRECATED_MPEG_QUANT_OPT
    FF_MPV_DEPRECATED_A53_CC_OPT
    FF_MPV_DEPRECATED_MATRIX_OPT
/* Convert a quantization matrix into the per-qscale fixed-point
 * multiplier tables used by the DCT quantizers.
 *
 * qmat         receives 32-bit reciprocal multipliers per qscale/coefficient
 * qmat16       receives 16-bit multiplier+bias pairs for the MMX/fast path
 * quant_matrix source matrix (in natural order; entries are permuted through
 *              s->idsp.idct_permutation)
 * bias         rounding bias in QUANT_BIAS_SHIFT fixed point
 * qmin, qmax   inclusive qscale range to precompute
 * intra        nonzero to skip coefficient 0 (DC) in the overflow check
 *
 * The scaling branch depends on which fdct implementation is in use, since
 * ff_fdct_ifast leaves ff_aanscales[] factors in its output.
 * NOTE(review): several lines are missing from this extract (local
 * declarations, #if CONFIG_FAANDCT, loop/brace closers, the shift/overflow
 * handling tail) -- the visible code is incomplete. */
void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
                       uint16_t (*qmat16)[2][64],
                       const uint16_t *quant_matrix,
                       int bias, int qmin, int qmax, int intra)
    FDCTDSPContext *fdsp = &s->fdsp;

    for (qscale = qmin; qscale <= qmax; qscale++) {
        /* MPEG-2 non-linear qscale maps through a lookup table; the linear
         * case simply doubles the scale code. */
        if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
        else                 qscale2 = qscale << 1;

        if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
            fdsp->fdct == ff_faandct ||
#endif /* CONFIG_FAANDCT */
            fdsp->fdct == ff_jpeg_fdct_islow_10) {
            /* Accurate fdct: no AAN scale factors to compensate for. */
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale2 * quant_matrix[j];
                /* 16 <= qscale * quant_matrix[i] <= 7905
                 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
                 * 19952 <= x <= 249205026
                 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
                 * 3444240 >= (1 << 36) / (x) >= 275 */
                qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
        } else if (fdsp->fdct == ff_fdct_ifast) {
            /* ifast fdct: fold the AAN scale factors into the multiplier. */
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
                /* 16 <= qscale * quant_matrix[i] <= 7905
                 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
                 * 19952 <= x <= 249205026
                 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
                 * 3444240 >= (1 << 36) / (x) >= 275 */
                qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
            /* Remaining fdct implementations: also build the 16-bit tables. */
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale2 * quant_matrix[j];
                /* We can safely suppose that 16 <= quant_matrix[i] <= 255
                 * Assume x = qscale * quant_matrix[i]
                 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
                 * so 32768 >= (1 << 19) / (x) >= 67 */
                qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
                //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
                //                   (qscale * quant_matrix[i]);
                qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;

                /* Clamp away 0 and the 16-bit wrap value 0x8000 so the
                 * multiplier always fits the fast path's assumptions. */
                if (qmat16[qscale][0][i] == 0 ||
                    qmat16[qscale][0][i] == 128 * 256)
                    qmat16[qscale][0][i] = 128 * 256 - 1;
                qmat16[qscale][1][i] =
                    ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
                                qmat16[qscale][0][i]);

        /* Overflow check: find a shift such that max * qmat fits in int.
         * For intra matrices, coefficient 0 (DC) is excluded. */
        for (i = intra; i < 64; i++) {
            if (fdsp->fdct == ff_fdct_ifast) {
                max = (8191LL * ff_aanscales[i]) >> 14;
            while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {

        av_log(s->avctx, AV_LOG_INFO,
               "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/* Derive s->qscale (and s->lambda2) from the current rate-control
 * lambda.  The 139 / (1 << 7) factor approximates the empirical
 * lambda->qscale mapping used throughout mpegvideo.
 * NOTE(review): the first branch is deliberately disabled with "&& 0";
 * several lines (break/closing braces, the bestdiff update, lambda2
 * shift amount) are missing from this extract. */
static inline void update_qscale(MpegEncContext *s)
    if (s->q_scale_type == 1 && 0) {
        int bestdiff=INT_MAX;

        /* Pick the non-linear qscale code closest to lambda, honouring
         * qmin/qmax unless VBV forced qmax to be ignored. */
        for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
            int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
            if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
                (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
            if (diff < bestdiff) {
    /* Linear case: rounded lambda -> qscale conversion, then clamp.
     * When VBV ignores qmax the hard ceiling is the syntax limit 31. */
    s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
                (FF_LAMBDA_SHIFT + 7);
    s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);

    /* Keep lambda2 (squared lambda, rounded) in sync. */
    s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/* Write a 64-entry quantization matrix to the bitstream in zigzag order,
 * 8 bits per coefficient (MPEG-1/2 header syntax).
 * NOTE(review): the opening brace, loop-variable declaration and closing
 * lines are missing from this extract. */
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
    for (i = 0; i < 64; i++) {
        put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
 * init s->current_picture.qscale_table from s->lambda_table
/* Fill the per-macroblock qscale table from the per-MB lambda values,
 * using the same lambda->qp mapping as update_qscale() and clamping to
 * the configured qmin (upper clamp is outside this extract).
 * NOTE(review): braces and the end of the av_clip() call are missing. */
void ff_init_qscale_tab(MpegEncContext *s)
    int8_t * const qscale_table = s->current_picture.qscale_table;

    for (i = 0; i < s->mb_num; i++) {
        unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
        int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
        qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/* Copy the fields that motion estimation may have changed from the main
 * context back into a slice-thread duplicate context.
 * NOTE(review): the src parameter, function braces and #undef COPY are
 * outside this extract. */
static void update_duplicate_context_after_me(MpegEncContext *dst,
#define COPY(a) dst->a= src->a
    COPY(current_picture);
    COPY(picture_in_gop_number);
    COPY(gop_picture_number);
    COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
    COPY(progressive_frame); // FIXME don't set in encode_header
    COPY(partitioned_frame); // FIXME don't set in encode_header
/* One-time initialization of the static encoder tables (run under
 * ff_thread_once() from mpv_encode_defaults()).  Marks the small-MV
 * entries of default_fcode_tab with fcode 1.
 * NOTE(review): the initialization of default_mv_penalty, and the
 * function braces, are missing from this extract. */
static void mpv_encode_init_static(void)
    for (int i = -16; i < 16; i++)
        default_fcode_tab[i + MAX_MV] = 1;
 * Set the given MpegEncContext to defaults for encoding.
 * the changed fields will not depend upon the prior state of the MpegEncContext.
/* Resets common defaults, performs the one-time static table init, and
 * points the context at the shared default MV-penalty/fcode tables. */
static void mpv_encode_defaults(MpegEncContext *s)
    static AVOnce init_static_once = AV_ONCE_INIT;

    ff_mpv_common_defaults(s);

    /* Thread-safe: the static tables are filled exactly once. */
    ff_thread_once(&init_static_once, mpv_encode_init_static);

    s->me.mv_penalty = default_mv_penalty;
    s->fcode_tab = default_fcode_tab;

    s->input_picture_number = 0;
    s->picture_in_gop_number = 0;
/* Select the DCT quantization implementations: arch-specific overrides
 * first, then C fallbacks; trellis quantization replaces the default
 * dct_quantize when requested (fast_dct_quantize keeps the non-trellis
 * path for speculative encoding).
 * NOTE(review): braces, an #if around the x86 init, and the return are
 * missing from this extract. */
av_cold int ff_dct_encode_init(MpegEncContext *s)
    ff_dct_encode_init_x86(s);

    if (CONFIG_H263_ENCODER)
        ff_h263dsp_init(&s->h263dsp);
    if (!s->dct_quantize)
        s->dct_quantize = ff_dct_quantize_c;
    s->denoise_dct = denoise_dct_c;
    s->fast_dct_quantize = s->dct_quantize;
    if (s->avctx->trellis)
        s->dct_quantize = dct_quantize_trellis_c;
/* init video encoder */
/* Initialize an mpegvideo-based encoder instance:
 *  - validate and normalize the user-supplied AVCodecContext settings
 *    (GOP size, B-frames, rate control / VBV, resolutions, flags),
 *  - select the output format and per-codec options for avctx->codec->id,
 *  - allocate quantizer tables, picture arrays and DSP/rate-control state.
 * Returns 0 on success or a negative AVERROR code on invalid settings or
 * allocation failure.
 * NOTE(review): this extract is missing many interior lines (returns,
 * break statements, closing braces, goto/fail paths); comments describe
 * only what is visible here. */
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
    MpegEncContext *s = avctx->priv_data;
    AVCPBProperties *cpb_props;

    mpv_encode_defaults(s);

    /* Map the input pixel format to the internal chroma layout. */
    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUVJ444P:
    case AV_PIX_FMT_YUV444P:
        s->chroma_format = CHROMA_444;
    case AV_PIX_FMT_YUVJ422P:
    case AV_PIX_FMT_YUV422P:
        s->chroma_format = CHROMA_422;
    case AV_PIX_FMT_YUVJ420P:
    case AV_PIX_FMT_YUV420P:
        s->chroma_format = CHROMA_420;

    avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);

#if FF_API_PRIVATE_OPT
FF_DISABLE_DEPRECATION_WARNINGS
    /* Copy deprecated public options into the private context while the
     * compatibility layer still exists. */
    if (avctx->rtp_payload_size)
        s->rtp_payload_size = avctx->rtp_payload_size;
    if (avctx->me_penalty_compensation)
        s->me_penalty_compensation = avctx->me_penalty_compensation;
    s->me_pre = avctx->pre_me;
FF_ENABLE_DEPRECATION_WARNINGS

    s->bit_rate = avctx->bit_rate;
    s->width = avctx->width;
    s->height = avctx->height;
    if (avctx->gop_size > 600 &&
        avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
        av_log(avctx, AV_LOG_WARNING,
               "keyframe interval too large!, reducing it from %d to %d\n",
               avctx->gop_size, 600);
        avctx->gop_size = 600;
    s->gop_size = avctx->gop_size;

    if (avctx->max_b_frames > MAX_B_FRAMES) {
        av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
               "is %d.\n", MAX_B_FRAMES);
        avctx->max_b_frames = MAX_B_FRAMES;
    s->max_b_frames = avctx->max_b_frames;
    s->codec_id = avctx->codec->id;
    s->strict_std_compliance = avctx->strict_std_compliance;
    s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
    s->rtp_mode = !!s->rtp_payload_size;
    s->intra_dc_precision = avctx->intra_dc_precision;

    // workaround some differences between how applications specify dc precision
    if (s->intra_dc_precision < 0) {
        s->intra_dc_precision += 8;
    } else if (s->intra_dc_precision >= 8)
        s->intra_dc_precision -= 8;

    if (s->intra_dc_precision < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "intra dc precision must be positive, note some applications use"
               " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
        return AVERROR(EINVAL);

    if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))

    /* Only MPEG-2 supports intra_dc_precision codes above 0 (max 3). */
    if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
        av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
        return AVERROR(EINVAL);
    s->user_specified_pts = AV_NOPTS_VALUE;

    if (s->gop_size <= 1) {

    s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);

    /* Adaptive quantization is implied by any of the masking options or
     * QP-RD mode. */
    s->adaptive_quant = (avctx->lumi_masking ||
                         avctx->dark_masking ||
                         avctx->temporal_cplx_masking ||
                         avctx->spatial_cplx_masking ||
                         (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&

    s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);

    /* Pick a default VBV buffer size when only a max rate was given. */
    if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
        switch(avctx->codec_id) {
        case AV_CODEC_ID_MPEG1VIDEO:
        case AV_CODEC_ID_MPEG2VIDEO:
            avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
        case AV_CODEC_ID_MPEG4:
        case AV_CODEC_ID_MSMPEG4V1:
        case AV_CODEC_ID_MSMPEG4V2:
        case AV_CODEC_ID_MSMPEG4V3:
            /* Piecewise-linear buffer sizing derived from the MPEG-4 VBV
             * profile tables (units of 16384 bits). */
            if (avctx->rc_max_rate >= 15000000) {
                avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
            } else if(avctx->rc_max_rate >= 2000000) {
                avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
            } else if(avctx->rc_max_rate >= 384000) {
                avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
                avctx->rc_buffer_size = 40;
            avctx->rc_buffer_size *= 16384;
        if (avctx->rc_buffer_size) {
            av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);

    if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
        av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
        return AVERROR(EINVAL);

    if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
        av_log(avctx, AV_LOG_INFO,
               "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");

    if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
        av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
        return AVERROR(EINVAL);

    if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
        av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
        return AVERROR(EINVAL);

    if (avctx->rc_max_rate &&
        avctx->rc_max_rate == avctx->bit_rate &&
        avctx->rc_max_rate != avctx->rc_min_rate) {
        av_log(avctx, AV_LOG_INFO,
               "impossible bitrate constraints, this will fail\n");

    if (avctx->rc_buffer_size &&
        avctx->bit_rate * (int64_t)avctx->time_base.num >
            avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
        av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
        return AVERROR(EINVAL);

    if (!s->fixed_qscale &&
        avctx->bit_rate * av_q2d(avctx->time_base) >
            avctx->bit_rate_tolerance) {
        av_log(avctx, AV_LOG_WARNING,
               "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
        avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);

    /* CBR MPEG-1/2: warn when the buffer cannot be expressed in the
     * 16-bit vbv_delay field. */
    if (avctx->rc_max_rate &&
        avctx->rc_min_rate == avctx->rc_max_rate &&
        (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
         s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
        90000LL * (avctx->rc_buffer_size - 1) >
            avctx->rc_max_rate * 0xFFFFLL) {
        av_log(avctx, AV_LOG_INFO,
               "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
               "specified vbv buffer is too large for the given bitrate!\n");

    /* Feature / codec compatibility checks. */
    if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
        s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
        s->codec_id != AV_CODEC_ID_FLV1) {
        av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
        return AVERROR(EINVAL);

    if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
        av_log(avctx, AV_LOG_ERROR,
               "OBMC is only supported with simple mb decision\n");
        return AVERROR(EINVAL);

    if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
        av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
        return AVERROR(EINVAL);

    if (s->max_b_frames &&
        s->codec_id != AV_CODEC_ID_MPEG4 &&
        s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
        s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
        av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
        return AVERROR(EINVAL);
    if (s->max_b_frames < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "max b frames must be 0 or positive for mpegvideo based encoders\n");
        return AVERROR(EINVAL);

    /* Short-header codecs carry the pixel aspect in 8-bit fields. */
    if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
         s->codec_id == AV_CODEC_ID_H263 ||
         s->codec_id == AV_CODEC_ID_H263P) &&
        (avctx->sample_aspect_ratio.num > 255 ||
         avctx->sample_aspect_ratio.den > 255)) {
        av_log(avctx, AV_LOG_WARNING,
               "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
               avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
        av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
                  avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);

    if ((s->codec_id == AV_CODEC_ID_H263 ||
         s->codec_id == AV_CODEC_ID_H263P) &&
        (avctx->width > 2048 ||
         avctx->height > 1152 )) {
        av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
        return AVERROR(EINVAL);
    if ((s->codec_id == AV_CODEC_ID_H263 ||
         s->codec_id == AV_CODEC_ID_H263P ||
         s->codec_id == AV_CODEC_ID_RV20) &&
        ((avctx->width &3) ||
         (avctx->height&3) )) {
        av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
        return AVERROR(EINVAL);

    if (s->codec_id == AV_CODEC_ID_RV10 &&
        avctx->height&15 )) {
        av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
        return AVERROR(EINVAL);

    if ((s->codec_id == AV_CODEC_ID_WMV1 ||
         s->codec_id == AV_CODEC_ID_WMV2) &&
        av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
        return AVERROR(EINVAL);

    if ((avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
        s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
        av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
        return AVERROR(EINVAL);

#if FF_API_PRIVATE_OPT
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->mpeg_quant)
    FF_ENABLE_DEPRECATION_WARNINGS

    if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
                        && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
        av_log(avctx, AV_LOG_ERROR,
               "mpeg2 style quantization not supported by codec\n");
        return AVERROR(EINVAL);

    if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
        av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
        return AVERROR(EINVAL);

    if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
        avctx->mb_decision != FF_MB_DECISION_RD) {
        av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
        return AVERROR(EINVAL);

    if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
        (s->codec_id == AV_CODEC_ID_AMV ||
         s->codec_id == AV_CODEC_ID_MJPEG)) {
        // Used to produce garbage with MJPEG.
        av_log(avctx, AV_LOG_ERROR,
               "QP RD is no longer compatible with MJPEG or AMV\n");
        return AVERROR(EINVAL);

#if FF_API_PRIVATE_OPT
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->scenechange_threshold)
        s->scenechange_threshold = avctx->scenechange_threshold;
    FF_ENABLE_DEPRECATION_WARNINGS

    if (s->scenechange_threshold < 1000000000 &&
        (avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
        av_log(avctx, AV_LOG_ERROR,
               "closed gop with scene change detection are not supported yet, "
               "set threshold to 1000000000\n");
        return AVERROR_PATCHWELCOME;

    if (avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
        if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
            s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
            av_log(avctx, AV_LOG_ERROR,
                   "low delay forcing is only available for mpeg2, "
                   "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
            return AVERROR(EINVAL);
        if (s->max_b_frames != 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "B-frames cannot be used with low delay\n");
            return AVERROR(EINVAL);

    if (s->q_scale_type == 1) {
        if (avctx->qmax > 28) {
            av_log(avctx, AV_LOG_ERROR,
                   "non linear quant only supports qmax <= 28 currently\n");
            return AVERROR_PATCHWELCOME;

    if (avctx->slices > 1 &&
        (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
        av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
        return AVERROR(EINVAL);

    if (avctx->thread_count > 1 &&
        s->codec_id != AV_CODEC_ID_MPEG4 &&
        s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
        s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
        s->codec_id != AV_CODEC_ID_MJPEG &&
        (s->codec_id != AV_CODEC_ID_H263P)) {
        av_log(avctx, AV_LOG_ERROR,
               "multi threaded encoding not supported by codec\n");
        return AVERROR_PATCHWELCOME;

    if (avctx->thread_count < 1) {
        av_log(avctx, AV_LOG_ERROR,
               "automatic thread number detection not supported by codec, "
        return AVERROR_PATCHWELCOME;

#if FF_API_PRIVATE_OPT
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->b_frame_strategy)
        s->b_frame_strategy = avctx->b_frame_strategy;
    if (avctx->b_sensitivity != 40)
        s->b_sensitivity = avctx->b_sensitivity;
    FF_ENABLE_DEPRECATION_WARNINGS

    if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
        av_log(avctx, AV_LOG_INFO,
               "notice: b_frame_strategy only affects the first pass\n");
        s->b_frame_strategy = 0;

    /* Reduce the timebase to lowest terms. */
    i = av_gcd(avctx->time_base.den, avctx->time_base.num);
        av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
        avctx->time_base.den /= i;
        avctx->time_base.num /= i;

    /* Choose default intra/inter quantizer rounding biases; intra-only
     * and MPEG-style quantizers bias toward larger values. */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
        // (a + x * 3 / 8) / x
        s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
        s->inter_quant_bias = 0;
        s->intra_quant_bias = 0;
        s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));

    if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
        av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
        return AVERROR(EINVAL);

    av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);

    /* MPEG-4 stores the timebase denominator in a 16-bit field. */
    if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
        avctx->time_base.den > (1 << 16) - 1) {
        av_log(avctx, AV_LOG_ERROR,
               "timebase %d/%d not supported by MPEG 4 standard, "
               "the maximum admitted value for the timebase denominator "
               "is %d\n", avctx->time_base.num, avctx->time_base.den,
        return AVERROR(EINVAL);
    s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;

    /* Per-codec output format and feature selection. */
    switch (avctx->codec->id) {
    case AV_CODEC_ID_MPEG1VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
        avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
    case AV_CODEC_ID_MPEG2VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
        avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
#if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
    case AV_CODEC_ID_MJPEG:
    case AV_CODEC_ID_AMV:
        s->out_format = FMT_MJPEG;
        s->intra_only = 1; /* force intra only for jpeg */
        if ((ret = ff_mjpeg_encode_init(s)) < 0)
    case AV_CODEC_ID_SPEEDHQ:
        s->out_format = FMT_SPEEDHQ;
        s->intra_only = 1; /* force intra only for SHQ */
        if (!CONFIG_SPEEDHQ_ENCODER)
            return AVERROR_ENCODER_NOT_FOUND;
        if ((ret = ff_speedhq_encode_init(s)) < 0)
    case AV_CODEC_ID_H261:
        if (!CONFIG_H261_ENCODER)
            return AVERROR_ENCODER_NOT_FOUND;
        if (ff_h261_get_picture_format(s->width, s->height) < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "The specified picture size of %dx%d is not valid for the "
                   "H.261 codec.\nValid sizes are 176x144, 352x288\n",
                   s->width, s->height);
            return AVERROR(EINVAL);
        s->out_format = FMT_H261;
        s->rtp_mode = 0; /* Sliced encoding not supported */
    case AV_CODEC_ID_H263:
        if (!CONFIG_H263_ENCODER)
            return AVERROR_ENCODER_NOT_FOUND;
        if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
                             s->width, s->height) == 8) {
            av_log(avctx, AV_LOG_ERROR,
                   "The specified picture size of %dx%d is not valid for "
                   "the H.263 codec.\nValid sizes are 128x96, 176x144, "
                   "352x288, 704x576, and 1408x1152. "
                   "Try H.263+.\n", s->width, s->height);
            return AVERROR(EINVAL);
        s->out_format = FMT_H263;
    case AV_CODEC_ID_H263P:
        s->out_format = FMT_H263;
        /* AIC (advanced intra coding) implies modified quant in H.263+. */
        s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
        s->modified_quant = s->h263_aic;
        s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
        s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;

        /* These are just to be sure */
    case AV_CODEC_ID_FLV1:
        s->out_format = FMT_H263;
        s->h263_flv = 2; /* format = 1; 11-bit codes */
        s->unrestricted_mv = 1;
        s->rtp_mode = 0; /* don't allow GOB */
    case AV_CODEC_ID_RV10:
        s->out_format = FMT_H263;
    case AV_CODEC_ID_RV20:
        s->out_format = FMT_H263;
        s->modified_quant = 1;
        s->unrestricted_mv = 0;
    case AV_CODEC_ID_MPEG4:
        s->out_format = FMT_H263;
        s->unrestricted_mv = 1;
        s->low_delay = s->max_b_frames ? 0 : 1;
        avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
    case AV_CODEC_ID_MSMPEG4V2:
        s->out_format = FMT_H263;
        s->unrestricted_mv = 1;
        s->msmpeg4_version = 2;
    case AV_CODEC_ID_MSMPEG4V3:
        s->out_format = FMT_H263;
        s->unrestricted_mv = 1;
        s->msmpeg4_version = 3;
        s->flipflop_rounding = 1;
    case AV_CODEC_ID_WMV1:
        s->out_format = FMT_H263;
        s->unrestricted_mv = 1;
        s->msmpeg4_version = 4;
        s->flipflop_rounding = 1;
    case AV_CODEC_ID_WMV2:
        s->out_format = FMT_H263;
        s->unrestricted_mv = 1;
        s->msmpeg4_version = 5;
        s->flipflop_rounding = 1;
        return AVERROR(EINVAL);

#if FF_API_PRIVATE_OPT
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->noise_reduction)
        s->noise_reduction = avctx->noise_reduction;
    FF_ENABLE_DEPRECATION_WARNINGS

    avctx->has_b_frames = !s->low_delay;

    s->progressive_frame =
    s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
                                                AV_CODEC_FLAG_INTERLACED_ME) ||

    if ((ret = ff_mpv_common_init(s)) < 0)

    /* Initialize the DSP helper contexts used during encoding. */
    ff_fdctdsp_init(&s->fdsp, avctx);
    ff_me_cmp_init(&s->mecc, avctx);
    ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
    ff_pixblockdsp_init(&s->pdsp, avctx);
    ff_qpeldsp_init(&s->qdsp);

    if (s->msmpeg4_version) {
        int ac_stats_size = 2 * 2 * (MAX_LEVEL + 1) * (MAX_RUN + 1) * 2 * sizeof(int);
        if (!(s->ac_stats = av_mallocz(ac_stats_size)))
            return AVERROR(ENOMEM);

    /* Allocate stats buffer, quantizer tables and picture arrays. */
    if (!(avctx->stats_out = av_mallocz(256)) ||
        !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix, 32) ||
        !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix, 32) ||
        !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix, 32) ||
        !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16, 32) ||
        !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
        !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16, 32) ||
        !FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_PICTURE_COUNT) ||
        !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_PICTURE_COUNT))
        return AVERROR(ENOMEM);

    if (s->noise_reduction) {
        if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
            return AVERROR(ENOMEM);

    ff_dct_encode_init(s);

    if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
        s->chroma_qscale_table = ff_h263_chroma_qscale_table;

    if (s->slice_context_count > 1) {
        if (avctx->codec_id == AV_CODEC_ID_H263P)
            s->h263_slice_structured = 1;

    s->quant_precision = 5;

#if FF_API_PRIVATE_OPT
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->frame_skip_threshold)
        s->frame_skip_threshold = avctx->frame_skip_threshold;
    if (avctx->frame_skip_factor)
        s->frame_skip_factor = avctx->frame_skip_factor;
    if (avctx->frame_skip_exp)
        s->frame_skip_exp = avctx->frame_skip_exp;
    if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
        s->frame_skip_cmp = avctx->frame_skip_cmp;
    FF_ENABLE_DEPRECATION_WARNINGS

    ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, avctx->ildct_cmp);
    ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);

    /* Format-specific encoder initialization. */
    if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
        ff_h261_encode_init(s);
    if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
        ff_h263_encode_init(s);
    if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
        ff_msmpeg4_encode_init(s);
    if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
        && s->out_format == FMT_MPEG1)
        ff_mpeg1_encode_init(s);

    /* Build the (permuted) default intra/inter matrices for the selected
     * codec, then apply user-supplied overrides. */
    for (i = 0; i < 64; i++) {
        int j = s->idsp.idct_permutation[i];
        if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
            s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
        } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
        } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
            s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
            s->chroma_intra_matrix[j] =
            s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
        if (avctx->intra_matrix)
            s->intra_matrix[j] = avctx->intra_matrix[i];
        if (avctx->inter_matrix)
            s->inter_matrix[j] = avctx->inter_matrix[i];

    /* precompute matrix */
    /* for mjpeg, we do include qscale in the matrix */
    if (s->out_format != FMT_MJPEG) {
        ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
                          s->intra_matrix, s->intra_quant_bias, avctx->qmin,
        ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
                          s->inter_matrix, s->inter_quant_bias, avctx->qmin,

    if ((ret = ff_rate_control_init(s)) < 0)

#if FF_API_PRIVATE_OPT
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->brd_scale)
        s->brd_scale = avctx->brd_scale;

    if (avctx->prediction_method)
        s->pred = avctx->prediction_method + 1;
    FF_ENABLE_DEPRECATION_WARNINGS

    /* b_frame_strategy 2 needs downscaled scratch frames for look-ahead. */
    if (s->b_frame_strategy == 2) {
        for (i = 0; i < s->max_b_frames + 2; i++) {
            s->tmp_frames[i] = av_frame_alloc();
            if (!s->tmp_frames[i])
                return AVERROR(ENOMEM);

            s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
            s->tmp_frames[i]->width = s->width >> s->brd_scale;
            s->tmp_frames[i]->height = s->height >> s->brd_scale;

            ret = av_frame_get_buffer(s->tmp_frames[i], 0);

    /* Export the coded picture buffer parameters as side data. */
    cpb_props = ff_add_cpb_side_data(avctx);
        return AVERROR(ENOMEM);
    cpb_props->max_bitrate = avctx->rc_max_rate;
    cpb_props->min_bitrate = avctx->rc_min_rate;
    cpb_props->avg_bitrate = avctx->bit_rate;
    cpb_props->buffer_size = avctx->rc_buffer_size;
/* Free all encoder-owned state: rate control, common mpegvideo state,
 * MJPEG tables, scratch frames, quantizer tables and picture arrays.
 * NOTE(review): braces and the trailing return are outside this extract. */
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
    MpegEncContext *s = avctx->priv_data;

    ff_rate_control_uninit(s);

    ff_mpv_common_end(s);
    if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
        s->out_format == FMT_MJPEG)
        ff_mjpeg_encode_close(s);

    av_freep(&avctx->extradata);

    for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
        av_frame_free(&s->tmp_frames[i]);

    ff_free_picture_tables(&s->new_picture);
    ff_mpeg_unref_picture(avctx, &s->new_picture);

    av_freep(&avctx->stats_out);
    av_freep(&s->ac_stats);

    /* The chroma tables may alias the luma tables (see their setup);
     * only free them separately when they are distinct allocations. */
    if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
    if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
    s->q_chroma_intra_matrix= NULL;
    s->q_chroma_intra_matrix16= NULL;
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);
/* Sum of absolute errors of a 16x16 block against a constant reference
 * value (used by get_intra_count() below with the block mean as ref).
 * NOTE(review): the accumulator declaration, braces and return are
 * missing from this extract. */
static int get_sae(uint8_t *src, int ref, int stride)
    for (y = 0; y < 16; y++) {
        for (x = 0; x < 16; x++) {
            acc += FFABS(src[x + y * stride] - ref);
/* Count the 16x16 macroblocks for which intra coding looks cheaper than
 * inter: a block votes "intra" when its deviation from its own mean
 * (SAE, plus a 500 margin) is still below the SAD against the reference
 * frame.  Operates on the height rounded down to a multiple of 16.
 * NOTE(review): the width setup, braces, sad[] call tail and return are
 * missing from this extract. */
static int get_intra_count(MpegEncContext *s, uint8_t *src,
                           uint8_t *ref, int stride)
    h = s->height & ~15;

    for (y = 0; y < h; y += 16) {
        for (x = 0; x < w; x += 16) {
            int offset = x + y * stride;
            int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
            int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
            int sae = get_sae(src + offset, mean, stride);

            acc += sae + 500 < sad;
/* Thin wrapper around ff_alloc_picture() supplying this encoder's
 * geometry (chroma shifts, mb/b8 strides, sizes) and receiving the
 * resulting line sizes.  'shared' is forwarded to reuse caller-owned
 * buffers instead of allocating new ones.
 * NOTE(review): function braces are missing from this extract. */
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
    return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
                            s->chroma_x_shift, s->chroma_y_shift, s->out_format,
                            s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
                            &s->linesize, &s->uvlinesize);
/* Copy or reference the user-supplied frame 'pic_arg' into the encoder's
 * input queue (s->input_picture[]), assigning display_picture_number and
 * pts, and handling the flush case where fewer frames than the reorder
 * delay have been received.
 * Returns 0 on success, negative AVERROR on failure.
 * NOTE(review): several interior lines are elided in this chunk ('pts' and
 * 'direct' initialisation, some braces/else branches, padding loops). */
1105 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1107 Picture *pic = NULL;
1109 int i, display_picture_number = 0, ret;
// Reorder delay: max_b_frames when B-frames are enabled, otherwise one
// frame unless low_delay is set.
1110 int encoding_delay = s->max_b_frames ? s->max_b_frames
1111 : (s->low_delay ? 0 : 1);
1112 int flush_offset = 1;
1117 display_picture_number = s->input_picture_number++;
// --- pts validation / reconstruction ---
1119 if (pts != AV_NOPTS_VALUE) {
1120 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1121 int64_t last = s->user_specified_pts;
// Non-monotonic pts from the caller is a hard error.
1124 av_log(s->avctx, AV_LOG_ERROR,
1125 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1127 return AVERROR(EINVAL);
// Remember the pts step of the first interval to later derive dts.
1130 if (!s->low_delay && display_picture_number == 1)
1131 s->dts_delta = pts - last;
1133 s->user_specified_pts = pts;
// No pts supplied: guess by extrapolating from the previous one, or fall
// back to the display picture number.
1135 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1136 s->user_specified_pts =
1137 pts = s->user_specified_pts + 1;
1138 av_log(s->avctx, AV_LOG_INFO,
1139 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1142 pts = display_picture_number;
// --- decide whether the input frame can be used directly (zero-copy) ---
// Direct use requires matching buffers/strides and proper alignment.
1146 if (!pic_arg->buf[0] ||
1147 pic_arg->linesize[0] != s->linesize ||
1148 pic_arg->linesize[1] != s->uvlinesize ||
1149 pic_arg->linesize[2] != s->uvlinesize)
1151 if ((s->width & 15) || (s->height & 15))
1153 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1155 if (s->linesize & (STRIDE_ALIGN-1))
1158 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1159 pic_arg->linesize[1], s->linesize, s->uvlinesize);
// Grab a free slot in the internal picture pool.
1161 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1165 pic = &s->picture[i];
// Direct path: just take a reference on the caller's frame.
1169 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1172 ret = alloc_picture(s, pic, direct);
// Sanity check for the in-place (INPLACE_OFFSET) layout.
1177 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1178 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1179 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
// Copy path: plane-by-plane memcpy into freshly allocated buffers.
1182 int h_chroma_shift, v_chroma_shift;
1183 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1187 for (i = 0; i < 3; i++) {
1188 int src_stride = pic_arg->linesize[i];
1189 int dst_stride = i ? s->uvlinesize : s->linesize;
1190 int h_shift = i ? h_chroma_shift : 0;
1191 int v_shift = i ? v_chroma_shift : 0;
1192 int w = s->width >> h_shift;
1193 int h = s->height >> v_shift;
1194 uint8_t *src = pic_arg->data[i];
1195 uint8_t *dst = pic->f->data[i];
// Interlaced MPEG-2 with >16 lines of height padding needs special
// vertical padding handling (condition only; body elided here).
1198 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1199 && !s->progressive_sequence
1200 && FFALIGN(s->height, 32) - s->height > 16)
1203 if (!s->avctx->rc_buffer_size)
1204 dst += INPLACE_OFFSET;
// Same stride: one bulk copy; otherwise copy row by row (loop elided).
1206 if (src_stride == dst_stride)
1207 memcpy(dst, src, src_stride * h);
1210 uint8_t *dst2 = dst;
1212 memcpy(dst2, src, w);
// Pad edges when dimensions are not MB-aligned.
1217 if ((s->width & 15) || (s->height & (vpad-1))) {
1218 s->mpvencdsp.draw_edges(dst, dst_stride,
1228 ret = av_frame_copy_props(pic->f, pic_arg);
1232 pic->f->display_picture_number = display_picture_number;
1233 pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1235 /* Flushing: When we have not received enough input frames,
1236 * ensure s->input_picture[0] contains the first picture */
1237 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1238 if (s->input_picture[flush_offset])
1241 if (flush_offset <= 1)
1244 encoding_delay = encoding_delay - flush_offset + 1;
1247 /* shift buffer entries */
1248 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1249 s->input_picture[i - flush_offset] = s->input_picture[i];
// Insert the new picture at the end of the reorder window.
1251 s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether picture 'p' is similar enough to reference 'ref' to be
 * skipped entirely (frame-skip feature). Computes a per-8x8-block
 * difference score over all three planes, folded according to
 * |s->frame_skip_exp| (0: max, 1: sum of abs, 2: sum of squares, 3: sum of
 * |v^3|, 4: sum of v^4), then compares against frame_skip_threshold and a
 * lambda-scaled frame_skip_factor.
 * NOTE(review): locals ('plane','x','y','score'), the 16-line-block loop
 * details and the return statements are elided in this chunk. */
1256 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1260 int64_t score64 = 0;
1262 for (plane = 0; plane < 3; plane++) {
1263 const int stride = p->f->linesize[plane];
// Luma plane covers 2x2 blocks of 8 pixels per MB, chroma 1x1.
1264 const int bw = plane ? 1 : 2;
1265 for (y = 0; y < s->mb_height * bw; y++) {
1266 for (x = 0; x < s->mb_width * bw; x++) {
// Shared input frames have no INPLACE-style 16-byte offset.
1267 int off = p->shared ? 0 : 16;
1268 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1269 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
// 8x8 comparison using the configured frame-skip metric.
1270 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1272 switch (FFABS(s->frame_skip_exp)) {
1273 case 0: score = FFMAX(score, v); break;
1274 case 1: score += FFABS(v); break;
1275 case 2: score64 += v * (int64_t)v; break;
1276 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1277 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
// Negative exponent: normalize by MB count and take the corresponding root.
1286 if (s->frame_skip_exp < 0)
1287 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1288 -1.0/s->frame_skip_exp);
1290 if (score64 < s->frame_skip_threshold)
1292 if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
/* Helper for estimate_best_b_count(): push one frame (or NULL to flush)
 * through the auxiliary encoder 'c' via the send/receive API.
 * EAGAIN/EOF from receive are not errors; other failures propagate.
 * NOTE(review): the size accumulation and final return are elided here. */
1297 static int encode_frame(AVCodecContext *c, AVFrame *frame, AVPacket *pkt)
1302 ret = avcodec_send_frame(c, frame);
1307 ret = avcodec_receive_packet(c, pkt);
// Got a packet: account for it (elided) and drop the data.
1310 av_packet_unref(pkt);
1311 } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
/* b_frame_strategy == 2: brute-force search for the best number of
 * B-frames. Downscales the queued input pictures by 'brd_scale', encodes
 * each candidate GOP pattern (j B-frames between P-frames) with a
 * temporary encoder, and picks the pattern with the lowest rate-distortion
 * cost (bits * lambda2 + reconstruction error).
 * Returns the best B-frame count, or a negative AVERROR.
 * NOTE(review): many interior lines are elided (rd accumulation, best_rd
 * update, error paths, loop closings). */
1318 static int estimate_best_b_count(MpegEncContext *s)
1320 const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1322 const int scale = s->brd_scale;
// Dimensions of the downscaled probe frames.
1323 int width = s->width >> scale;
1324 int height = s->height >> scale;
1325 int i, j, out_size, p_lambda, b_lambda, lambda2;
1326 int64_t best_rd = INT64_MAX;
1327 int best_b_count = -1;
1330 av_assert0(scale >= 0 && scale <= 3);
1332 pkt = av_packet_alloc();
1334 return AVERROR(ENOMEM);
// Reuse the last-used lambdas as quality targets for the probe encodes.
1337 //s->next_picture_ptr->quality;
1338 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1339 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1340 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1341 if (!b_lambda) // FIXME we should do this somewhere else
1342 b_lambda = p_lambda;
1343 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
// --- downscale the reference + queued input pictures into tmp_frames ---
1346 for (i = 0; i < s->max_b_frames + 2; i++) {
// Slot 0 is the previous reference; slots 1.. are the queued inputs.
1347 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1348 s->next_picture_ptr;
1351 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1352 pre_input = *pre_input_ptr;
1353 memcpy(data, pre_input_ptr->f->data, sizeof(data));
// Non-shared input pictures carry the INPLACE_OFFSET; compensate.
1355 if (!pre_input.shared && i) {
1356 data[0] += INPLACE_OFFSET;
1357 data[1] += INPLACE_OFFSET;
1358 data[2] += INPLACE_OFFSET;
1361 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1362 s->tmp_frames[i]->linesize[0],
1364 pre_input.f->linesize[0],
1366 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1367 s->tmp_frames[i]->linesize[1],
1369 pre_input.f->linesize[1],
1370 width >> 1, height >> 1),
1371 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1372 s->tmp_frames[i]->linesize[2],
1374 pre_input.f->linesize[2],
1375 width >> 1, height >> 1);
// --- try each candidate B-frame count j ---
1379 for (j = 0; j < s->max_b_frames + 1; j++) {
1383 if (!s->input_picture[j])
// Fresh context per candidate so runs don't influence each other.
1386 c = avcodec_alloc_context3(NULL);
1388 ret = AVERROR(ENOMEM);
1394 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1395 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1396 c->mb_decision = s->avctx->mb_decision;
1397 c->me_cmp = s->avctx->me_cmp;
1398 c->mb_cmp = s->avctx->mb_cmp;
1399 c->me_sub_cmp = s->avctx->me_sub_cmp;
1400 c->pix_fmt = AV_PIX_FMT_YUV420P;
1401 c->time_base = s->avctx->time_base;
1402 c->max_b_frames = s->max_b_frames;
1404 ret = avcodec_open2(c, codec, NULL);
// Prime with an I-frame at near-lossless quality; excluded from the cost.
1409 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1410 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1412 out_size = encode_frame(c, s->tmp_frames[0], pkt);
1418 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1420 for (i = 0; i < s->max_b_frames + 1; i++) {
// Every (j+1)-th frame (and the last) is a P-frame; the rest are B.
1421 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1423 s->tmp_frames[i + 1]->pict_type = is_p ?
1424 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1425 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1427 out_size = encode_frame(c, s->tmp_frames[i + 1], pkt);
1433 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1436 /* get the delayed frames */
1437 out_size = encode_frame(c, NULL, pkt);
1442 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
// Add the distortion term from the probe encoder's PSNR accounting.
1444 rd += c->error[0] + c->error[1] + c->error[2];
1452 avcodec_free_context(&c);
1453 av_packet_unref(pkt);
1460 av_packet_free(&pkt);
1462 return best_b_count;
/* Choose the next picture to encode: decides the picture type (I/P/B),
 * applies the frame-skip check and B-frame strategy, fills
 * s->reordered_input_picture[] in coded order, and sets up
 * s->new_picture / s->current_picture_ptr for the actual encode.
 * Returns 0 on success, negative AVERROR on failure.
 * NOTE(review): numerous interior lines are elided in this chunk
 * (b_frames decrementing loops, several braces/else branches, returns). */
1465 static int select_input_picture(MpegEncContext *s)
// Advance the coded-order queue by one slot.
1469 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1470 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1471 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1473 /* set next picture type & ordering */
1474 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
// Frame skipping: drop the picture if it is close enough to the reference.
1475 if (s->frame_skip_threshold || s->frame_skip_factor) {
1476 if (s->picture_in_gop_number < s->gop_size &&
1477 s->next_picture_ptr &&
1478 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1479 // FIXME check that the gop check above is +-1 correct
1480 av_frame_unref(s->input_picture[0]->f);
1482 ff_vbv_update(s, 0);
// No reference yet or intra-only mode: force an I-frame.
1488 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1489 !s->next_picture_ptr || s->intra_only) {
1490 s->reordered_input_picture[0] = s->input_picture[0];
1491 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1492 s->reordered_input_picture[0]->f->coded_picture_number =
1493 s->coded_picture_number++;
// Two-pass mode: picture types come from the first-pass log.
1497 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1498 for (i = 0; i < s->max_b_frames + 1; i++) {
1499 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1501 if (pict_num >= s->rc_context.num_entries)
1503 if (!s->input_picture[i]) {
// Flush: the missing frame's predecessor becomes a P-frame.
1504 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1508 s->input_picture[i]->f->pict_type =
1509 s->rc_context.entry[pict_num].new_pict_type;
// --- choose the number of B-frames per strategy ---
1513 if (s->b_frame_strategy == 0) {
// Fixed: always max_b_frames, reduced only at end of stream.
1514 b_frames = s->max_b_frames;
1515 while (b_frames && !s->input_picture[b_frames])
1517 } else if (s->b_frame_strategy == 1) {
// Heuristic: score frames by their intra-MB count vs the previous frame.
1518 for (i = 1; i < s->max_b_frames + 1; i++) {
1519 if (s->input_picture[i] &&
1520 s->input_picture[i]->b_frame_score == 0) {
1521 s->input_picture[i]->b_frame_score =
1523 s->input_picture[i ]->f->data[0],
1524 s->input_picture[i - 1]->f->data[0],
1528 for (i = 0; i < s->max_b_frames + 1; i++) {
// Stop at the first frame too different to make a good B-frame.
1529 if (!s->input_picture[i] ||
1530 s->input_picture[i]->b_frame_score - 1 >
1531 s->mb_num / s->b_sensitivity)
1535 b_frames = FFMAX(0, i - 1);
// Reset scores of the consumed frames for the next round.
1538 for (i = 0; i < b_frames + 1; i++) {
1539 s->input_picture[i]->b_frame_score = 0;
1541 } else if (s->b_frame_strategy == 2) {
// Exhaustive: probe-encode candidate patterns (see estimate_best_b_count).
1542 b_frames = estimate_best_b_count(s);
// Respect explicitly pre-assigned non-B picture types.
1549 for (i = b_frames - 1; i >= 0; i--) {
1550 int type = s->input_picture[i]->f->pict_type;
1551 if (type && type != AV_PICTURE_TYPE_B)
1554 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1555 b_frames == s->max_b_frames) {
1556 av_log(s->avctx, AV_LOG_ERROR,
1557 "warning, too many B-frames in a row\n");
// GOP boundary handling: trim or force an I-frame at the boundary.
1560 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1561 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1562 s->gop_size > s->picture_in_gop_number) {
1563 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1565 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1567 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1571 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1572 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
// Emit the anchor frame first, then the B-frames, in coded order.
1575 s->reordered_input_picture[0] = s->input_picture[b_frames];
1576 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1577 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1578 s->reordered_input_picture[0]->f->coded_picture_number =
1579 s->coded_picture_number++;
1580 for (i = 0; i < b_frames; i++) {
1581 s->reordered_input_picture[i + 1] = s->input_picture[i];
1582 s->reordered_input_picture[i + 1]->f->pict_type =
1584 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1585 s->coded_picture_number++;
1590 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1592 if (s->reordered_input_picture[0]) {
// Reference flag: 3 for anchor frames (I/P), 0 for B-frames.
1593 s->reordered_input_picture[0]->reference =
1594 s->reordered_input_picture[0]->f->pict_type !=
1595 AV_PICTURE_TYPE_B ? 3 : 0;
1597 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1600 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1601 // input is a shared pix, so we can't modify it -> allocate a new
1602 // one & ensure that the shared one is reuseable
1605 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1608 pic = &s->picture[i];
1610 pic->reference = s->reordered_input_picture[0]->reference;
1611 if (alloc_picture(s, pic, 0) < 0) {
1615 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1619 /* mark us unused / free shared pic */
1620 av_frame_unref(s->reordered_input_picture[0]->f);
1621 s->reordered_input_picture[0]->shared = 0;
1623 s->current_picture_ptr = pic;
1625 // input is not a shared pix -> reuse buffer for current_pix
1626 s->current_picture_ptr = s->reordered_input_picture[0];
1627 for (i = 0; i < 4; i++) {
1628 if (s->new_picture.f->data[i])
1629 s->new_picture.f->data[i] += INPLACE_OFFSET;
1632 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1633 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1634 s->current_picture_ptr)) < 0)
1637 s->picture_number = s->new_picture.f->display_picture_number;
/* Post-encode bookkeeping: draw padding edges around the reconstructed
 * reference frame (so unrestricted MVs can read outside the picture),
 * record the last picture type / lambda, and mirror state into the
 * deprecated coded_frame / error fields.
 * NOTE(review): some interior lines (a condition on line 1646, #endif
 * lines) are elided in this chunk. */
1642 static void frame_end(MpegEncContext *s)
// Edge padding is only needed for reference frames with unrestricted MVs.
1644 if (s->unrestricted_mv &&
1645 s->current_picture.reference &&
1647 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1648 int hshift = desc->log2_chroma_w;
1649 int vshift = desc->log2_chroma_h;
// Luma plane: full-size edges.
1650 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1651 s->current_picture.f->linesize[0],
1652 s->h_edge_pos, s->v_edge_pos,
1653 EDGE_WIDTH, EDGE_WIDTH,
1654 EDGE_TOP | EDGE_BOTTOM);
// Chroma planes: dimensions and edge width scaled by the subsampling.
1655 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1656 s->current_picture.f->linesize[1],
1657 s->h_edge_pos >> hshift,
1658 s->v_edge_pos >> vshift,
1659 EDGE_WIDTH >> hshift,
1660 EDGE_WIDTH >> vshift,
1661 EDGE_TOP | EDGE_BOTTOM);
1662 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1663 s->current_picture.f->linesize[2],
1664 s->h_edge_pos >> hshift,
1665 s->v_edge_pos >> vshift,
1666 EDGE_WIDTH >> hshift,
1667 EDGE_WIDTH >> vshift,
1668 EDGE_TOP | EDGE_BOTTOM);
// Remember per-type state used by rate control and dts generation.
1673 s->last_pict_type = s->pict_type;
1674 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1675 if (s->pict_type!= AV_PICTURE_TYPE_B)
1676 s->last_non_b_pict_type = s->pict_type;
// Legacy API mirrors, kept while the deprecated fields still exist.
1678 #if FF_API_CODED_FRAME
1679 FF_DISABLE_DEPRECATION_WARNINGS
1680 av_frame_unref(s->avctx->coded_frame);
1681 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1682 FF_ENABLE_DEPRECATION_WARNINGS
1684 #if FF_API_ERROR_FRAME
1685 FF_DISABLE_DEPRECATION_WARNINGS
1686 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1687 sizeof(s->current_picture.encoding_error));
1688 FF_ENABLE_DEPRECATION_WARNINGS
/* Refresh the per-coefficient DCT noise-reduction offsets for intra and
 * inter blocks from the accumulated error statistics. Counters are halved
 * once they exceed 2^16 so the statistics track recent content. */
1692 static void update_noise_reduction(MpegEncContext *s)
1696 for (intra = 0; intra < 2; intra++) {
// Decay: halve both the error sums and the sample count periodically.
1697 if (s->dct_count[intra] > (1 << 16)) {
1698 for (i = 0; i < 64; i++) {
1699 s->dct_error_sum[intra][i] >>= 1;
1701 s->dct_count[intra] >>= 1;
// offset[i] = noise_reduction * count / error_sum[i], rounded; the +1
// in the denominator avoids division by zero.
1704 for (i = 0; i < 64; i++) {
1705 s->dct_offset[intra][i] = (s->noise_reduction *
1706 s->dct_count[intra] +
1707 s->dct_error_sum[intra][i] / 2) /
1708 (s->dct_error_sum[intra][i] + 1);
/* Per-frame setup before encoding: rotate last/next/current reference
 * pictures, adjust plane pointers/strides for field pictures, select the
 * dequantizer functions for the output format, and update noise-reduction
 * tables. Returns 0 on success, negative AVERROR on failure.
 * NOTE(review): some interior lines (returns, a few braces) are elided. */
1713 static int frame_start(MpegEncContext *s)
1717 /* mark & release old frames */
1718 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1719 s->last_picture_ptr != s->next_picture_ptr &&
1720 s->last_picture_ptr->f->buf[0]) {
1721 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1724 s->current_picture_ptr->f->pict_type = s->pict_type;
1725 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1727 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1728 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1729 s->current_picture_ptr)) < 0)
// Anchor frames advance the reference chain; B-frames do not.
1732 if (s->pict_type != AV_PICTURE_TYPE_B) {
1733 s->last_picture_ptr = s->next_picture_ptr;
1735 s->next_picture_ptr = s->current_picture_ptr;
1738 if (s->last_picture_ptr) {
1739 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1740 if (s->last_picture_ptr->f->buf[0] &&
1741 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1742 s->last_picture_ptr)) < 0)
1745 if (s->next_picture_ptr) {
1746 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1747 if (s->next_picture_ptr->f->buf[0] &&
1748 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1749 s->next_picture_ptr)) < 0)
// Field pictures: double the strides and, for the bottom field, start one
// line down, so each field is addressed as a half-height frame.
1753 if (s->picture_structure!= PICT_FRAME) {
1755 for (i = 0; i < 4; i++) {
1756 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1757 s->current_picture.f->data[i] +=
1758 s->current_picture.f->linesize[i];
1760 s->current_picture.f->linesize[i] *= 2;
1761 s->last_picture.f->linesize[i] *= 2;
1762 s->next_picture.f->linesize[i] *= 2;
// Pick dequantizers matching the bitstream's quantization rules.
1766 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1767 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1768 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1769 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1770 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1771 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1773 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1774 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1777 if (s->dct_error_sum) {
1778 av_assert2(s->noise_reduction && s->encoding);
1779 update_noise_reduction(s);
/* Public entry point: encode one input frame (or flush with NULL) into
 * 'pkt'. Orchestrates input queueing, picture selection, the actual
 * encode, VBV-driven re-encode, stuffing, vbv_delay patching for CBR
 * MPEG-1/2, and packet pts/dts/flags finalization.
 * Returns 0 on success (with *got_packet set), negative AVERROR on error.
 * NOTE(review): many interior lines are elided in this chunk (error
 * returns, 'goto vbv_retry' target, some braces/#endif lines). */
1785 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1786 const AVFrame *pic_arg, int *got_packet)
1788 MpegEncContext *s = avctx->priv_data;
1789 int i, stuffing_count, ret;
1790 int context_count = s->slice_context_count;
1792 s->vbv_ignore_qmax = 0;
1794 s->picture_in_gop_number++;
// Queue the input frame, then pick what to encode next (may be none).
1796 if (load_input_picture(s, pic_arg) < 0)
1799 if (select_input_picture(s) < 0) {
1804 if (s->new_picture.f->data[0]) {
// Single-slice encodes can grow the internal buffer instead of
// pre-allocating the worst case.
1805 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1806 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1808 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1809 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
// Optional H.263 macroblock-info side data (12 bytes per MB).
1812 s->mb_info_ptr = av_packet_new_side_data(pkt,
1813 AV_PKT_DATA_H263_MB_INFO,
1814 s->mb_width*s->mb_height*12);
1815 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
// Split the output buffer proportionally among slice threads.
1818 for (i = 0; i < context_count; i++) {
1819 int start_y = s->thread_context[i]->start_mb_y;
1820 int end_y = s->thread_context[i]-> end_mb_y;
1821 int h = s->mb_height;
1822 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1823 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1825 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1828 s->pict_type = s->new_picture.f->pict_type;
1830 ret = frame_start(s);
1834 ret = encode_picture(s, s->picture_number);
1835 if (growing_buffer) {
// The internal buffer may have been reallocated; resync pkt pointers.
1836 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1837 pkt->data = s->pb.buf;
1838 pkt->size = avctx->internal->byte_buffer_size;
// Mirror bit-accounting stats into the deprecated avctx fields.
1843 #if FF_API_STAT_BITS
1844 FF_DISABLE_DEPRECATION_WARNINGS
1845 avctx->header_bits = s->header_bits;
1846 avctx->mv_bits = s->mv_bits;
1847 avctx->misc_bits = s->misc_bits;
1848 avctx->i_tex_bits = s->i_tex_bits;
1849 avctx->p_tex_bits = s->p_tex_bits;
1850 avctx->i_count = s->i_count;
1851 // FIXME f/b_count in avctx
1852 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1853 avctx->skip_count = s->skip_count;
1854 FF_ENABLE_DEPRECATION_WARNINGS
1859 if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->out_format == FMT_MJPEG)
1860 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
// --- VBV compliance: re-encode at a higher lambda if the frame is too big ---
1862 if (avctx->rc_buffer_size) {
1863 RateControlContext *rcc = &s->rc_context;
1864 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1865 int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
1866 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1868 if (put_bits_count(&s->pb) > max_size &&
1869 s->lambda < s->lmax) {
// Raise lambda by at least min_step (scaled by (q+1)/q) and retry.
1870 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1871 (s->qscale + 1) / s->qscale);
1872 if (s->adaptive_quant) {
1874 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1875 s->lambda_table[i] =
1876 FFMAX(s->lambda_table[i] + min_step,
1877 s->lambda_table[i] * (s->qscale + 1) /
1880 s->mb_skipped = 0; // done in frame_start()
1881 // done in encode_picture() so we must undo it
1882 if (s->pict_type == AV_PICTURE_TYPE_P) {
1883 if (s->flipflop_rounding ||
1884 s->codec_id == AV_CODEC_ID_H263P ||
1885 s->codec_id == AV_CODEC_ID_MPEG4)
1886 s->no_rounding ^= 1;
1888 if (s->pict_type != AV_PICTURE_TYPE_B) {
1889 s->time_base = s->last_time_base;
1890 s->last_non_b_time = s->time - s->pp_time;
// Rewind all slice bit writers to the start of their regions.
1892 for (i = 0; i < context_count; i++) {
1893 PutBitContext *pb = &s->thread_context[i]->pb;
1894 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1896 s->vbv_ignore_qmax = 1;
1897 av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1901 av_assert0(avctx->rc_max_rate);
1904 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1905 ff_write_pass1_stats(s);
// Accumulate per-plane encoding error (PSNR support).
1907 for (i = 0; i < 4; i++) {
1908 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1909 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1911 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1912 s->current_picture_ptr->encoding_error,
1913 (avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1916 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1917 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1918 s->misc_bits + s->i_tex_bits +
1920 flush_put_bits(&s->pb);
1921 s->frame_bits = put_bits_count(&s->pb);
// Rate control may request zero-stuffing to keep the VBV buffer legal.
1923 stuffing_count = ff_vbv_update(s, s->frame_bits);
1924 s->stuffing_bits = 8*stuffing_count;
1925 if (stuffing_count) {
1926 if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
1927 av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
1931 switch (s->codec_id) {
1932 case AV_CODEC_ID_MPEG1VIDEO:
1933 case AV_CODEC_ID_MPEG2VIDEO:
// MPEG-1/2: stuffing is plain zero bytes.
1934 while (stuffing_count--) {
1935 put_bits(&s->pb, 8, 0);
1938 case AV_CODEC_ID_MPEG4:
// MPEG-4: stuffing via a 0x000001C3 (stuffing start code) + 0xFF bytes.
1939 put_bits(&s->pb, 16, 0);
1940 put_bits(&s->pb, 16, 0x1C3);
1941 stuffing_count -= 4;
1942 while (stuffing_count--) {
1943 put_bits(&s->pb, 8, 0xFF);
1947 av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1949 flush_put_bits(&s->pb);
1950 s->frame_bits = put_bits_count(&s->pb);
1953 /* update MPEG-1/2 vbv_delay for CBR */
1954 if (avctx->rc_max_rate &&
1955 avctx->rc_min_rate == avctx->rc_max_rate &&
1956 s->out_format == FMT_MPEG1 &&
// Only when the delay fits the 16-bit vbv_delay field (90 kHz units).
1957 90000LL * (avctx->rc_buffer_size - 1) <=
1958 avctx->rc_max_rate * 0xFFFFLL) {
1959 AVCPBProperties *props;
1962 int vbv_delay, min_delay;
1963 double inbits = avctx->rc_max_rate *
1964 av_q2d(avctx->time_base);
1965 int minbits = s->frame_bits - 8 *
1966 (s->vbv_delay_ptr - s->pb.buf - 1);
1967 double bits = s->rc_context.buffer_index + minbits - inbits;
1970 av_log(avctx, AV_LOG_ERROR,
1971 "Internal error, negative bits\n");
1973 av_assert1(s->repeat_first_field == 0);
1975 vbv_delay = bits * 90000 / avctx->rc_max_rate;
1976 min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
1979 vbv_delay = FFMAX(vbv_delay, min_delay);
1981 av_assert0(vbv_delay < 0xFFFF);
// Patch the 16-bit vbv_delay directly into the already-written header
// (it straddles byte boundaries: 3+8+5 bits).
1983 s->vbv_delay_ptr[0] &= 0xF8;
1984 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1985 s->vbv_delay_ptr[1] = vbv_delay >> 5;
1986 s->vbv_delay_ptr[2] &= 0x07;
1987 s->vbv_delay_ptr[2] |= vbv_delay << 3;
1989 props = av_cpb_properties_alloc(&props_size);
1991 return AVERROR(ENOMEM);
// vbv_delay side data is in 27 MHz units (90 kHz * 300).
1992 props->vbv_delay = vbv_delay * 300;
1994 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
1995 (uint8_t*)props, props_size);
2001 #if FF_API_VBV_DELAY
2002 FF_DISABLE_DEPRECATION_WARNINGS
2003 avctx->vbv_delay = vbv_delay * 300;
2004 FF_ENABLE_DEPRECATION_WARNINGS
2007 s->total_bits += s->frame_bits;
2008 #if FF_API_STAT_BITS
2009 FF_DISABLE_DEPRECATION_WARNINGS
2010 avctx->frame_bits = s->frame_bits;
2011 FF_ENABLE_DEPRECATION_WARNINGS
// --- finalize the output packet ---
2015 pkt->pts = s->current_picture.f->pts;
2016 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
// Anchor frames: dts lags pts by one reorder step.
2017 if (!s->current_picture.f->coded_picture_number)
2018 pkt->dts = pkt->pts - s->dts_delta;
2020 pkt->dts = s->reordered_pts;
2021 s->reordered_pts = pkt->pts;
2023 pkt->dts = pkt->pts;
2024 if (s->current_picture.f->key_frame)
2025 pkt->flags |= AV_PKT_FLAG_KEY;
2027 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2032 /* release non-reference frames */
2033 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2034 if (!s->picture[i].reference)
2035 ff_mpeg_unref_picture(avctx, &s->picture[i]);
2038 av_assert1((s->frame_bits & 7) == 0);
2040 pkt->size = s->frame_bits / 8;
2041 *got_packet = !!pkt->size;
/* Zero out a block whose quantized coefficients are so weak that coding
 * them costs more than dropping them: each nonzero coefficient adds a
 * position-dependent weight from 'tab' (heavier near DC, zero past the
 * first 3 rows of scan order); a level > 1 anywhere aborts elimination.
 * If the total score stays below 'threshold' the block (optionally minus
 * its DC for intra, via skip_dc / negative threshold) is cleared.
 * NOTE(review): 'skip_dc'/'score' declarations and a few branch bodies
 * are elided in this chunk. */
2045 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2046 int n, int threshold)
// Per-scan-position cost weights: only the first 24 scan positions count.
2048 static const char tab[64] = {
2049 3, 2, 2, 1, 1, 1, 1, 1,
2050 1, 1, 1, 1, 1, 1, 1, 1,
2051 1, 1, 1, 1, 1, 1, 1, 1,
2052 0, 0, 0, 0, 0, 0, 0, 0,
2053 0, 0, 0, 0, 0, 0, 0, 0,
2054 0, 0, 0, 0, 0, 0, 0, 0,
2055 0, 0, 0, 0, 0, 0, 0, 0,
2056 0, 0, 0, 0, 0, 0, 0, 0
2061 int16_t *block = s->block[n];
2062 const int last_index = s->block_last_index[n];
// Negative threshold encodes "preserve the DC coefficient".
2065 if (threshold < 0) {
2067 threshold = -threshold;
2071 /* Are all we could set to zero already zero? */
2072 if (last_index <= skip_dc - 1)
2075 for (i = 0; i <= last_index; i++) {
2076 const int j = s->intra_scantable.permutated[i];
2077 const int level = FFABS(block[j]);
2079 if (skip_dc && i == 0)
2083 } else if (level > 1) {
// Any coefficient with |level| > 1 is worth keeping: bail out (elided).
2089 if (score >= threshold)
// Clear all eliminable coefficients and fix up block_last_index.
2091 for (i = skip_dc; i <= last_index; i++) {
2092 const int j = s->intra_scantable.permutated[i];
2096 s->block_last_index[n] = 0;
2098 s->block_last_index[n] = -1;
/* Clamp quantized coefficients into the codec's representable range
 * [min_qcoeff, max_qcoeff]; for intra blocks the DC coefficient is left
 * untouched. Logs a warning in simple MB-decision mode when clipping
 * occurred (RD mode handles overflow elsewhere).
 * NOTE(review): 'i'/'overflow' declarations and the clamp assignments
 * themselves are elided in this chunk. */
2101 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2105 const int maxlevel = s->max_qcoeff;
2106 const int minlevel = s->min_qcoeff;
2110 i = 1; // skip clipping of intra dc
2114 for (; i <= last_index; i++) {
2115 const int j = s->intra_scantable.permutated[i];
2116 int level = block[j];
2118 if (level > maxlevel) {
2121 } else if (level < minlevel) {
2129 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2130 av_log(s->avctx, AV_LOG_INFO,
2131 "warning, clipping %d dct coefficients to %d..%d\n",
2132 overflow, minlevel, maxlevel);
/* Compute a perceptual weight for each pixel of an 8x8 block from the
 * local variance of its 3x3 neighbourhood (clamped at the block border):
 * weight = 36*sqrt(count*sum(v^2) - sum(v)^2)/count. Flat areas get small
 * weights (quantization noise is more visible there). Used by the
 * noise-shaping quantizer (dct_quantize_refine).
 * NOTE(review): loop locals and the sum/sqr accumulation lines are elided
 * in this chunk. */
2135 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2139 for (y = 0; y < 8; y++) {
2140 for (x = 0; x < 8; x++) {
// 3x3 neighbourhood, clipped to the 8x8 block bounds.
2146 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2147 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2148 int v = ptr[x2 + y2 * stride];
// Scaled standard deviation of the neighbourhood (count in [4,9]).
2154 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2159 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2160 int motion_x, int motion_y,
2161 int mb_block_height,
2165 int16_t weight[12][64];
2166 int16_t orig[12][64];
2167 const int mb_x = s->mb_x;
2168 const int mb_y = s->mb_y;
2171 int dct_offset = s->linesize * 8; // default for progressive frames
2172 int uv_dct_offset = s->uvlinesize * 8;
2173 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2174 ptrdiff_t wrap_y, wrap_c;
2176 for (i = 0; i < mb_block_count; i++)
2177 skip_dct[i] = s->skipdct;
2179 if (s->adaptive_quant) {
2180 const int last_qp = s->qscale;
2181 const int mb_xy = mb_x + mb_y * s->mb_stride;
2183 s->lambda = s->lambda_table[mb_xy];
2186 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2187 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2188 s->dquant = s->qscale - last_qp;
2190 if (s->out_format == FMT_H263) {
2191 s->dquant = av_clip(s->dquant, -2, 2);
2193 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2195 if (s->pict_type == AV_PICTURE_TYPE_B) {
2196 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2199 if (s->mv_type == MV_TYPE_8X8)
2205 ff_set_qscale(s, last_qp + s->dquant);
2206 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2207 ff_set_qscale(s, s->qscale + s->dquant);
2209 wrap_y = s->linesize;
2210 wrap_c = s->uvlinesize;
2211 ptr_y = s->new_picture.f->data[0] +
2212 (mb_y * 16 * wrap_y) + mb_x * 16;
2213 ptr_cb = s->new_picture.f->data[1] +
2214 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2215 ptr_cr = s->new_picture.f->data[2] +
2216 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2218 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2219 uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2220 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2221 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2222 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2224 16, 16, mb_x * 16, mb_y * 16,
2225 s->width, s->height);
2227 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2229 mb_block_width, mb_block_height,
2230 mb_x * mb_block_width, mb_y * mb_block_height,
2232 ptr_cb = ebuf + 16 * wrap_y;
2233 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2235 mb_block_width, mb_block_height,
2236 mb_x * mb_block_width, mb_y * mb_block_height,
2238 ptr_cr = ebuf + 16 * wrap_y + 16;
2242 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2243 int progressive_score, interlaced_score;
2245 s->interlaced_dct = 0;
2246 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2247 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2248 NULL, wrap_y, 8) - 400;
2250 if (progressive_score > 0) {
2251 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2252 NULL, wrap_y * 2, 8) +
2253 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2254 NULL, wrap_y * 2, 8);
2255 if (progressive_score > interlaced_score) {
2256 s->interlaced_dct = 1;
2258 dct_offset = wrap_y;
2259 uv_dct_offset = wrap_c;
2261 if (s->chroma_format == CHROMA_422 ||
2262 s->chroma_format == CHROMA_444)
2268 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2269 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2270 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2271 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2273 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2277 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2278 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2279 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2280 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2281 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2282 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2283 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2284 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2285 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2286 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2287 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2288 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2292 op_pixels_func (*op_pix)[4];
2293 qpel_mc_func (*op_qpix)[16];
2294 uint8_t *dest_y, *dest_cb, *dest_cr;
2296 dest_y = s->dest[0];
2297 dest_cb = s->dest[1];
2298 dest_cr = s->dest[2];
2300 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2301 op_pix = s->hdsp.put_pixels_tab;
2302 op_qpix = s->qdsp.put_qpel_pixels_tab;
2304 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2305 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2308 if (s->mv_dir & MV_DIR_FORWARD) {
2309 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2310 s->last_picture.f->data,
2312 op_pix = s->hdsp.avg_pixels_tab;
2313 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2315 if (s->mv_dir & MV_DIR_BACKWARD) {
2316 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2317 s->next_picture.f->data,
2321 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2322 int progressive_score, interlaced_score;
2324 s->interlaced_dct = 0;
2325 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2326 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2330 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2331 progressive_score -= 400;
2333 if (progressive_score > 0) {
2334 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2336 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2340 if (progressive_score > interlaced_score) {
2341 s->interlaced_dct = 1;
2343 dct_offset = wrap_y;
2344 uv_dct_offset = wrap_c;
2346 if (s->chroma_format == CHROMA_422)
2352 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2353 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2354 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2355 dest_y + dct_offset, wrap_y);
2356 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2357 dest_y + dct_offset + 8, wrap_y);
2359 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2363 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2364 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2365 if (!s->chroma_y_shift) { /* 422 */
2366 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2367 dest_cb + uv_dct_offset, wrap_c);
2368 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2369 dest_cr + uv_dct_offset, wrap_c);
2372 /* pre quantization */
2373 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2374 2 * s->qscale * s->qscale) {
2376 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2378 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2380 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2381 wrap_y, 8) < 20 * s->qscale)
2383 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2384 wrap_y, 8) < 20 * s->qscale)
2386 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2388 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2390 if (!s->chroma_y_shift) { /* 422 */
2391 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2392 dest_cb + uv_dct_offset,
2393 wrap_c, 8) < 20 * s->qscale)
2395 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2396 dest_cr + uv_dct_offset,
2397 wrap_c, 8) < 20 * s->qscale)
2403 if (s->quantizer_noise_shaping) {
2405 get_visual_weight(weight[0], ptr_y , wrap_y);
2407 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2409 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2411 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2413 get_visual_weight(weight[4], ptr_cb , wrap_c);
2415 get_visual_weight(weight[5], ptr_cr , wrap_c);
2416 if (!s->chroma_y_shift) { /* 422 */
2418 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2421 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2424 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2427 /* DCT & quantize */
2428 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2430 for (i = 0; i < mb_block_count; i++) {
2433 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2434 // FIXME we could decide to change to quantizer instead of
2436 // JS: I don't think that would be a good idea it could lower
2437 // quality instead of improve it. Just INTRADC clipping
2438 // deserves changes in quantizer
2440 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2442 s->block_last_index[i] = -1;
2444 if (s->quantizer_noise_shaping) {
2445 for (i = 0; i < mb_block_count; i++) {
2447 s->block_last_index[i] =
2448 dct_quantize_refine(s, s->block[i], weight[i],
2449 orig[i], i, s->qscale);
2454 if (s->luma_elim_threshold && !s->mb_intra)
2455 for (i = 0; i < 4; i++)
2456 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2457 if (s->chroma_elim_threshold && !s->mb_intra)
2458 for (i = 4; i < mb_block_count; i++)
2459 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2461 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2462 for (i = 0; i < mb_block_count; i++) {
2463 if (s->block_last_index[i] == -1)
2464 s->coded_score[i] = INT_MAX / 256;
2469 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2470 s->block_last_index[4] =
2471 s->block_last_index[5] = 0;
2473 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2474 if (!s->chroma_y_shift) { /* 422 / 444 */
2475 for (i=6; i<12; i++) {
2476 s->block_last_index[i] = 0;
2477 s->block[i][0] = s->block[4][0];
2482 // non c quantize code returns incorrect block_last_index FIXME
2483 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2484 for (i = 0; i < mb_block_count; i++) {
2486 if (s->block_last_index[i] > 0) {
2487 for (j = 63; j > 0; j--) {
2488 if (s->block[i][s->intra_scantable.permutated[j]])
2491 s->block_last_index[i] = j;
2496 /* huffman encode */
2497 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2498 case AV_CODEC_ID_MPEG1VIDEO:
2499 case AV_CODEC_ID_MPEG2VIDEO:
2500 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2501 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2503 case AV_CODEC_ID_MPEG4:
2504 if (CONFIG_MPEG4_ENCODER)
2505 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2507 case AV_CODEC_ID_MSMPEG4V2:
2508 case AV_CODEC_ID_MSMPEG4V3:
2509 case AV_CODEC_ID_WMV1:
2510 if (CONFIG_MSMPEG4_ENCODER)
2511 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2513 case AV_CODEC_ID_WMV2:
2514 if (CONFIG_WMV2_ENCODER)
2515 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2517 case AV_CODEC_ID_H261:
2518 if (CONFIG_H261_ENCODER)
2519 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2521 case AV_CODEC_ID_H263:
2522 case AV_CODEC_ID_H263P:
2523 case AV_CODEC_ID_FLV1:
2524 case AV_CODEC_ID_RV10:
2525 case AV_CODEC_ID_RV20:
2526 if (CONFIG_H263_ENCODER)
2527 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2529 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
2530 case AV_CODEC_ID_MJPEG:
2531 case AV_CODEC_ID_AMV:
2532 ff_mjpeg_encode_mb(s, s->block);
2535 case AV_CODEC_ID_SPEEDHQ:
2536 if (CONFIG_SPEEDHQ_ENCODER)
2537 ff_speedhq_encode_mb(s, s->block);
/* Encode one macroblock: dispatch to encode_mb_internal() with the block
 * geometry matching the chroma subsampling (last args: chroma MB height,
 * chroma MB width, total block count — 6/8/12 for 4:2:0 / 4:2:2 / 4:4:4).
 * NOTE(review): this extract is non-contiguous (embedded line numbers skip);
 * the function's surrounding braces are among the elided lines. */
2544 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2546     if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2547     else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2548     else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/* Snapshot the encoder state that a candidate macroblock coding may modify:
 * MV predictors, DC predictors, per-category bit counters and quantizer
 * state are copied from s into d so a trial encode can later be undone.
 * NOTE(review): several original lines are elided in this extract
 * (non-contiguous embedded numbering) — e.g. the loop around last_dc[]. */
2551 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2554     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2557     d->mb_skip_run= s->mb_skip_run;
2559         d->last_dc[i] = s->last_dc[i];
/* statistics — bit-usage counters used for rate control / 2-pass logging */
2562     d->mv_bits= s->mv_bits;
2563     d->i_tex_bits= s->i_tex_bits;
2564     d->p_tex_bits= s->p_tex_bits;
2565     d->i_count= s->i_count;
2566     d->f_count= s->f_count;
2567     d->b_count= s->b_count;
2568     d->skip_count= s->skip_count;
2569     d->misc_bits= s->misc_bits;
2573     d->qscale= s->qscale;
2574     d->dquant= s->dquant;
2576     d->esc3_level_length= s->esc3_level_length;
/* Mirror of copy_context_before_encode(): after a trial encode, copy the
 * resulting state (including the chosen mb_intra/mv_type/mv_dir and the
 * per-block last indices) from s into d.  Used both to record the best
 * candidate and to restore it once all candidates were tried.
 * NOTE(review): some original lines are elided in this extract. */
2579 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2582     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2583     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2586     d->mb_skip_run= s->mb_skip_run;
2588         d->last_dc[i] = s->last_dc[i];
/* statistics — bit-usage counters (same set as in copy_context_before_encode) */
2591     d->mv_bits= s->mv_bits;
2592     d->i_tex_bits= s->i_tex_bits;
2593     d->p_tex_bits= s->p_tex_bits;
2594     d->i_count= s->i_count;
2595     d->f_count= s->f_count;
2596     d->b_count= s->b_count;
2597     d->skip_count= s->skip_count;
2598     d->misc_bits= s->misc_bits;
/* the macroblock-mode decision reached by the trial encode */
2600     d->mb_intra= s->mb_intra;
2601     d->mb_skipped= s->mb_skipped;
2602     d->mv_type= s->mv_type;
2603     d->mv_dir= s->mv_dir;
2605     if(s->data_partitioning){
2607         d->tex_pb= s->tex_pb;
2611         d->block_last_index[i]= s->block_last_index[i];
2612     d->interlaced_dct= s->interlaced_dct;
2613     d->qscale= s->qscale;
2615     d->esc3_level_length= s->esc3_level_length;
/* Trial-encode the current macroblock with coding mode `type` into the
 * scratch PutBitContexts pb/pb2/tex_pb[*next_block], score the result
 * (bits, or lambda2*bits + SSE when mb_decision==FF_MB_DECISION_RD with
 * reconstruction into the rd_scratchpad), and keep it in *best if it beats
 * *dmin.  State is restored from *backup before the trial.
 * NOTE(review): the score-comparison / *next_block toggle lines are elided
 * in this extract (non-contiguous embedded numbering). */
2618 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2619                            PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2620                            int *dmin, int *next_block, int motion_x, int motion_y)
2623     uint8_t *dest_backup[3];
2625     copy_context_before_encode(s, backup, type);
/* redirect block storage and bitstream output to the scratch slot */
2627     s->block= s->blocks[*next_block];
2628     s->pb= pb[*next_block];
2629     if(s->data_partitioning){
2630         s->pb2   = pb2   [*next_block];
2631         s->tex_pb= tex_pb[*next_block];
/* for RD decision: reconstruct into the scratchpad, not the real frame */
2635         memcpy(dest_backup, s->dest, sizeof(s->dest));
2636         s->dest[0] = s->sc.rd_scratchpad;
2637         s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2638         s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2639         av_assert0(s->linesize >= 32); //FIXME
2642     encode_mb(s, motion_x, motion_y);
2644     score= put_bits_count(&s->pb);
2645     if(s->data_partitioning){
2646         score+= put_bits_count(&s->pb2);
2647         score+= put_bits_count(&s->tex_pb);
2650     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2651         ff_mpv_reconstruct_mb(s, s->block);
/* RD cost = lambda2 * bits + SSE (distortion), in FF_LAMBDA fixed point */
2653         score *= s->lambda2;
2654         score += sse_mb(s) << FF_LAMBDA_SHIFT;
2658         memcpy(s->dest, dest_backup, sizeof(s->dest));
2665         copy_context_after_encode(best, s, type);
/* Sum of squared errors between two w x h pixel regions.  Uses the DSP
 * fast paths via s->mecc.sse for 16x16 and 8x8, otherwise falls back to a
 * scalar loop using the squared-difference lookup table (ff_square_tab is
 * biased by 256 so negative differences index correctly).
 * NOTE(review): the 16x16 `if` condition and the acc declaration/return
 * are among the lines elided in this extract. */
2669 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2670     const uint32_t *sq = ff_square_tab + 256;
2675         return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2676     else if(w==8 && h==8)
2677         return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2681             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/* SSE between the source macroblock (new_picture) and the reconstruction
 * in s->dest[], clipped to the frame edges.  Full 16x16 MBs use the fast
 * mecc.sse/nsse kernels (NSSE when mb_cmp == FF_CMP_NSSE); partial edge
 * MBs fall through to the generic sse() with the clipped w/h. */
2690 static int sse_mb(MpegEncContext *s){
/* clip MB extent at the right/bottom picture border */
2694     if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2695     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2698         if(s->avctx->mb_cmp == FF_CMP_NSSE){
2699             return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize   * 16, s->dest[0], s->linesize,   16) +
2700                    s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[1], s->uvlinesize,  8) +
2701                    s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[2], s->uvlinesize,  8);
2703             return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize   * 16, s->dest[0], s->linesize,   16) +
2704                    s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[1], s->uvlinesize,  8) +
2705                    s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[2], s->uvlinesize,  8);
/* edge macroblock: generic path, chroma at half resolution (4:2:0 assumed here) */
2708         return  sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2709                +sse(s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2710                +sse(s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/* Slice-threading worker: motion-estimation pre-pass over this thread's
 * MB rows, iterating bottom-up / right-to-left, using the pre-pass
 * diamond size (avctx->pre_dia_size). */
2713 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2714     MpegEncContext *s= *(void**)arg;
2718     s->me.dia_size= s->avctx->pre_dia_size;
2719     s->first_slice_line=1;
2720     for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2721         for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2722             ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2724         s->first_slice_line=0;
/* Slice-threading worker: full motion estimation for this thread's MB rows.
 * Advances the four luma block indices by 2 per MB (block positions in the
 * per-row index arrays), then runs B- or P-frame ME per macroblock. */
2732 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2733     MpegEncContext *s= *(void**)arg;
2735     s->me.dia_size= s->avctx->dia_size;
2736     s->first_slice_line=1;
2737     for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2738         s->mb_x=0; //for block init below
2739         ff_init_block_index(s);
2740         for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2741             s->block_index[0]+=2;
2742             s->block_index[1]+=2;
2743             s->block_index[2]+=2;
2744             s->block_index[3]+=2;
2746             /* compute motion vector & mb_type and store in context */
2747             if(s->pict_type==AV_PICTURE_TYPE_B)
2748                 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2750                 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2752         s->first_slice_line=0;
/* Slice-threading worker: per-MB luma variance and mean for this thread's
 * rows, stored into current_picture.mb_var / mb_mean and accumulated into
 * me.mb_var_sum_temp (merged across threads later).  varc is the sample
 * variance scaled/rounded into 8-bit fixed point. */
2757 static int mb_var_thread(AVCodecContext *c, void *arg){
2758     MpegEncContext *s= *(void**)arg;
2761     for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2762         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2765             uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2767             int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
/* variance = E[x^2] - E[x]^2, with +500 and +128 rounding biases */
2769             varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2770                     (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2772             s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2773             s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2774             s->me.mb_var_sum_temp    += varc;
/* Finish the current slice: codec-specific partition merging / stuffing
 * (MPEG-4, MJPEG, SpeedHQ), then byte-align and flush the bitstream.
 * For 2-pass (PASS1) encoding the alignment bits are charged to misc_bits. */
2780 static void write_slice_end(MpegEncContext *s){
2781     if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2782         if(s->partitioned_frame){
2783             ff_mpeg4_merge_partitions(s);
2786         ff_mpeg4_stuffing(&s->pb);
2787     } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
2788                s->out_format == FMT_MJPEG) {
2789         ff_mjpeg_encode_stuffing(s);
2790     } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
2791         ff_speedhq_end_slice(s);
2794     flush_put_bits(&s->pb);
2796     if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2797         s->misc_bits+= get_bits_diff(s);
/* Fill in the most recent 12-byte mb_info record (used for H.263-style
 * error-resilient packetization side data): bit offset of this MB, qscale,
 * GOB number, MB address within the GOB, and the H.263 MV predictors.
 * The record was reserved at the end of s->mb_info_ptr by update_mb_info(). */
2800 static void write_mb_info(MpegEncContext *s)
2802     uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2803     int offset = put_bits_count(&s->pb);
2804     int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2805     int gobn = s->mb_y / s->gob_index;
2807     if (CONFIG_H263_ENCODER)
2808         ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2809     bytestream_put_le32(&ptr, offset);
2810     bytestream_put_byte(&ptr, s->qscale);
2811     bytestream_put_byte(&ptr, gobn);
2812     bytestream_put_le16(&ptr, mba);
2813     bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2814     bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2815     /* 4MV not implemented */
2816     bytestream_put_byte(&ptr, 0); /* hmv2 */
2817     bytestream_put_byte(&ptr, 0); /* vmv2 */
/* Maintain the mb_info side-data records: reserve a new 12-byte slot each
 * time s->mb_info bytes of bitstream have been produced since the last
 * record, and (with startcode set) re-anchor the record at the position
 * right after a start code.  Actual record contents are written by
 * write_mb_info().
 * NOTE(review): several control-flow lines (early returns, the startcode
 * branch) are elided in this extract — non-contiguous embedded numbering. */
2820 static void update_mb_info(MpegEncContext *s, int startcode)
2824     if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
2825         s->mb_info_size += 12;
2826         s->prev_mb_info = s->last_mb_info;
2829         s->prev_mb_info = put_bytes_count(&s->pb, 0);
2830         /* This might have incremented mb_info_size above, and we return without
2831          * actually writing any info into that slot yet. But in that case,
2832          * this will be called again at the start of the after writing the
2833          * start code, actually writing the mb info. */
2837     s->last_mb_info = put_bytes_count(&s->pb, 0);
2838     if (!s->mb_info_size)
2839         s->mb_info_size += 12;
/* Grow the shared output bit buffer when fewer than `threshold` bytes are
 * left.  Only applies when there is a single slice context and the
 * PutBitContext writes into avctx->internal->byte_buffer.  On success the
 * PutBitContext is rebased onto the new buffer and the pointers that alias
 * into it (ptr_lastgob, vbv_delay_ptr) are re-derived from saved offsets.
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure/overflow,
 * AVERROR(EINVAL) if still below threshold afterwards. */
2843 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2845     if (put_bytes_left(&s->pb, 0) < threshold
2846         && s->slice_context_count == 1
2847         && s->pb.buf == s->avctx->internal->byte_buffer) {
/* save positions as offsets — the buffer base is about to change */
2848         int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2849         int vbv_pos     = s->vbv_delay_ptr - s->pb.buf;
2851         uint8_t *new_buffer = NULL;
2852         int new_buffer_size = 0;
/* guard against int overflow of the grown size */
2854         if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2855             av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2856             return AVERROR(ENOMEM);
2861         av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2862                               s->avctx->internal->byte_buffer_size + size_increase);
2864             return AVERROR(ENOMEM);
2866         memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2867         av_free(s->avctx->internal->byte_buffer);
2868         s->avctx->internal->byte_buffer      = new_buffer;
2869         s->avctx->internal->byte_buffer_size = new_buffer_size;
2870         rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2871         s->ptr_lastgob   = s->pb.buf + lastgob_pos;
2872         s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2874     if (put_bytes_left(&s->pb, 0) < threshold)
2875         return AVERROR(EINVAL);
/* Main per-slice encoding loop (slice-threading worker).  For each MB row
 * of this thread's slice it handles resync/GOB headers, then encodes every
 * macroblock — either directly (only one candidate mode) or via repeated
 * trial encodes with encode_mb_hq() when several candidate modes or QP_RD
 * are enabled — and finally reconstructs and accumulates PSNR statistics.
 * NOTE(review): this extract is heavily sampled (non-contiguous embedded
 * numbering); many braces, `break`s and loop headers are elided, so the
 * comments below describe the visible statements only. */
2879 static int encode_thread(AVCodecContext *c, void *arg){
2880     MpegEncContext *s= *(void**)arg;
2881     int mb_x, mb_y, mb_y_order;
2882     int chr_h= 16>>s->chroma_y_shift;
2884     MpegEncContext best_s = { 0 }, backup_s;
/* scratch bit buffers for trial encodes: two slots so the current trial
 * and the best-so-far can coexist (selected via next_block) */
2885     uint8_t bit_buf[2][MAX_MB_BYTES];
2886     uint8_t bit_buf2[2][MAX_MB_BYTES];
2887     uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2888     PutBitContext pb[2], pb2[2], tex_pb[2];
2891         init_put_bits(&pb    [i], bit_buf    [i], MAX_MB_BYTES);
2892         init_put_bits(&pb2   [i], bit_buf2   [i], MAX_MB_BYTES);
2893         init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2896     s->last_bits= put_bits_count(&s->pb);
2907     /* init last dc values */
2908     /* note: quant matrix value (8) is implied here */
2909     s->last_dc[i] = 128 << s->intra_dc_precision;
2911     s->current_picture.encoding_error[i] = 0;
/* AMV uses non-standard DC predictor resets */
2913     if(s->codec_id==AV_CODEC_ID_AMV){
2914         s->last_dc[0] = 128*8/13;
2915         s->last_dc[1] = 128*8/14;
2916         s->last_dc[2] = 128*8/14;
2919     memset(s->last_mv, 0, sizeof(s->last_mv));
2923     switch(s->codec_id){
2924     case AV_CODEC_ID_H263:
2925     case AV_CODEC_ID_H263P:
2926     case AV_CODEC_ID_FLV1:
2927         if (CONFIG_H263_ENCODER)
2928             s->gob_index = H263_GOB_HEIGHT(s->height);
2930     case AV_CODEC_ID_MPEG4:
2931         if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2932             ff_mpeg4_init_partitions(s);
2938     s->first_slice_line = 1;
2939     s->ptr_lastgob = s->pb.buf;
/* SpeedHQ encodes MB rows in a codec-specific order; others are sequential */
2940     for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
2941         if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
2943             mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
2944             if (first_in_slice && mb_y_order != s->start_mb_y)
2945                 ff_speedhq_end_slice(s);
2946             s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
2953         ff_set_qscale(s, s->qscale);
2954         ff_init_block_index(s);
2956         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2957             int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2958             int mb_type= s->mb_type[xy];
/* make sure there is always room for a worst-case macroblock */
2962             int size_increase =  s->avctx->internal->byte_buffer_size/4
2963                                + s->mb_width*MAX_MB_BYTES;
2965             ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
2966             if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
2967                 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2970             if(s->data_partitioning){
2971                 if (put_bytes_left(&s->pb2,    0) < MAX_MB_BYTES ||
2972                     put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
2973                     av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2979             s->mb_y = mb_y;  // moved into loop, can get changed by H.261
2980             ff_update_block_index(s);
2982             if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2983                 ff_h261_reorder_mb_index(s);
2984                 xy= s->mb_y*s->mb_stride + s->mb_x;
2985                 mb_type= s->mb_type[xy];
2988             /* write gob / video packet header  */
2990                 int current_packet_size, is_gob_start;
2992                 current_packet_size = put_bytes_count(&s->pb, 1)
2993                                       - (s->ptr_lastgob - s->pb.buf);
2995                 is_gob_start = s->rtp_payload_size &&
2996                                current_packet_size >= s->rtp_payload_size &&
2999                 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
/* per-codec constraints on where a resync marker / slice may start */
3001                 switch(s->codec_id){
3002                 case AV_CODEC_ID_H263:
3003                 case AV_CODEC_ID_H263P:
3004                     if(!s->h263_slice_structured)
3005                         if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3007                 case AV_CODEC_ID_MPEG2VIDEO:
3008                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3009                 case AV_CODEC_ID_MPEG1VIDEO:
3010                     if(s->mb_skip_run) is_gob_start=0;
3012                 case AV_CODEC_ID_MJPEG:
3013                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3018                     if(s->start_mb_y != mb_y || mb_x!=0){
3021                         if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3022                             ff_mpeg4_init_partitions(s);
3026                     av_assert2((put_bits_count(&s->pb)&7) == 0);
3027                     current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
/* optional error-rate simulation: randomly drop the packet just written */
3029                     if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3030                         int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->mb_x + s->mb_y;
3031                         int d = 100 / s->error_rate;
3033                             current_packet_size=0;
3034                             s->pb.buf_ptr= s->ptr_lastgob;
3035                             av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3039 #if FF_API_RTP_CALLBACK
3040 FF_DISABLE_DEPRECATION_WARNINGS
3041                     if (s->avctx->rtp_callback){
3042                         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3043                         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3045 FF_ENABLE_DEPRECATION_WARNINGS
3047                     update_mb_info(s, 1);
3049                     switch(s->codec_id){
3050                     case AV_CODEC_ID_MPEG4:
3051                         if (CONFIG_MPEG4_ENCODER) {
3052                             ff_mpeg4_encode_video_packet_header(s);
3053                             ff_mpeg4_clean_buffers(s);
3056                     case AV_CODEC_ID_MPEG1VIDEO:
3057                     case AV_CODEC_ID_MPEG2VIDEO:
3058                         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3059                             ff_mpeg1_encode_slice_header(s);
3060                             ff_mpeg1_clean_buffers(s);
3063                     case AV_CODEC_ID_H263:
3064                     case AV_CODEC_ID_H263P:
3065                         if (CONFIG_H263_ENCODER)
3066                             ff_h263_encode_gob_header(s, mb_y);
3070                     if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3071                         int bits= put_bits_count(&s->pb);
3072                         s->misc_bits+= bits - s->last_bits;
3076                     s->ptr_lastgob += current_packet_size;
3077                     s->first_slice_line=1;
3078                     s->resync_mb_x=mb_x;
3079                     s->resync_mb_y=mb_y;
3083             if(  (s->resync_mb_x   == s->mb_x)
3084                && s->resync_mb_y+1 == s->mb_y){
3085                 s->first_slice_line=0;
3089             s->dquant=0; //only for QP_RD
3091             update_mb_info(s, 0);
/* several candidate MB modes (or QP_RD): trial-encode each and keep the best */
3093             if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3095                 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3097                 copy_context_before_encode(&backup_s, s, -1);
3099                 best_s.data_partitioning= s->data_partitioning;
3100                 best_s.partitioned_frame= s->partitioned_frame;
3101                 if(s->data_partitioning){
3102                     backup_s.pb2= s->pb2;
3103                     backup_s.tex_pb= s->tex_pb;
3106                 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3107                     s->mv_dir = MV_DIR_FORWARD;
3108                     s->mv_type = MV_TYPE_16X16;
3110                     s->mv[0][0][0] = s->p_mv_table[xy][0];
3111                     s->mv[0][0][1] = s->p_mv_table[xy][1];
3112                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3113                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3115                 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3116                     s->mv_dir = MV_DIR_FORWARD;
3117                     s->mv_type = MV_TYPE_FIELD;
3120                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3121                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3122                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3124                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3125                                  &dmin, &next_block, 0, 0);
3127                 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3128                     s->mv_dir = MV_DIR_FORWARD;
3129                     s->mv_type = MV_TYPE_16X16;
3133                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3134                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3136                 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3137                     s->mv_dir = MV_DIR_FORWARD;
3138                     s->mv_type = MV_TYPE_8X8;
3141                         s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3142                         s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3144                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3145                                  &dmin, &next_block, 0, 0);
3147                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3148                     s->mv_dir = MV_DIR_FORWARD;
3149                     s->mv_type = MV_TYPE_16X16;
3151                     s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3152                     s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3153                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3154                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3156                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3157                     s->mv_dir = MV_DIR_BACKWARD;
3158                     s->mv_type = MV_TYPE_16X16;
3160                     s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3161                     s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3162                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3163                                  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3165                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3166                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3167                     s->mv_type = MV_TYPE_16X16;
3169                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3170                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3171                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3172                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3173                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3174                                  &dmin, &next_block, 0, 0);
3176                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3177                     s->mv_dir = MV_DIR_FORWARD;
3178                     s->mv_type = MV_TYPE_FIELD;
3181                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3182                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3183                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3185                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3186                                  &dmin, &next_block, 0, 0);
3188                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3189                     s->mv_dir = MV_DIR_BACKWARD;
3190                     s->mv_type = MV_TYPE_FIELD;
3193                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3194                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3195                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3197                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3198                                  &dmin, &next_block, 0, 0);
3200                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3201                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3202                     s->mv_type = MV_TYPE_FIELD;
3204                     for(dir=0; dir<2; dir++){
3206                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3207                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3208                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3211                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3212                                  &dmin, &next_block, 0, 0);
3214                 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3216                     s->mv_type = MV_TYPE_16X16;
3220                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3221                                  &dmin, &next_block, 0, 0);
3222                     if(s->h263_pred || s->h263_aic){
3224                             s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3226                             ff_clean_intra_table_entries(s); //old mode?
/* QP_RD: additionally try qscale +/-1 and +/-2 around the chosen mode */
3230                 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3231                     if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3232                         const int last_qp= backup_s.qscale;
3235                         const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3236                         static const int dquant_tab[4]={-1,1,-2,2};
3237                         int storecoefs = s->mb_intra && s->dc_val[0];
3239                         av_assert2(backup_s.dquant == 0);
3242                         s->mv_dir= best_s.mv_dir;
3243                         s->mv_type = MV_TYPE_16X16;
3244                         s->mb_intra= best_s.mb_intra;
3245                         s->mv[0][0][0] = best_s.mv[0][0][0];
3246                         s->mv[0][0][1] = best_s.mv[0][0][1];
3247                         s->mv[1][0][0] = best_s.mv[1][0][0];
3248                         s->mv[1][0][1] = best_s.mv[1][0][1];
3250                         qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3251                         for(; qpi<4; qpi++){
3252                             int dquant= dquant_tab[qpi];
3253                             qp= last_qp + dquant;
3254                             if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3256                             backup_s.dquant= dquant;
/* save DC/AC prediction state so a rejected qp trial can be rolled back */
3259                                     dc[i]= s->dc_val[0][ s->block_index[i] ];
3260                                     memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3264                             encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3265                                          &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3266                             if(best_s.qscale != qp){
3269                                         s->dc_val[0][ s->block_index[i] ]= dc[i];
3270                                         memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3277                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3278                     int mx= s->b_direct_mv_table[xy][0];
3279                     int my= s->b_direct_mv_table[xy][1];
3281                     backup_s.dquant = 0;
3282                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3284                     ff_mpeg4_set_direct_mv(s, mx, my);
3285                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3286                                  &dmin, &next_block, mx, my);
3288                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3289                     backup_s.dquant = 0;
3290                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3292                     ff_mpeg4_set_direct_mv(s, 0, 0);
3293                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3294                                  &dmin, &next_block, 0, 0);
/* SKIP_RD: retry the best inter mode with all coefficients zeroed */
3296                 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3299                         coded |= s->block_last_index[i];
3302                         memcpy(s->mv, best_s.mv, sizeof(s->mv));
3303                         if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3304                             mx=my=0; //FIXME find the one we actually used
3305                             ff_mpeg4_set_direct_mv(s, mx, my);
3306                         }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3314                         s->mv_dir= best_s.mv_dir;
3315                         s->mv_type = best_s.mv_type;
3317 //                        s->mv[0][0][0] = best_s.mv[0][0][0];
3318 //                        s->mv[0][0][1] = best_s.mv[0][0][1];
3319 //                        s->mv[1][0][0] = best_s.mv[1][0][0];
3320 //                        s->mv[1][0][1] = best_s.mv[1][0][1];
3323                         encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3324                                      &dmin, &next_block, mx, my);
3329                 s->current_picture.qscale_table[xy] = best_s.qscale;
/* commit the winning candidate back into the real contexts */
3331                 copy_context_after_encode(s, &best_s, -1);
3333                 pb_bits_count= put_bits_count(&s->pb);
3334                 flush_put_bits(&s->pb);
3335                 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3338                 if(s->data_partitioning){
3339                     pb2_bits_count= put_bits_count(&s->pb2);
3340                     flush_put_bits(&s->pb2);
3341                     ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3342                     s->pb2= backup_s.pb2;
3344                     tex_pb_bits_count= put_bits_count(&s->tex_pb);
3345                     flush_put_bits(&s->tex_pb);
3346                     ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3347                     s->tex_pb= backup_s.tex_pb;
3349                 s->last_bits= put_bits_count(&s->pb);
3351                 if (CONFIG_H263_ENCODER &&
3352                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3353                     ff_h263_update_motion_val(s);
/* best reconstruction lives in the scratchpad slot; copy it to s->dest */
3355                 if(next_block==0){ //FIXME 16 vs linesize16
3356                     s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad                     , s->linesize  ,16);
3357                     s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize    , s->uvlinesize, 8);
3358                     s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3361                 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3362                     ff_mpv_reconstruct_mb(s, s->block);
3364                 int motion_x = 0, motion_y = 0;
3365                 s->mv_type=MV_TYPE_16X16;
3366                 // only one MB-Type possible
3369                 case CANDIDATE_MB_TYPE_INTRA:
3372                     motion_x= s->mv[0][0][0] = 0;
3373                     motion_y= s->mv[0][0][1] = 0;
3375                 case CANDIDATE_MB_TYPE_INTER:
3376                     s->mv_dir = MV_DIR_FORWARD;
3378                     motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3379                     motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3381                 case CANDIDATE_MB_TYPE_INTER_I:
3382                     s->mv_dir = MV_DIR_FORWARD;
3383                     s->mv_type = MV_TYPE_FIELD;
3386                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3387                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3388                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3391                 case CANDIDATE_MB_TYPE_INTER4V:
3392                     s->mv_dir = MV_DIR_FORWARD;
3393                     s->mv_type = MV_TYPE_8X8;
3396                         s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3397                         s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3400                 case CANDIDATE_MB_TYPE_DIRECT:
3401                     if (CONFIG_MPEG4_ENCODER) {
3402                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3404                         motion_x=s->b_direct_mv_table[xy][0];
3405                         motion_y=s->b_direct_mv_table[xy][1];
3406                         ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3409                 case CANDIDATE_MB_TYPE_DIRECT0:
3410                     if (CONFIG_MPEG4_ENCODER) {
3411                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3413                         ff_mpeg4_set_direct_mv(s, 0, 0);
3416                 case CANDIDATE_MB_TYPE_BIDIR:
3417                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3419                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3420                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3421                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3422                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3424                 case CANDIDATE_MB_TYPE_BACKWARD:
3425                     s->mv_dir = MV_DIR_BACKWARD;
3427                     motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3428                     motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3430                 case CANDIDATE_MB_TYPE_FORWARD:
3431                     s->mv_dir = MV_DIR_FORWARD;
3433                     motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3434                     motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3436                 case CANDIDATE_MB_TYPE_FORWARD_I:
3437                     s->mv_dir = MV_DIR_FORWARD;
3438                     s->mv_type = MV_TYPE_FIELD;
3441                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3442                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3443                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3446                 case CANDIDATE_MB_TYPE_BACKWARD_I:
3447                     s->mv_dir = MV_DIR_BACKWARD;
3448                     s->mv_type = MV_TYPE_FIELD;
3451                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3452                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3453                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3456                 case CANDIDATE_MB_TYPE_BIDIR_I:
3457                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3458                     s->mv_type = MV_TYPE_FIELD;
3460                     for(dir=0; dir<2; dir++){
3462                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3463                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3464                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3469                     av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3472                 encode_mb(s, motion_x, motion_y);
3474                 // RAL: Update last macroblock type
3475                 s->last_mv_dir = s->mv_dir;
3477                 if (CONFIG_H263_ENCODER &&
3478                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3479                     ff_h263_update_motion_val(s);
3481                 ff_mpv_reconstruct_mb(s, s->block);
3484             /* clean the MV table in IPS frames for direct mode in B-frames */
3485             if(s->mb_intra /* && I,P,S_TYPE */){
3486                 s->p_mv_table[xy][0]=0;
3487                 s->p_mv_table[xy][1]=0;
/* accumulate per-plane SSE for PSNR reporting, clipped at frame edges */
3490             if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3494                 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3495                 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3497                 s->current_picture.encoding_error[0] += sse(
3498                     s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3499                     s->dest[0], w, h, s->linesize);
3500                 s->current_picture.encoding_error[1] += sse(
3501                     s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
3502                     s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3503                 s->current_picture.encoding_error[2] += sse(
3504                     s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
3505                     s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3508             if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3509                 ff_h263_loop_filter(s);
3511             ff_dlog(s->avctx, "MB %d %d bits\n",
3512                     s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3516     //not beautiful here but we must write it before flushing so it has to be here
3517     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3518         ff_msmpeg4_encode_ext_header(s);
3522 #if FF_API_RTP_CALLBACK
3523 FF_DISABLE_DEPRECATION_WARNINGS
3524     /* Send the last GOB if RTP */
3525     if (s->avctx->rtp_callback) {
3526         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3527         int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3528         /* Call the RTP callback to send the last GOB */
3530         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3532 FF_ENABLE_DEPRECATION_WARNINGS
/* MERGE(field): add src->field into dst->field and zero the source, so
 * per-slice-thread statistics can be accumulated into the main context
 * exactly once. NOTE(review): not do{}while(0)-wrapped — safe only because
 * it is always used as a full statement, as below. */
3538 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold motion-estimation statistics from a slice thread context (src)
 * into the main context (dst) after the ME pass.
 * NOTE(review): the closing lines of this function are not visible in
 * this chunk of the file. */
3539 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3540 MERGE(me.scene_change_score);
3541 MERGE(me.mc_mb_var_sum_temp);
3542 MERGE(me.mb_var_sum_temp);
/* Fold per-slice encoding statistics and the slice bitstream from a thread
 * context (src) into the main context (dst) after encoding finishes.
 * Uses the MERGE() macro defined above; the bitstream of src is appended
 * to dst's PutBitContext at the end. */
3545 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3548 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3549 MERGE(dct_count[1]);
3558 MERGE(er.error_count);
3559 MERGE(padding_bug_score);
3560 MERGE(current_picture.encoding_error[0]);
3561 MERGE(current_picture.encoding_error[1]);
3562 MERGE(current_picture.encoding_error[2]);
/* Noise-reduction DCT error sums are only merged when the feature is on. */
3564 if (dst->noise_reduction){
3565 for(i=0; i<64; i++){
3566 MERGE(dct_error_sum[0][i]);
3567 MERGE(dct_error_sum[1][i]);
/* Slice bitstreams must be byte-aligned before concatenation. */
3571 av_assert1(put_bits_count(&src->pb) % 8 ==0);
3572 av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3573 ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3574 flush_put_bits(&dst->pb);
/* Estimate/assign the quantizer (lambda / f->quality) for the current
 * picture. When dry_run is nonzero, state such as next_lambda is left
 * untouched so the estimation can be repeated for real later.
 * Also cleans up per-MB qscale tables when adaptive quantization is on.
 * NOTE(review): some lines of this function (error-return paths) are not
 * visible in this chunk. */
3577 static int estimate_qp(MpegEncContext *s, int dry_run){
3578 if (s->next_lambda){
3579 s->current_picture_ptr->f->quality =
3580 s->current_picture.f->quality = s->next_lambda;
3581 if(!dry_run) s->next_lambda= 0;
3582 } else if (!s->fixed_qscale) {
/* Rate control supplies the quality when qscale is not fixed. */
3583 int quality = ff_rate_estimate_qscale(s, dry_run);
3584 s->current_picture_ptr->f->quality =
3585 s->current_picture.f->quality = quality;
3586 if (s->current_picture.f->quality < 0)
/* Codec-specific smoothing of the per-MB qscale table. */
3590 if(s->adaptive_quant){
3591 switch(s->codec_id){
3592 case AV_CODEC_ID_MPEG4:
3593 if (CONFIG_MPEG4_ENCODER)
3594 ff_clean_mpeg4_qscales(s);
3596 case AV_CODEC_ID_H263:
3597 case AV_CODEC_ID_H263P:
3598 case AV_CODEC_ID_FLV1:
3599 if (CONFIG_H263_ENCODER)
3600 ff_clean_h263_qscales(s);
3603 ff_init_qscale_tab(s);
3606 s->lambda= s->lambda_table[0];
3609 s->lambda = s->current_picture.f->quality;
3614 /* must be called before writing the header */
/* Update the temporal distance bookkeeping (time, pp_time, pb_time,
 * last_non_b_time) used for B-frame prediction distances. Requires a
 * valid pts on the current picture. */
3615 static void set_frame_distances(MpegEncContext * s){
3616 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3617 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3619 if(s->pict_type==AV_PICTURE_TYPE_B){
/* Distance from the previous non-B frame to this B frame. */
3620 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3621 av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
/* Non-B frame: distance between the two most recent reference frames. */
3623 s->pp_time= s->time - s->last_non_b_time;
3624 s->last_non_b_time= s->time;
3625 av_assert1(s->picture_number==0 || s->pp_time > 0);
/* Encode one picture: run motion estimation across slice threads, pick the
 * picture type / f_code / b_code, estimate the quantizer, build quant
 * matrices where the format requires it (MJPEG/AMV/SpeedHQ), write the
 * picture header for the active output format, then run the per-slice
 * encode threads and merge their results.
 * NOTE(review): this chunk of the file is sparsely sampled — several lines
 * of this function (error paths, loop headers, braces) are not visible. */
3629 static int encode_picture(MpegEncContext *s, int picture_number)
3633 int context_count = s->slice_context_count;
3635 s->picture_number = picture_number;
3637 /* Reset the average MB variance */
3638 s->me.mb_var_sum_temp =
3639 s->me.mc_mb_var_sum_temp = 0;
3641 /* we need to initialize some time vars before we can encode B-frames */
3642 // RAL: Condition added for MPEG1VIDEO
3643 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3644 set_frame_distances(s);
3645 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3646 ff_set_mpeg4_time(s);
3648 s->me.scene_change_score=0;
3650 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* Rounding mode selection: MSMPEG4 v3+ intra frames use no-rounding;
 * P/S frames toggle it when flip-flop rounding applies. */
3652 if(s->pict_type==AV_PICTURE_TYPE_I){
3653 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3654 else s->no_rounding=0;
3655 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3656 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3657 s->no_rounding ^= 1;
3660 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3661 if (estimate_qp(s,1) < 0)
3663 ff_get_2pass_fcode(s);
3664 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3665 if(s->pict_type==AV_PICTURE_TYPE_B)
3666 s->lambda= s->last_lambda_for[s->pict_type];
3668 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* For non-MJPEG codecs the chroma intra matrix aliases the luma one. */
3672 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3673 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3674 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3675 s->q_chroma_intra_matrix = s->q_intra_matrix;
3676 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3679 s->mb_intra=0; //for the rate distortion & bit compare functions
3680 for(i=1; i<context_count; i++){
3681 ret = ff_update_duplicate_context(s->thread_context[i], s);
3689 /* Estimate motion for every MB */
3690 if(s->pict_type != AV_PICTURE_TYPE_I){
3691 s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3692 s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3693 if (s->pict_type != AV_PICTURE_TYPE_B) {
3694 if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3696 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3700 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3701 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
/* I-frame: mark every MB intra; no motion estimation needed. */
3703 for(i=0; i<s->mb_stride*s->mb_height; i++)
3704 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3706 if(!s->fixed_qscale){
3707 /* finding spatial complexity for I-frame rate control */
3708 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3711 for(i=1; i<context_count; i++){
3712 merge_context_after_me(s, s->thread_context[i]);
3714 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3715 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* Scene-change detection: promote a P frame to I when ME scores say so. */
3718 if (s->me.scene_change_score > s->scenechange_threshold &&
3719 s->pict_type == AV_PICTURE_TYPE_P) {
3720 s->pict_type= AV_PICTURE_TYPE_I;
3721 for(i=0; i<s->mb_stride*s->mb_height; i++)
3722 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3723 if(s->msmpeg4_version >= 3)
3725 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3726 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* Select f_code for P/S frames and clamp overlong motion vectors. */
3730 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3731 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3733 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3735 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3736 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3737 s->f_code= FFMAX3(s->f_code, a, b);
3740 ff_fix_long_p_mvs(s, s->intra_penalty ? CANDIDATE_MB_TYPE_INTER : CANDIDATE_MB_TYPE_INTRA);
3741 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3742 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3746 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3747 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
/* Select f_code/b_code for B frames over forward/backward/bidir tables. */
3752 if(s->pict_type==AV_PICTURE_TYPE_B){
3755 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3756 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3757 s->f_code = FFMAX(a, b);
3759 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3760 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3761 s->b_code = FFMAX(a, b);
3763 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3764 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3765 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3766 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3767 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3769 for(dir=0; dir<2; dir++){
3772 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3773 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3774 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3775 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3783 if (estimate_qp(s, 0) < 0)
3786 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3787 s->pict_type == AV_PICTURE_TYPE_I &&
3788 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3789 s->qscale= 3; //reduce clipping problems
/* MJPEG: bake qscale into the quant matrices (qscale is not signaled). */
3791 if (s->out_format == FMT_MJPEG) {
3792 const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3793 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3795 if (s->avctx->intra_matrix) {
3797 luma_matrix = s->avctx->intra_matrix;
3799 if (s->avctx->chroma_intra_matrix)
3800 chroma_matrix = s->avctx->chroma_intra_matrix;
3802 /* for mjpeg, we do include qscale in the matrix */
3804 int j = s->idsp.idct_permutation[i];
3806 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3807 s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3809 s->y_dc_scale_table=
3810 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3811 s->chroma_intra_matrix[0] =
3812 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3813 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3814 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3815 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3816 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV: fixed quant matrices and DC scale tables (constants 13/14). */
3819 if(s->codec_id == AV_CODEC_ID_AMV){
3820 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3821 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3823 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3825 s->intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3826 s->chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3828 s->y_dc_scale_table= y;
3829 s->c_dc_scale_table= c;
3830 s->intra_matrix[0] = 13;
3831 s->chroma_intra_matrix[0] = 14;
3832 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3833 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3834 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3835 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3839 if (s->out_format == FMT_SPEEDHQ) {
3840 s->y_dc_scale_table=
3841 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[3];
3844 //FIXME var duplication
3845 s->current_picture_ptr->f->key_frame =
3846 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3847 s->current_picture_ptr->f->pict_type =
3848 s->current_picture.f->pict_type = s->pict_type;
3850 if (s->current_picture.f->key_frame)
3851 s->picture_in_gop_number=0;
3853 s->mb_x = s->mb_y = 0;
3854 s->last_bits= put_bits_count(&s->pb);
/* Write the picture header for the active output format. */
3855 switch(s->out_format) {
3856 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
3858 /* s->huffman == HUFFMAN_TABLE_OPTIMAL can only be true for MJPEG. */
3859 if (!CONFIG_MJPEG_ENCODER || s->huffman != HUFFMAN_TABLE_OPTIMAL)
3860 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3861 s->pred, s->intra_matrix, s->chroma_intra_matrix);
3865 if (CONFIG_SPEEDHQ_ENCODER)
3866 ff_speedhq_encode_picture_header(s);
3869 if (CONFIG_H261_ENCODER)
3870 ff_h261_encode_picture_header(s, picture_number);
3873 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3874 ff_wmv2_encode_picture_header(s, picture_number);
3875 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3876 ff_msmpeg4_encode_picture_header(s, picture_number);
3877 else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3878 ret = ff_mpeg4_encode_picture_header(s, picture_number);
3881 } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3882 ret = ff_rv10_encode_picture_header(s, picture_number);
3886 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3887 ff_rv20_encode_picture_header(s, picture_number);
3888 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3889 ff_flv_encode_picture_header(s, picture_number);
3890 else if (CONFIG_H263_ENCODER)
3891 ff_h263_encode_picture_header(s, picture_number);
3894 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3895 ff_mpeg1_encode_picture_header(s, picture_number);
3900 bits= put_bits_count(&s->pb);
3901 s->header_bits= bits - s->last_bits;
3903 for(i=1; i<context_count; i++){
3904 update_duplicate_context_after_me(s->thread_context[i], s);
/* Run the per-slice encode threads, then merge their output/stats. */
3906 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3907 for(i=1; i<context_count; i++){
3908 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3909 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3910 merge_context_after_encode(s, s->thread_context[i]);
/* Noise reduction on DCT coefficients: shrink each coefficient toward zero
 * by a per-position offset (dct_offset) while accumulating the running
 * error sums (dct_error_sum) used to adapt the offsets; intra and inter
 * blocks are tracked separately via s->mb_intra.
 * NOTE(review): loop/brace lines of this function are partially missing
 * from this chunk. */
3916 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3917 const int intra= s->mb_intra;
3920 s->dct_count[intra]++;
3922 for(i=0; i<64; i++){
3923 int level= block[i];
/* Positive branch: subtract the offset, clamping at zero. */
3927 s->dct_error_sum[intra][i] += level;
3928 level -= s->dct_offset[intra][i];
3929 if(level<0) level=0;
/* Negative branch: add the offset, clamping at zero. */
3931 s->dct_error_sum[intra][i] -= level;
3932 level += s->dct_offset[intra][i];
3933 if(level>0) level=0;
/* Trellis (rate-distortion optimal) quantization of one 8x8 block.
 * Forward-DCTs the block, generates up to two candidate levels per
 * coefficient, then runs a Viterbi-style search over (run, level) survivor
 * paths minimizing distortion + lambda*bits, and writes the winning levels
 * back into the block in permuted scan order.
 * Returns the index of the last nonzero coefficient (or -1 / <start_i when
 * the block quantizes to nothing); *overflow reports coefficient overflow.
 * NOTE(review): this chunk is sparsely sampled — declarations and several
 * interior lines of this function are not visible here. */
3940 static int dct_quantize_trellis_c(MpegEncContext *s,
3941 int16_t *block, int n,
3942 int qscale, int *overflow){
3944 const uint16_t *matrix;
3945 const uint8_t *scantable;
3946 const uint8_t *perm_scantable;
3948 unsigned int threshold1, threshold2;
3960 int coeff_count[64];
3961 int qmul, qadd, start_i, last_non_zero, i, dc;
3962 const int esc_length= s->ac_esc_length;
3964 uint8_t * last_length;
3965 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3968 s->fdsp.fdct(block);
3970 if(s->dct_error_sum)
3971 s->denoise_dct(s, block);
3973 qadd= ((qscale-1)|1)*8;
3975 if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3976 else mpeg2_qscale = qscale << 1;
/* Intra setup: scantables, DC handling, matrices and VLC length tables. */
3980 scantable= s->intra_scantable.scantable;
3981 perm_scantable= s->intra_scantable.permutated;
3989 /* For AIC we skip quant/dequant of INTRADC */
3994 /* note: block[0] is assumed to be positive */
3995 block[0] = (block[0] + (q >> 1)) / q;
3998 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
3999 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4000 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4001 bias= 1<<(QMAT_SHIFT-1);
4003 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4004 length = s->intra_chroma_ac_vlc_length;
4005 last_length= s->intra_chroma_ac_vlc_last_length;
4007 length = s->intra_ac_vlc_length;
4008 last_length= s->intra_ac_vlc_last_length;
/* Inter setup. */
4011 scantable= s->inter_scantable.scantable;
4012 perm_scantable= s->inter_scantable.permutated;
4015 qmat = s->q_inter_matrix[qscale];
4016 matrix = s->inter_matrix;
4017 length = s->inter_ac_vlc_length;
4018 last_length= s->inter_ac_vlc_last_length;
4022 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4023 threshold2= (threshold1<<1);
/* Scan backwards to find the last coefficient that survives quantization. */
4025 for(i=63; i>=start_i; i--) {
4026 const int j = scantable[i];
4027 int level = block[j] * qmat[j];
4029 if(((unsigned)(level+threshold1))>threshold2){
/* Build up to two candidate levels per surviving coefficient. */
4035 for(i=start_i; i<=last_non_zero; i++) {
4036 const int j = scantable[i];
4037 int level = block[j] * qmat[j];
4039 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4040 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4041 if(((unsigned)(level+threshold1))>threshold2){
4043 level= (bias + level)>>QMAT_SHIFT;
4045 coeff[1][i]= level-1;
4046 // coeff[2][k]= level-2;
4048 level= (bias - level)>>QMAT_SHIFT;
4049 coeff[0][i]= -level;
4050 coeff[1][i]= -level+1;
4051 // coeff[2][k]= -level+2;
4053 coeff_count[i]= FFMIN(level, 2);
4054 av_assert2(coeff_count[i]);
4057 coeff[0][i]= (level>>31)|1;
4062 *overflow= s->max_qcoeff < max; //overflow might have happened
4064 if(last_non_zero < start_i){
4065 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4066 return last_non_zero;
4069 score_tab[start_i]= 0;
4070 survivor[0]= start_i;
/* Viterbi search: for each position, try each candidate level against all
 * survivor paths, tracking best score and best "last coefficient" score. */
4073 for(i=start_i; i<=last_non_zero; i++){
4074 int level_index, j, zero_distortion;
4075 int dct_coeff= FFABS(block[ scantable[i] ]);
4076 int best_score=256*256*256*120;
4078 if (s->fdsp.fdct == ff_fdct_ifast)
4079 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4080 zero_distortion= dct_coeff*dct_coeff;
4082 for(level_index=0; level_index < coeff_count[i]; level_index++){
4084 int level= coeff[level_index][i];
4085 const int alevel= FFABS(level);
/* Reconstruct the dequantized value the decoder would see, per format. */
4090 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4091 unquant_coeff= alevel*qmul + qadd;
4092 } else if(s->out_format == FMT_MJPEG) {
4093 j = s->idsp.idct_permutation[scantable[i]];
4094 unquant_coeff = alevel * matrix[j] * 8;
4096 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4098 unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4099 unquant_coeff = (unquant_coeff - 1) | 1;
4101 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4102 unquant_coeff = (unquant_coeff - 1) | 1;
4107 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* Non-escape (|level| < 128): use the VLC length tables directly. */
4109 if((level&(~127)) == 0){
4110 for(j=survivor_count-1; j>=0; j--){
4111 int run= i - survivor[j];
4112 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4113 score += score_tab[i-run];
4115 if(score < best_score){
4118 level_tab[i+1]= level-64;
4122 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4123 for(j=survivor_count-1; j>=0; j--){
4124 int run= i - survivor[j];
4125 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4126 score += score_tab[i-run];
4127 if(score < last_score){
4130 last_level= level-64;
/* Escape-coded level: cost is the fixed escape length. */
4136 distortion += esc_length*lambda;
4137 for(j=survivor_count-1; j>=0; j--){
4138 int run= i - survivor[j];
4139 int score= distortion + score_tab[i-run];
4141 if(score < best_score){
4144 level_tab[i+1]= level-64;
4148 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4149 for(j=survivor_count-1; j>=0; j--){
4150 int run= i - survivor[j];
4151 int score= distortion + score_tab[i-run];
4152 if(score < last_score){
4155 last_level= level-64;
4163 score_tab[i+1]= best_score;
4165 // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4166 if(last_non_zero <= 27){
4167 for(; survivor_count; survivor_count--){
4168 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4172 for(; survivor_count; survivor_count--){
4173 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4178 survivor[ survivor_count++ ]= i+1;
/* Non-H.263/H.261 formats: pick the best end-of-block position. */
4181 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4182 last_score= 256*256*256*120;
4183 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4184 int score= score_tab[i];
4186 score += lambda * 2; // FIXME more exact?
4188 if(score < last_score){
4191 last_level= level_tab[i];
4192 last_run= run_tab[i];
4197 s->coded_score[n] = last_score;
4199 dc= FFABS(block[0]);
4200 last_non_zero= last_i - 1;
4201 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4203 if(last_non_zero < start_i)
4204 return last_non_zero;
/* Special case: only the first coefficient survived — decide between
 * keeping it or dropping the whole block on RD cost. */
4206 if(last_non_zero == 0 && start_i == 0){
4208 int best_score= dc * dc;
4210 for(i=0; i<coeff_count[0]; i++){
4211 int level= coeff[i][0];
4212 int alevel= FFABS(level);
4213 int unquant_coeff, score, distortion;
4215 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4216 unquant_coeff= (alevel*qmul + qadd)>>3;
4218 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4219 unquant_coeff = (unquant_coeff - 1) | 1;
4221 unquant_coeff = (unquant_coeff + 4) >> 3;
4222 unquant_coeff<<= 3 + 3;
4224 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4226 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4227 else score= distortion + esc_length*lambda;
4229 if(score < best_score){
4231 best_level= level - 64;
4234 block[0]= best_level;
4235 s->coded_score[n] = best_score - dc*dc;
4236 if(best_level == 0) return -1;
4237 else return last_non_zero;
/* Trace the winning path back and write the levels into the block. */
4241 av_assert2(last_level);
4243 block[ perm_scantable[last_non_zero] ]= last_level;
4246 for(; i>start_i; i -= run_tab[i] + 1){
4247 block[ perm_scantable[i-1] ]= level_tab[i];
4250 return last_non_zero;
/* 8x8 DCT basis functions, scaled by BASIS_SHIFT, indexed by (permuted
 * coefficient index, pixel index). Lazily initialized by build_basis(). */
4253 static int16_t basis[64][64];
/* Fill the basis[][] table with the 2-D DCT basis, applying the IDCT
 * coefficient permutation so entries line up with permuted block order.
 * NOTE(review): the surrounding loop headers are not visible in this
 * chunk of the file. */
4255 static void build_basis(uint8_t *perm){
4262 double s= 0.25*(1<<BASIS_SHIFT);
4264 int perm_index= perm[index];
/* Orthonormalization factor for the DC row/column. */
4265 if(i==0) s*= sqrt(0.5);
4266 if(j==0) s*= sqrt(0.5);
4267 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4274 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4275 int16_t *block, int16_t *weight, int16_t *orig,
/* Iteratively refine an already-quantized block: repeatedly try +/-1
 * changes to each coefficient (and toggling coefficients on/off),
 * accepting the change that most reduces weighted reconstruction error
 * plus lambda * VLC bit-cost, until no improvement is found.
 * Returns the new last-nonzero index.
 * NOTE(review): this chunk is sparsely sampled — declarations, the outer
 * refinement loop header and several interior lines are not visible. */
4278 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4279 const uint8_t *scantable;
4280 const uint8_t *perm_scantable;
4281 // unsigned int threshold1, threshold2;
4286 int qmul, qadd, start_i, last_non_zero, i, dc;
4288 uint8_t * last_length;
4290 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* Lazily build the DCT basis table on first use. */
4292 if(basis[0][0] == 0)
4293 build_basis(s->idsp.idct_permutation);
/* Intra setup: scantables, DC quant step, VLC length tables. */
4298 scantable= s->intra_scantable.scantable;
4299 perm_scantable= s->intra_scantable.permutated;
4306 /* For AIC we skip quant/dequant of INTRADC */
4310 q <<= RECON_SHIFT-3;
4311 /* note: block[0] is assumed to be positive */
4313 // block[0] = (block[0] + (q >> 1)) / q;
4315 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4316 // bias= 1<<(QMAT_SHIFT-1);
4317 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4318 length = s->intra_chroma_ac_vlc_length;
4319 last_length= s->intra_chroma_ac_vlc_last_length;
4321 length = s->intra_ac_vlc_length;
4322 last_length= s->intra_ac_vlc_last_length;
/* Inter setup. */
4325 scantable= s->inter_scantable.scantable;
4326 perm_scantable= s->inter_scantable.permutated;
4329 length = s->inter_ac_vlc_length;
4330 last_length= s->inter_ac_vlc_last_length;
4332 last_non_zero = s->block_last_index[n];
4334 dc += (1<<(RECON_SHIFT-1));
/* rem[] starts as the (negated, scaled) original; dequantized coefficients
 * are subtracted so rem tracks the remaining reconstruction error. */
4335 for(i=0; i<64; i++){
4336 rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
4340 for(i=0; i<64; i++){
/* Derive perceptual weights (range 16..63) from the noise-shaping level. */
4345 w= FFABS(weight[i]) + qns*one;
4346 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4349 // w=weight[i] = (63*qns + (w/2)) / w;
4352 av_assert2(w<(1<<6));
4355 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* Build the initial RLE and subtract each dequantized coefficient's basis
 * contribution from rem[]. */
4359 for(i=start_i; i<=last_non_zero; i++){
4360 int j= perm_scantable[i];
4361 const int level= block[j];
4365 if(level<0) coeff= qmul*level - qadd;
4366 else coeff= qmul*level + qadd;
4367 run_tab[rle_index++]=run;
4370 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
4377 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4380 int run2, best_unquant_change=0, analyze_gradient;
4381 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
4383 if(analyze_gradient){
4384 for(i=0; i<64; i++){
4387 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
/* Try +/-1 changes on the intra DC coefficient. */
4393 const int level= block[0];
4394 int change, old_coeff;
4396 av_assert2(s->mb_intra);
4400 for(change=-1; change<=1; change+=2){
4401 int new_level= level + change;
4402 int score, new_coeff;
4404 new_coeff= q*new_level;
4405 if(new_coeff >= 2048 || new_coeff < 0)
4408 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4409 new_coeff - old_coeff);
4410 if(score<best_score){
4413 best_change= change;
4414 best_unquant_change= new_coeff - old_coeff;
4421 run2= run_tab[rle_index++];
/* Try +/-1 changes on every AC coefficient, accounting for how the change
 * alters the (run, level) VLC coding of neighbours. */
4425 for(i=start_i; i<64; i++){
4426 int j= perm_scantable[i];
4427 const int level= block[j];
4428 int change, old_coeff;
4430 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4434 if(level<0) old_coeff= qmul*level - qadd;
4435 else old_coeff= qmul*level + qadd;
4436 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4440 av_assert2(run2>=0 || i >= last_non_zero );
4443 for(change=-1; change<=1; change+=2){
4444 int new_level= level + change;
4445 int score, new_coeff, unquant_change;
4448 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4452 if(new_level<0) new_coeff= qmul*new_level - qadd;
4453 else new_coeff= qmul*new_level + qadd;
4454 if(new_coeff >= 2048 || new_coeff <= -2048)
4456 //FIXME check for overflow
/* Existing nonzero coefficient: bit-cost delta of changing its level. */
4459 if(level < 63 && level > -63){
4460 if(i < last_non_zero)
4461 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4462 - length[UNI_AC_ENC_INDEX(run, level+64)];
4464 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4465 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* Turning a zero coefficient on: splits an existing run in two. */
4468 av_assert2(FFABS(new_level)==1);
4470 if(analyze_gradient){
4471 int g= d1[ scantable[i] ];
4472 if(g && (g^new_level) >= 0)
4476 if(i < last_non_zero){
4477 int next_i= i + run2 + 1;
4478 int next_level= block[ perm_scantable[next_i] ] + 64;
4480 if(next_level&(~127))
4483 if(next_i < last_non_zero)
4484 score += length[UNI_AC_ENC_INDEX(run, 65)]
4485 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4486 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4488 score += length[UNI_AC_ENC_INDEX(run, 65)]
4489 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4490 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4492 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4494 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4495 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* Turning a nonzero coefficient off: merges two runs into one. */
4501 av_assert2(FFABS(level)==1);
4503 if(i < last_non_zero){
4504 int next_i= i + run2 + 1;
4505 int next_level= block[ perm_scantable[next_i] ] + 64;
4507 if(next_level&(~127))
4510 if(next_i < last_non_zero)
4511 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4512 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4513 - length[UNI_AC_ENC_INDEX(run, 65)];
4515 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4516 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4517 - length[UNI_AC_ENC_INDEX(run, 65)];
4519 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4521 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4522 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4529 unquant_change= new_coeff - old_coeff;
4530 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4532 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4534 if(score<best_score){
4537 best_change= change;
4538 best_unquant_change= unquant_change;
4542 prev_level= level + 64;
4543 if(prev_level&(~127))
/* Apply the winning change, update last_non_zero and rebuild the RLE. */
4553 int j= perm_scantable[ best_coeff ];
4555 block[j] += best_change;
4557 if(best_coeff > last_non_zero){
4558 last_non_zero= best_coeff;
4559 av_assert2(block[j]);
4561 for(; last_non_zero>=start_i; last_non_zero--){
4562 if(block[perm_scantable[last_non_zero]])
4569 for(i=start_i; i<=last_non_zero; i++){
4570 int j= perm_scantable[i];
4571 const int level= block[j];
4574 run_tab[rle_index++]=run;
4581 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4587 return last_non_zero;
4591 * Permute an 8x8 block according to permutation.
4592 * @param block the block which will be permuted according to
4593 * the given permutation vector
4594 * @param permutation the permutation vector
4595 * @param last the last non zero coefficient in scantable order, used to
4596 * speed the permutation up
4597 * @param scantable the used scantable, this is only used to speed the
4598 * permutation up, the block is not (inverse) permutated
4599 * to scantable order!
/* NOTE(review): interior lines of this function (temp buffer declaration
 * and the copy into it) are not visible in this chunk of the file. */
4601 void ff_block_permute(int16_t *block, uint8_t *permutation,
4602 const uint8_t *scantable, int last)
4609 //FIXME it is ok but not clean and might fail for some permutations
4610 // if (permutation[1] == 1)
/* First pass over the nonzero coefficients (in scan order). */
4613 for (i = 0; i <= last; i++) {
4614 const int j = scantable[i];
/* Second pass: write each coefficient back at its permuted position. */
4619 for (i = 0; i <= last; i++) {
4620 const int j = scantable[i];
4621 const int perm_j = permutation[j];
4622 block[perm_j] = temp[j];
/* Plain (non-trellis) quantization of one 8x8 block: forward DCT, optional
 * denoising, then scalar quantization with a per-coefficient matrix and
 * bias, followed by the IDCT permutation of the nonzero coefficients.
 * Returns the last-nonzero index; *overflow reports coefficient overflow.
 * NOTE(review): some declarations/braces of this function are not visible
 * in this chunk of the file. */
4626 int ff_dct_quantize_c(MpegEncContext *s,
4627 int16_t *block, int n,
4628 int qscale, int *overflow)
4630 int i, j, level, last_non_zero, q, start_i;
4632 const uint8_t *scantable;
4635 unsigned int threshold1, threshold2;
4637 s->fdsp.fdct(block);
4639 if(s->dct_error_sum)
4640 s->denoise_dct(s, block);
/* Intra: special DC handling, intra matrices and bias. */
4643 scantable= s->intra_scantable.scantable;
4651 /* For AIC we skip quant/dequant of INTRADC */
4654 /* note: block[0] is assumed to be positive */
4655 block[0] = (block[0] + (q >> 1)) / q;
4658 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4659 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
/* Inter: inter matrices and bias. */
4661 scantable= s->inter_scantable.scantable;
4664 qmat = s->q_inter_matrix[qscale];
4665 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4667 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4668 threshold2= (threshold1<<1);
/* Find the last coefficient that survives quantization. */
4669 for(i=63;i>=start_i;i--) {
4671 level = block[j] * qmat[j];
4673 if(((unsigned)(level+threshold1))>threshold2){
/* Quantize the surviving coefficients in place. */
4680 for(i=start_i; i<=last_non_zero; i++) {
4682 level = block[j] * qmat[j];
4684 // if( bias+level >= (1<<QMAT_SHIFT)
4685 // || bias-level >= (1<<QMAT_SHIFT)){
4686 if(((unsigned)(level+threshold1))>threshold2){
4688 level= (bias + level)>>QMAT_SHIFT;
4691 level= (bias - level)>>QMAT_SHIFT;
4699 *overflow= s->max_qcoeff < max; //overflow might have happened
4701 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4702 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4703 ff_block_permute(block, s->idsp.idct_permutation,
4704 scantable, last_non_zero);
4706 return last_non_zero;
/* OFFSET/VE: AVOption plumbing — byte offset into MpegEncContext and the
 * standard video-encoding option flags. */
4709 #define OFFSET(x) offsetof(MpegEncContext, x)
4710 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private AVOptions for the H.263 encoder.
 * NOTE(review): the terminating entry of this table is not visible in
 * this chunk of the file. */
4711 static const AVOption h263_options[] = {
4712 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4713 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4715 #if FF_API_MPEGVIDEO_OPTS
4716 FF_MPV_DEPRECATED_MPEG_QUANT_OPT
4717 FF_MPV_DEPRECATED_A53_CC_OPT
4718 FF_MPV_DEPRECATED_MATRIX_OPT
/* AVClass binding the H.263 encoder's private options. */
4723 static const AVClass h263_class = {
4724 .class_name = "H.263 encoder",
4725 .item_name = av_default_item_name,
4726 .option = h263_options,
4727 .version = LIBAVUTIL_VERSION_INT,
/* H.263 encoder registration; shares the generic mpegvideo encode
 * entry points (init/encode2/close). */
4730 AVCodec ff_h263_encoder = {
4732 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4733 .type = AVMEDIA_TYPE_VIDEO,
4734 .id = AV_CODEC_ID_H263,
4735 .priv_data_size = sizeof(MpegEncContext),
4736 .init = ff_mpv_encode_init,
4737 .encode2 = ff_mpv_encode_picture,
4738 .close = ff_mpv_encode_end,
4739 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4740 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4741 .priv_class = &h263_class,
/* Private AVOptions for the H.263+ (H.263v2) encoder.
 * NOTE(review): the terminating entry of this table is not visible in
 * this chunk of the file. */
4744 static const AVOption h263p_options[] = {
4745 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4746 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4747 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4748 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
4750 #if FF_API_MPEGVIDEO_OPTS
4751 FF_MPV_DEPRECATED_MPEG_QUANT_OPT
4752 FF_MPV_DEPRECATED_A53_CC_OPT
4753 FF_MPV_DEPRECATED_MATRIX_OPT
4757 static const AVClass h263p_class = {
4758 .class_name = "H.263p encoder",
4759 .item_name = av_default_item_name,
4760 .option = h263p_options,
4761 .version = LIBAVUTIL_VERSION_INT,
4764 AVCodec ff_h263p_encoder = {
4766 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4767 .type = AVMEDIA_TYPE_VIDEO,
4768 .id = AV_CODEC_ID_H263P,
4769 .priv_data_size = sizeof(MpegEncContext),
4770 .init = ff_mpv_encode_init,
4771 .encode2 = ff_mpv_encode_picture,
4772 .close = ff_mpv_encode_end,
4773 .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4774 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4775 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4776 .priv_class = &h263p_class,
4779 static const AVClass msmpeg4v2_class = {
4780 .class_name = "msmpeg4v2 encoder",
4781 .item_name = av_default_item_name,
4782 .option = ff_mpv_generic_options,
4783 .version = LIBAVUTIL_VERSION_INT,
4786 AVCodec ff_msmpeg4v2_encoder = {
4787 .name = "msmpeg4v2",
4788 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4789 .type = AVMEDIA_TYPE_VIDEO,
4790 .id = AV_CODEC_ID_MSMPEG4V2,
4791 .priv_data_size = sizeof(MpegEncContext),
4792 .init = ff_mpv_encode_init,
4793 .encode2 = ff_mpv_encode_picture,
4794 .close = ff_mpv_encode_end,
4795 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4796 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4797 .priv_class = &msmpeg4v2_class,
4800 static const AVClass msmpeg4v3_class = {
4801 .class_name = "msmpeg4v3 encoder",
4802 .item_name = av_default_item_name,
4803 .option = ff_mpv_generic_options,
4804 .version = LIBAVUTIL_VERSION_INT,
4807 AVCodec ff_msmpeg4v3_encoder = {
4809 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4810 .type = AVMEDIA_TYPE_VIDEO,
4811 .id = AV_CODEC_ID_MSMPEG4V3,
4812 .priv_data_size = sizeof(MpegEncContext),
4813 .init = ff_mpv_encode_init,
4814 .encode2 = ff_mpv_encode_picture,
4815 .close = ff_mpv_encode_end,
4816 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4817 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4818 .priv_class = &msmpeg4v3_class,
4821 static const AVClass wmv1_class = {
4822 .class_name = "wmv1 encoder",
4823 .item_name = av_default_item_name,
4824 .option = ff_mpv_generic_options,
4825 .version = LIBAVUTIL_VERSION_INT,
4828 AVCodec ff_wmv1_encoder = {
4830 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4831 .type = AVMEDIA_TYPE_VIDEO,
4832 .id = AV_CODEC_ID_WMV1,
4833 .priv_data_size = sizeof(MpegEncContext),
4834 .init = ff_mpv_encode_init,
4835 .encode2 = ff_mpv_encode_picture,
4836 .close = ff_mpv_encode_end,
4837 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4838 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4839 .priv_class = &wmv1_class,