2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/mem_internal.h"
40 #include "libavutil/pixdesc.h"
41 #include "libavutil/opt.h"
42 #include "libavutil/thread.h"
47 #include "mpegvideo.h"
48 #include "mpegvideodata.h"
52 #include "mjpegenc_common.h"
54 #include "mpegutils.h"
56 #include "speedhqenc.h"
58 #include "pixblockdsp.h"
62 #include "aandcttab.h"
64 #include "mpeg4video.h"
66 #include "bytestream.h"
69 #include "packet_internal.h"
/* Fixed-point shift for the intra/inter quantizer bias values
 * (used below when computing s->intra_quant_bias / s->inter_quant_bias). */
73 #define QUANT_BIAS_SHIFT 8
/* Shift used when building the 16-bit quantizer matrices (qmat16)
 * in ff_convert_matrix(). */
75 #define QMAT_SHIFT_MMX 16
/* Forward declarations of encoder internals defined later in this file.
 * NOTE(review): this extract is missing intermediate lines, so the
 * corresponding definitions may not all be visible here. */
78 static int encode_picture(MpegEncContext *s, int picture_number);
79 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
80 static int sse_mb(MpegEncContext *s);
81 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
82 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
/* Static tables shared by all encoder instances; initialized once via
 * mpv_encode_init_static() (guarded by AV_ONCE in mpv_encode_defaults()). */
84 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
85 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
/* Generic AVOption table shared by the mpegvideo-based encoders.
 * NOTE(review): only the deprecated option entries are visible in this
 * extract; the remaining entries and the array terminator are missing. */
87 const AVOption ff_mpv_generic_options[] = {
89 #if FF_API_MPEGVIDEO_OPTS
90 FF_MPV_DEPRECATED_MPEG_QUANT_OPT
91 FF_MPV_DEPRECATED_A53_CC_OPT
92 FF_MPV_DEPRECATED_MATRIX_OPT
93 FF_MPV_DEPRECATED_BFRAME_OPTS
/**
 * Precompute fixed-point quantization tables for every qscale in
 * [qmin, qmax] from a reference quantization matrix.
 *
 * @param s            encoder context (provides fdsp/idsp and q_scale_type)
 * @param qmat         output: per-qscale 32-bit reciprocal matrices
 * @param qmat16       output: per-qscale 16-bit matrices plus bias terms
 * @param quant_matrix input reference matrix (values presumably 16..255,
 *                     per the in-code range comments below)
 * @param bias         quantizer bias, scaled by QUANT_BIAS_SHIFT
 * @param qmin, qmax   inclusive qscale range to fill
 * @param intra        nonzero for intra tables (skips coefficient 0 in the
 *                     overflow check loop at the bottom)
 *
 * NOTE(review): several lines are missing from this extract (loop variable
 * declarations, else-branch brackets, shift handling); the comments below
 * describe only what is visible.
 */
98 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
99 uint16_t (*qmat16)[2][64],
100 const uint16_t *quant_matrix,
101 int bias, int qmin, int qmax, int intra)
103 FDCTDSPContext *fdsp = &s->fdsp;
107 for (qscale = qmin; qscale <= qmax; qscale++) {
/* Map qscale to the actual quantizer step: MPEG-2 non-linear table when
 * q_scale_type is set, otherwise the linear 2*qscale. */
111 if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
112 else qscale2 = qscale << 1;
/* Branch on which forward-DCT implementation is in use: the scaling baked
 * into each fdct determines how the reciprocal matrices must be built. */
114 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
116 fdsp->fdct == ff_faandct ||
117 #endif /* CONFIG_FAANDCT */
118 fdsp->fdct == ff_jpeg_fdct_islow_10) {
119 for (i = 0; i < 64; i++) {
/* j follows the IDCT permutation so qmat lines up with coefficient order. */
120 const int j = s->idsp.idct_permutation[i];
121 int64_t den = (int64_t) qscale2 * quant_matrix[j];
122 /* 16 <= qscale * quant_matrix[i] <= 7905
123 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
124 * 19952 <= x <= 249205026
125 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
126 * 3444240 >= (1 << 36) / (x) >= 275 */
128 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
/* ifast fdct leaves the AAN scale factors in the output, so they are
 * folded into the reciprocal here (extra +14 bits of precision). */
130 } else if (fdsp->fdct == ff_fdct_ifast) {
131 for (i = 0; i < 64; i++) {
132 const int j = s->idsp.idct_permutation[i];
133 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
134 /* 16 <= qscale * quant_matrix[i] <= 7905
135 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
136 * 19952 <= x <= 249205026
137 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
138 * 3444240 >= (1 << 36) / (x) >= 275 */
140 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
/* Default path: also fill the 16-bit matrices used by SIMD quantizers. */
143 for (i = 0; i < 64; i++) {
144 const int j = s->idsp.idct_permutation[i];
145 int64_t den = (int64_t) qscale2 * quant_matrix[j];
146 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
147 * Assume x = qscale * quant_matrix[i]
149 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
150 * so 32768 >= (1 << 19) / (x) >= 67 */
151 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
152 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
153 // (qscale * quant_matrix[i]);
154 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
/* Clamp degenerate reciprocals (0 or exactly 2^15) so the 16-bit
 * multiply in the SIMD quantizer cannot overflow or divide by zero. */
156 if (qmat16[qscale][0][i] == 0 ||
157 qmat16[qscale][0][i] == 128 * 256)
158 qmat16[qscale][0][i] = 128 * 256 - 1;
159 qmat16[qscale][1][i] =
160 ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
161 qmat16[qscale][0][i]);
/* Overflow guard: shrink the effective shift while any max*qmat product
 * would exceed INT_MAX (coefficient 0 skipped for intra tables). */
165 for (i = intra; i < 64; i++) {
167 if (fdsp->fdct == ff_fdct_ifast) {
168 max = (8191LL * ff_aanscales[i]) >> 14;
170 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
176 av_log(s->avctx, AV_LOG_INFO,
177 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/* Derive s->qscale (and s->lambda2) from the current rate-control lambda.
 * NOTE(review): lines are missing from this extract (loop body, closing
 * braces, else branch). */
182 static inline void update_qscale(MpegEncContext *s)
/* NOTE: the "&& 0" makes this non-linear-qscale search branch dead code as
 * written — the linear mapping below is always used. Presumably disabled
 * intentionally; confirm before re-enabling. */
184 if (s->q_scale_type == 1 && 0) {
186 int bestdiff=INT_MAX;
/* Search the MPEG-2 non-linear qscale table for the entry whose effective
 * step best matches lambda (lambda * 139 is the lambda->qscale scaling
 * also used below). */
189 for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
190 int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
/* Skip entries outside the configured qmin/qmax range (qmax is ignored
 * while vbv_ignore_qmax is set). */
191 if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
192 (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
194 if (diff < bestdiff) {
/* Linear mapping: qscale = round(lambda * 139 / 2^(FF_LAMBDA_SHIFT+7)). */
201 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
202 (FF_LAMBDA_SHIFT + 7);
203 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
/* Keep lambda2 (= lambda^2, rounded) in sync for RD decisions. */
206 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/* Write a 64-entry quantization matrix to the bitstream in zigzag order,
 * 8 bits per coefficient. */
210 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
216 for (i = 0; i < 64; i++) {
217 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
224 * init s->current_picture.qscale_table from s->lambda_table
226 void ff_init_qscale_tab(MpegEncContext *s)
228 int8_t * const qscale_table = s->current_picture.qscale_table;
231 for (i = 0; i < s->mb_num; i++) {
232 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
/* Same lambda -> qscale scaling as update_qscale(): qp = lam*139 rounded
 * down by 2^(FF_LAMBDA_SHIFT+7), then clipped to the configured range. */
233 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
234 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/* Propagate fields from the main context to a slice-thread duplicate
 * context after motion estimation, so all slice contexts encode with a
 * consistent picture state. NOTE(review): additional COPY() lines appear
 * to be missing from this extract. */
239 static void update_duplicate_context_after_me(MpegEncContext *dst,
242 #define COPY(a) dst->a= src->a
244 COPY(current_picture);
250 COPY(picture_in_gop_number);
251 COPY(gop_picture_number);
252 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
253 COPY(progressive_frame); // FIXME don't set in encode_header
254 COPY(partitioned_frame); // FIXME don't set in encode_header
/* One-time initialization of the static default tables (run under
 * ff_thread_once() from mpv_encode_defaults()). Marks the fcode table
 * entries for small motion vectors (-16..15) as usable with fcode 1.
 * NOTE(review): lines filling default_mv_penalty are not visible in this
 * extract. */
258 static void mpv_encode_init_static(void)
260 for (int i = -16; i < 16; i++)
261 default_fcode_tab[i + MAX_MV] = 1;
265 * Set the given MpegEncContext to defaults for encoding.
266 * the changed fields will not depend upon the prior state of the MpegEncContext.
268 static void mpv_encode_defaults(MpegEncContext *s)
270 static AVOnce init_static_once = AV_ONCE_INIT;
/* Common (decoder+encoder) defaults first, then encoder-specific state. */
272 ff_mpv_common_defaults(s);
/* Fill the shared static tables exactly once, thread-safely. */
274 ff_thread_once(&init_static_once, mpv_encode_init_static);
276 s->me.mv_penalty = default_mv_penalty;
277 s->fcode_tab = default_fcode_tab;
279 s->input_picture_number = 0;
280 s->picture_in_gop_number = 0;
/* Select the DCT quantization implementations: start from the C reference
 * (possibly overridden by the x86 init), and switch to the trellis
 * quantizer when the user requested trellis optimization, keeping the
 * plain quantizer available as fast_dct_quantize. */
283 av_cold int ff_dct_encode_init(MpegEncContext *s)
286 ff_dct_encode_init_x86(s);
288 if (CONFIG_H263_ENCODER)
289 ff_h263dsp_init(&s->h263dsp);
290 if (!s->dct_quantize)
291 s->dct_quantize = ff_dct_quantize_c;
293 s->denoise_dct = denoise_dct_c;
/* Remember the non-trellis quantizer before possibly replacing it. */
294 s->fast_dct_quantize = s->dct_quantize;
295 if (s->avctx->trellis)
296 s->dct_quantize = dct_quantize_trellis_c;
301 /* init video encoder */
/* Validates the AVCodecContext settings, applies deprecated-option
 * fallbacks, performs codec-specific setup and allocates all encoder
 * state. Returns 0 on success or a negative AVERROR code.
 * NOTE(review): many lines (break statements, closing braces, else
 * branches) are missing from this extract; comments below only describe
 * what is visible. */
302 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
304 MpegEncContext *s = avctx->priv_data;
305 AVCPBProperties *cpb_props;
308 mpv_encode_defaults(s);
/* --- Map the input pixel format to an internal chroma format. --- */
310 switch (avctx->pix_fmt) {
311 case AV_PIX_FMT_YUVJ444P:
312 case AV_PIX_FMT_YUV444P:
313 s->chroma_format = CHROMA_444;
315 case AV_PIX_FMT_YUVJ422P:
316 case AV_PIX_FMT_YUV422P:
317 s->chroma_format = CHROMA_422;
319 case AV_PIX_FMT_YUVJ420P:
320 case AV_PIX_FMT_YUV420P:
322 s->chroma_format = CHROMA_420;
326 avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
/* --- Import values from deprecated public options into private fields. --- */
328 #if FF_API_PRIVATE_OPT
329 FF_DISABLE_DEPRECATION_WARNINGS
330 if (avctx->rtp_payload_size)
331 s->rtp_payload_size = avctx->rtp_payload_size;
332 if (avctx->me_penalty_compensation)
333 s->me_penalty_compensation = avctx->me_penalty_compensation;
335 s->me_pre = avctx->pre_me;
336 FF_ENABLE_DEPRECATION_WARNINGS
/* --- Copy basic user settings into the context. --- */
339 s->bit_rate = avctx->bit_rate;
340 s->width = avctx->width;
341 s->height = avctx->height;
342 if (avctx->gop_size > 600 &&
343 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
344 av_log(avctx, AV_LOG_WARNING,
345 "keyframe interval too large!, reducing it from %d to %d\n",
346 avctx->gop_size, 600);
347 avctx->gop_size = 600;
349 s->gop_size = avctx->gop_size;
351 if (avctx->max_b_frames > MAX_B_FRAMES) {
352 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
353 "is %d.\n", MAX_B_FRAMES);
354 avctx->max_b_frames = MAX_B_FRAMES;
356 s->max_b_frames = avctx->max_b_frames;
357 s->codec_id = avctx->codec->id;
358 s->strict_std_compliance = avctx->strict_std_compliance;
359 s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
360 s->rtp_mode = !!s->rtp_payload_size;
361 s->intra_dc_precision = avctx->intra_dc_precision;
363 // workaround some differences between how applications specify dc precision
364 if (s->intra_dc_precision < 0) {
365 s->intra_dc_precision += 8;
366 } else if (s->intra_dc_precision >= 8)
367 s->intra_dc_precision -= 8;
369 if (s->intra_dc_precision < 0) {
370 av_log(avctx, AV_LOG_ERROR,
371 "intra dc precision must be positive, note some applications use"
372 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
373 return AVERROR(EINVAL);
376 if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))
/* Only MPEG-2 supports a nonzero intra DC precision (up to 3). */
379 if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
380 av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
381 return AVERROR(EINVAL);
383 s->user_specified_pts = AV_NOPTS_VALUE;
385 if (s->gop_size <= 1) {
393 s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
/* Adaptive quant is enabled when any masking option or QP-RD is active. */
395 s->adaptive_quant = (avctx->lumi_masking ||
396 avctx->dark_masking ||
397 avctx->temporal_cplx_masking ||
398 avctx->spatial_cplx_masking ||
401 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
404 s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
/* --- Rate-control / VBV parameter validation and auto-sizing. --- */
406 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
407 switch(avctx->codec_id) {
408 case AV_CODEC_ID_MPEG1VIDEO:
409 case AV_CODEC_ID_MPEG2VIDEO:
410 avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
412 case AV_CODEC_ID_MPEG4:
413 case AV_CODEC_ID_MSMPEG4V1:
414 case AV_CODEC_ID_MSMPEG4V2:
415 case AV_CODEC_ID_MSMPEG4V3:
/* Piecewise-linear VBV buffer size (in 16 KiB units) by max rate. */
416 if (avctx->rc_max_rate >= 15000000) {
417 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
418 } else if(avctx->rc_max_rate >= 2000000) {
419 avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
420 } else if(avctx->rc_max_rate >= 384000) {
421 avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
423 avctx->rc_buffer_size = 40;
424 avctx->rc_buffer_size *= 16384;
427 if (avctx->rc_buffer_size) {
428 av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
432 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
433 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
434 return AVERROR(EINVAL);
437 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
438 av_log(avctx, AV_LOG_INFO,
439 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
442 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
443 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
444 return AVERROR(EINVAL);
447 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
448 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
449 return AVERROR(EINVAL);
452 if (avctx->rc_max_rate &&
453 avctx->rc_max_rate == avctx->bit_rate &&
454 avctx->rc_max_rate != avctx->rc_min_rate) {
455 av_log(avctx, AV_LOG_INFO,
456 "impossible bitrate constraints, this will fail\n");
/* Buffer must hold at least one frame's worth of bits at the target rate. */
459 if (avctx->rc_buffer_size &&
460 avctx->bit_rate * (int64_t)avctx->time_base.num >
461 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
462 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
463 return AVERROR(EINVAL);
466 if (!s->fixed_qscale &&
467 avctx->bit_rate * av_q2d(avctx->time_base) >
468 avctx->bit_rate_tolerance) {
469 av_log(avctx, AV_LOG_WARNING,
470 "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
471 avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
/* MPEG-1/2 vbv_delay is a 16-bit field in 90 kHz units; warn when the
 * buffer is too large to express and VBR signaling will be used instead. */
474 if (avctx->rc_max_rate &&
475 avctx->rc_min_rate == avctx->rc_max_rate &&
476 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
477 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
478 90000LL * (avctx->rc_buffer_size - 1) >
479 avctx->rc_max_rate * 0xFFFFLL) {
480 av_log(avctx, AV_LOG_INFO,
481 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
482 "specified vbv buffer is too large for the given bitrate!\n");
/* --- Reject feature/codec combinations that are not supported. --- */
485 if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
486 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
487 s->codec_id != AV_CODEC_ID_FLV1) {
488 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
489 return AVERROR(EINVAL);
492 if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
493 av_log(avctx, AV_LOG_ERROR,
494 "OBMC is only supported with simple mb decision\n");
495 return AVERROR(EINVAL);
498 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
499 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
500 return AVERROR(EINVAL);
503 if (s->max_b_frames &&
504 s->codec_id != AV_CODEC_ID_MPEG4 &&
505 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
506 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
507 av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
508 return AVERROR(EINVAL);
510 if (s->max_b_frames < 0) {
511 av_log(avctx, AV_LOG_ERROR,
512 "max b frames must be 0 or positive for mpegvideo based encoders\n");
513 return AVERROR(EINVAL);
/* SAR fields are 8-bit in these bitstreams; reduce out-of-range ratios. */
516 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
517 s->codec_id == AV_CODEC_ID_H263 ||
518 s->codec_id == AV_CODEC_ID_H263P) &&
519 (avctx->sample_aspect_ratio.num > 255 ||
520 avctx->sample_aspect_ratio.den > 255)) {
521 av_log(avctx, AV_LOG_WARNING,
522 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
523 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
524 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
525 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
/* --- Per-codec resolution constraints. --- */
528 if ((s->codec_id == AV_CODEC_ID_H263 ||
529 s->codec_id == AV_CODEC_ID_H263P) &&
530 (avctx->width > 2048 ||
531 avctx->height > 1152 )) {
532 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
533 return AVERROR(EINVAL);
535 if ((s->codec_id == AV_CODEC_ID_H263 ||
536 s->codec_id == AV_CODEC_ID_H263P ||
537 s->codec_id == AV_CODEC_ID_RV20) &&
538 ((avctx->width &3) ||
539 (avctx->height&3) )) {
540 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
541 return AVERROR(EINVAL);
544 if (s->codec_id == AV_CODEC_ID_RV10 &&
546 avctx->height&15 )) {
547 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
548 return AVERROR(EINVAL);
551 if ((s->codec_id == AV_CODEC_ID_WMV1 ||
552 s->codec_id == AV_CODEC_ID_WMV2) &&
554 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
555 return AVERROR(EINVAL);
558 if ((avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
559 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
560 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
561 return AVERROR(EINVAL);
/* --- More deprecated-option imports and flag validation. --- */
564 #if FF_API_PRIVATE_OPT
565 FF_DISABLE_DEPRECATION_WARNINGS
566 if (avctx->mpeg_quant)
568 FF_ENABLE_DEPRECATION_WARNINGS
570 if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
571 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
572 av_log(avctx, AV_LOG_ERROR,
573 "mpeg2 style quantization not supported by codec\n");
574 return AVERROR(EINVAL);
578 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
579 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
580 return AVERROR(EINVAL);
583 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
584 avctx->mb_decision != FF_MB_DECISION_RD) {
585 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
586 return AVERROR(EINVAL);
589 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
590 (s->codec_id == AV_CODEC_ID_AMV ||
591 s->codec_id == AV_CODEC_ID_MJPEG)) {
592 // Used to produce garbage with MJPEG.
593 av_log(avctx, AV_LOG_ERROR,
594 "QP RD is no longer compatible with MJPEG or AMV\n");
595 return AVERROR(EINVAL);
598 #if FF_API_PRIVATE_OPT
599 FF_DISABLE_DEPRECATION_WARNINGS
600 if (avctx->scenechange_threshold)
601 s->scenechange_threshold = avctx->scenechange_threshold;
602 FF_ENABLE_DEPRECATION_WARNINGS
605 if (s->scenechange_threshold < 1000000000 &&
606 (avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
607 av_log(avctx, AV_LOG_ERROR,
608 "closed gop with scene change detection are not supported yet, "
609 "set threshold to 1000000000\n");
610 return AVERROR_PATCHWELCOME;
613 if (avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
614 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
615 s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
616 av_log(avctx, AV_LOG_ERROR,
617 "low delay forcing is only available for mpeg2, "
618 "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
619 return AVERROR(EINVAL);
621 if (s->max_b_frames != 0) {
622 av_log(avctx, AV_LOG_ERROR,
623 "B-frames cannot be used with low delay\n");
624 return AVERROR(EINVAL);
628 if (s->q_scale_type == 1) {
629 if (avctx->qmax > 28) {
630 av_log(avctx, AV_LOG_ERROR,
631 "non linear quant only supports qmax <= 28 currently\n");
632 return AVERROR_PATCHWELCOME;
636 if (avctx->slices > 1 &&
637 (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
638 av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
639 return AVERROR(EINVAL);
642 if (avctx->thread_count > 1 &&
643 s->codec_id != AV_CODEC_ID_MPEG4 &&
644 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
645 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
646 s->codec_id != AV_CODEC_ID_MJPEG &&
647 (s->codec_id != AV_CODEC_ID_H263P)) {
648 av_log(avctx, AV_LOG_ERROR,
649 "multi threaded encoding not supported by codec\n");
650 return AVERROR_PATCHWELCOME;
653 if (avctx->thread_count < 1) {
654 av_log(avctx, AV_LOG_ERROR,
655 "automatic thread number detection not supported by codec, "
657 return AVERROR_PATCHWELCOME;
660 #if FF_API_PRIVATE_OPT
661 FF_DISABLE_DEPRECATION_WARNINGS
662 if (avctx->b_frame_strategy)
663 s->b_frame_strategy = avctx->b_frame_strategy;
664 if (avctx->b_sensitivity != 40)
665 s->b_sensitivity = avctx->b_sensitivity;
666 FF_ENABLE_DEPRECATION_WARNINGS
669 if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
670 av_log(avctx, AV_LOG_INFO,
671 "notice: b_frame_strategy only affects the first pass\n");
672 s->b_frame_strategy = 0;
/* Normalize the time base by dividing out its gcd. */
675 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
677 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
678 avctx->time_base.den /= i;
679 avctx->time_base.num /= i;
/* --- Default quantizer biases (scaled by QUANT_BIAS_SHIFT). --- */
683 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
684 // (a + x * 3 / 8) / x
685 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
686 s->inter_quant_bias = 0;
688 s->intra_quant_bias = 0;
690 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
693 if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
694 av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
695 return AVERROR(EINVAL);
698 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
/* MPEG-4 stores the timebase denominator in a 16-bit field. */
700 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
701 avctx->time_base.den > (1 << 16) - 1) {
702 av_log(avctx, AV_LOG_ERROR,
703 "timebase %d/%d not supported by MPEG 4 standard, "
704 "the maximum admitted value for the timebase denominator "
705 "is %d\n", avctx->time_base.num, avctx->time_base.den,
707 return AVERROR(EINVAL);
709 s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
/* --- Per-codec output format and feature setup. --- */
711 switch (avctx->codec->id) {
712 case AV_CODEC_ID_MPEG1VIDEO:
713 s->out_format = FMT_MPEG1;
714 s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
715 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
717 case AV_CODEC_ID_MPEG2VIDEO:
718 s->out_format = FMT_MPEG1;
719 s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
720 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
723 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
724 case AV_CODEC_ID_MJPEG:
725 case AV_CODEC_ID_AMV:
726 s->out_format = FMT_MJPEG;
727 s->intra_only = 1; /* force intra only for jpeg */
728 if ((ret = ff_mjpeg_encode_init(s)) < 0)
734 case AV_CODEC_ID_SPEEDHQ:
735 s->out_format = FMT_SPEEDHQ;
736 s->intra_only = 1; /* force intra only for SHQ */
737 if (!CONFIG_SPEEDHQ_ENCODER)
738 return AVERROR_ENCODER_NOT_FOUND;
739 if ((ret = ff_speedhq_encode_init(s)) < 0)
744 case AV_CODEC_ID_H261:
745 if (!CONFIG_H261_ENCODER)
746 return AVERROR_ENCODER_NOT_FOUND;
747 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
748 av_log(avctx, AV_LOG_ERROR,
749 "The specified picture size of %dx%d is not valid for the "
750 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
751 s->width, s->height);
752 return AVERROR(EINVAL);
754 s->out_format = FMT_H261;
757 s->rtp_mode = 0; /* Sliced encoding not supported */
759 case AV_CODEC_ID_H263:
760 if (!CONFIG_H263_ENCODER)
761 return AVERROR_ENCODER_NOT_FOUND;
/* Index 8 in ff_h263_format marks "size not in the table". */
762 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
763 s->width, s->height) == 8) {
764 av_log(avctx, AV_LOG_ERROR,
765 "The specified picture size of %dx%d is not valid for "
766 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
767 "352x288, 704x576, and 1408x1152. "
768 "Try H.263+.\n", s->width, s->height);
769 return AVERROR(EINVAL);
771 s->out_format = FMT_H263;
775 case AV_CODEC_ID_H263P:
776 s->out_format = FMT_H263;
779 s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
780 s->modified_quant = s->h263_aic;
781 s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
782 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
785 /* These are just to be sure */
789 case AV_CODEC_ID_FLV1:
790 s->out_format = FMT_H263;
791 s->h263_flv = 2; /* format = 1; 11-bit codes */
792 s->unrestricted_mv = 1;
793 s->rtp_mode = 0; /* don't allow GOB */
797 case AV_CODEC_ID_RV10:
798 s->out_format = FMT_H263;
802 case AV_CODEC_ID_RV20:
803 s->out_format = FMT_H263;
806 s->modified_quant = 1;
810 s->unrestricted_mv = 0;
812 case AV_CODEC_ID_MPEG4:
813 s->out_format = FMT_H263;
815 s->unrestricted_mv = 1;
816 s->low_delay = s->max_b_frames ? 0 : 1;
817 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
819 case AV_CODEC_ID_MSMPEG4V2:
820 s->out_format = FMT_H263;
822 s->unrestricted_mv = 1;
823 s->msmpeg4_version = 2;
827 case AV_CODEC_ID_MSMPEG4V3:
828 s->out_format = FMT_H263;
830 s->unrestricted_mv = 1;
831 s->msmpeg4_version = 3;
832 s->flipflop_rounding = 1;
836 case AV_CODEC_ID_WMV1:
837 s->out_format = FMT_H263;
839 s->unrestricted_mv = 1;
840 s->msmpeg4_version = 4;
841 s->flipflop_rounding = 1;
845 case AV_CODEC_ID_WMV2:
846 s->out_format = FMT_H263;
848 s->unrestricted_mv = 1;
849 s->msmpeg4_version = 5;
850 s->flipflop_rounding = 1;
855 return AVERROR(EINVAL);
858 #if FF_API_PRIVATE_OPT
859 FF_DISABLE_DEPRECATION_WARNINGS
860 if (avctx->noise_reduction)
861 s->noise_reduction = avctx->noise_reduction;
862 FF_ENABLE_DEPRECATION_WARNINGS
865 avctx->has_b_frames = !s->low_delay;
869 s->progressive_frame =
870 s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
871 AV_CODEC_FLAG_INTERLACED_ME) ||
/* --- Shared context init and DSP function tables. --- */
876 if ((ret = ff_mpv_common_init(s)) < 0)
879 ff_fdctdsp_init(&s->fdsp, avctx);
880 ff_me_cmp_init(&s->mecc, avctx);
881 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
882 ff_pixblockdsp_init(&s->pdsp, avctx);
883 ff_qpeldsp_init(&s->qdsp);
885 if (s->msmpeg4_version) {
886 int ac_stats_size = 2 * 2 * (MAX_LEVEL + 1) * (MAX_RUN + 1) * 2 * sizeof(int);
887 if (!(s->ac_stats = av_mallocz(ac_stats_size)))
888 return AVERROR(ENOMEM);
/* --- Allocate quantization matrices and picture queues
 * (all freed in ff_mpv_encode_end()). --- */
891 if (!(avctx->stats_out = av_mallocz(256)) ||
892 !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix, 32) ||
893 !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix, 32) ||
894 !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix, 32) ||
895 !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16, 32) ||
896 !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
897 !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16, 32) ||
898 !FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_PICTURE_COUNT) ||
899 !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_PICTURE_COUNT))
900 return AVERROR(ENOMEM);
902 if (s->noise_reduction) {
903 if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
904 return AVERROR(ENOMEM);
907 ff_dct_encode_init(s);
909 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
910 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
912 if (s->slice_context_count > 1) {
915 if (avctx->codec_id == AV_CODEC_ID_H263P)
916 s->h263_slice_structured = 1;
919 s->quant_precision = 5;
921 #if FF_API_PRIVATE_OPT
922 FF_DISABLE_DEPRECATION_WARNINGS
923 if (avctx->frame_skip_threshold)
924 s->frame_skip_threshold = avctx->frame_skip_threshold;
925 if (avctx->frame_skip_factor)
926 s->frame_skip_factor = avctx->frame_skip_factor;
927 if (avctx->frame_skip_exp)
928 s->frame_skip_exp = avctx->frame_skip_exp;
929 if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
930 s->frame_skip_cmp = avctx->frame_skip_cmp;
931 FF_ENABLE_DEPRECATION_WARNINGS
934 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, avctx->ildct_cmp);
935 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
/* --- Per-format encoder sub-initializers. --- */
937 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
938 ff_h261_encode_init(s);
939 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
940 ff_h263_encode_init(s);
941 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
942 ff_msmpeg4_encode_init(s);
943 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
944 && s->out_format == FMT_MPEG1)
945 ff_mpeg1_encode_init(s);
/* --- Select default intra/inter matrices (in IDCT-permuted order),
 * overridable by user-supplied matrices. --- */
948 for (i = 0; i < 64; i++) {
949 int j = s->idsp.idct_permutation[i];
950 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
952 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
953 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
954 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
956 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
957 } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
959 s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
962 s->chroma_intra_matrix[j] =
963 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
964 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
966 if (avctx->intra_matrix)
967 s->intra_matrix[j] = avctx->intra_matrix[i];
968 if (avctx->inter_matrix)
969 s->inter_matrix[j] = avctx->inter_matrix[i];
972 /* precompute matrix */
973 /* for mjpeg, we do include qscale in the matrix */
974 if (s->out_format != FMT_MJPEG) {
975 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
976 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
978 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
979 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
983 if ((ret = ff_rate_control_init(s)) < 0)
986 #if FF_API_PRIVATE_OPT
987 FF_DISABLE_DEPRECATION_WARNINGS
988 if (avctx->brd_scale)
989 s->brd_scale = avctx->brd_scale;
991 if (avctx->prediction_method)
992 s->pred = avctx->prediction_method + 1;
993 FF_ENABLE_DEPRECATION_WARNINGS
/* b_frame_strategy 2 needs scratch frames (downscaled by brd_scale) for
 * look-ahead B-frame decisions. */
996 if (s->b_frame_strategy == 2) {
997 for (i = 0; i < s->max_b_frames + 2; i++) {
998 s->tmp_frames[i] = av_frame_alloc();
999 if (!s->tmp_frames[i])
1000 return AVERROR(ENOMEM);
1002 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1003 s->tmp_frames[i]->width = s->width >> s->brd_scale;
1004 s->tmp_frames[i]->height = s->height >> s->brd_scale;
1006 ret = av_frame_get_buffer(s->tmp_frames[i], 0);
/* Export the coded picture buffer parameters as side data. */
1012 cpb_props = ff_add_cpb_side_data(avctx);
1014 return AVERROR(ENOMEM);
1015 cpb_props->max_bitrate = avctx->rc_max_rate;
1016 cpb_props->min_bitrate = avctx->rc_min_rate;
1017 cpb_props->avg_bitrate = avctx->bit_rate;
1018 cpb_props->buffer_size = avctx->rc_buffer_size;
/* Free all encoder state allocated by ff_mpv_encode_init(): rate control,
 * common context, per-codec state, stats buffers and quantization
 * matrices. Always returns (NOTE(review): the return statement is not
 * visible in this extract). */
1023 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1025 MpegEncContext *s = avctx->priv_data;
1028 ff_rate_control_uninit(s);
1030 ff_mpv_common_end(s);
1031 if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
1032 s->out_format == FMT_MJPEG)
1033 ff_mjpeg_encode_close(s);
1035 av_freep(&avctx->extradata);
1037 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1038 av_frame_free(&s->tmp_frames[i]);
1040 ff_free_picture_tables(&s->new_picture);
1041 ff_mpeg_unref_picture(avctx, &s->new_picture);
1043 av_freep(&avctx->stats_out);
1044 av_freep(&s->ac_stats);
/* The chroma matrices may alias the luma ones; only free them when they
 * are separate allocations, then clear the (possibly dangling) pointers. */
1046 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1047 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1048 s->q_chroma_intra_matrix= NULL;
1049 s->q_chroma_intra_matrix16= NULL;
1050 av_freep(&s->q_intra_matrix);
1051 av_freep(&s->q_inter_matrix);
1052 av_freep(&s->q_intra_matrix16);
1053 av_freep(&s->q_inter_matrix16);
1054 av_freep(&s->input_picture);
1055 av_freep(&s->reordered_input_picture);
1056 av_freep(&s->dct_offset);
/* Sum of absolute errors of a 16x16 pixel block against a constant
 * reference value (used below by get_intra_count() to compare a block
 * against its mean). NOTE(review): accumulator declaration and return are
 * missing from this extract. */
1061 static int get_sae(uint8_t *src, int ref, int stride)
1066 for (y = 0; y < 16; y++) {
1067 for (x = 0; x < 16; x++) {
1068 acc += FFABS(src[x + y * stride] - ref);
/* Count the 16x16 macroblocks for which intra coding looks cheaper than
 * inter: a block votes "intra" when its deviation from its own mean
 * (sae + 500 margin) is below its SAD against the reference frame. */
1075 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1076 uint8_t *ref, int stride)
/* Only full 16-pixel rows/cols are scanned (dimensions rounded down). */
1082 h = s->height & ~15;
1084 for (y = 0; y < h; y += 16) {
1085 for (x = 0; x < w; x += 16) {
1086 int offset = x + y * stride;
1087 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
/* Block mean: pix_sum over 256 pixels, rounded. */
1089 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1090 int sae = get_sae(src + offset, mean, stride);
1092 acc += sae + 500 < sad;
/* Thin wrapper around ff_alloc_picture() that forwards the encoder's
 * geometry/stride state; `shared` selects shared-buffer (direct) mode. */
1098 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1100 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1101 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1102 s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1103 &s->linesize, &s->uvlinesize);
/* Accept one user-supplied input frame: validate/normalize its PTS,
 * decide whether the frame can be used directly ("direct"/shared) or must
 * be copied into an internal Picture, pad edges where needed, and insert
 * the Picture into s->input_picture[] at the encoding-delay position.
 * NOTE(review): extract is elided — 'pts'/'direct' setup, several closing
 * braces and error paths are outside the visible lines. */
1106 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1108 Picture *pic = NULL;
1110 int i, display_picture_number = 0, ret;
/* delay between input and output: B-frames need max_b_frames of
 * lookahead; otherwise 1 unless low_delay is set */
1111 int encoding_delay = s->max_b_frames ? s->max_b_frames
1112 : (s->low_delay ? 0 : 1);
1113 int flush_offset = 1;
1118 display_picture_number = s->input_picture_number++;
1120 if (pts != AV_NOPTS_VALUE) {
1121 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1122 int64_t last = s->user_specified_pts;
/* reject non-monotonic user timestamps */
1125 av_log(s->avctx, AV_LOG_ERROR,
1126 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1128 return AVERROR(EINVAL);
1131 if (!s->low_delay && display_picture_number == 1)
1132 s->dts_delta = pts - last;
1134 s->user_specified_pts = pts;
/* no pts on this frame: guess by extrapolating the previous one */
1136 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1137 s->user_specified_pts =
1138 pts = s->user_specified_pts + 1;
1139 av_log(s->avctx, AV_LOG_INFO,
1140 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1143 pts = display_picture_number;
/* direct use of the user buffer is only possible if strides match
 * and data is sufficiently aligned; otherwise fall back to copying */
1147 if (!pic_arg->buf[0] ||
1148 pic_arg->linesize[0] != s->linesize ||
1149 pic_arg->linesize[1] != s->uvlinesize ||
1150 pic_arg->linesize[2] != s->uvlinesize)
1152 if ((s->width & 15) || (s->height & 15))
1154 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1156 if (s->linesize & (STRIDE_ALIGN-1))
1159 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1160 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1162 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1166 pic = &s->picture[i];
1170 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1173 ret = alloc_picture(s, pic, direct);
/* the user fed back our own edge-padded buffer: data already in place */
1178 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1179 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1180 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1183 int h_chroma_shift, v_chroma_shift;
1184 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
/* copy all three planes into the internal buffer */
1188 for (i = 0; i < 3; i++) {
1189 int src_stride = pic_arg->linesize[i];
1190 int dst_stride = i ? s->uvlinesize : s->linesize;
1191 int h_shift = i ? h_chroma_shift : 0;
1192 int v_shift = i ? v_chroma_shift : 0;
1193 int w = s->width >> h_shift;
1194 int h = s->height >> v_shift;
1195 uint8_t *src = pic_arg->data[i];
1196 uint8_t *dst = pic->f->data[i];
/* interlaced MPEG-2 with tall alignment padding needs special care */
1199 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1200 && !s->progressive_sequence
1201 && FFALIGN(s->height, 32) - s->height > 16)
1204 if (!s->avctx->rc_buffer_size)
1205 dst += INPLACE_OFFSET;
1207 if (src_stride == dst_stride)
1208 memcpy(dst, src, src_stride * h);
1211 uint8_t *dst2 = dst;
1213 memcpy(dst2, src, w);
/* pad right/bottom edges of non-MB-aligned dimensions */
1218 if ((s->width & 15) || (s->height & (vpad-1))) {
1219 s->mpvencdsp.draw_edges(dst, dst_stride,
1229 ret = av_frame_copy_props(pic->f, pic_arg);
1233 pic->f->display_picture_number = display_picture_number;
1234 pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1236 /* Flushing: When we have not received enough input frames,
1237 * ensure s->input_picture[0] contains the first picture */
1238 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1239 if (s->input_picture[flush_offset])
1242 if (flush_offset <= 1)
1245 encoding_delay = encoding_delay - flush_offset + 1;
1248 /* shift buffer entries */
1249 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1250 s->input_picture[i - flush_offset] = s->input_picture[i];
1252 s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether frame 'p' is similar enough to reference 'ref' to be
 * skipped entirely (frame-skip rate control). Accumulates a per-8x8-block
 * comparison score over all three planes using the mode selected by
 * frame_skip_exp (max / sum-abs / sum-sq / higher powers), then tests it
 * against frame_skip_threshold and frame_skip_factor.
 * NOTE(review): extract is elided; the merge of 'score' into 'score64'
 * and the final return statements are outside the visible lines. */
1257 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1261 int64_t score64 = 0;
1263 for (plane = 0; plane < 3; plane++) {
1264 const int stride = p->f->linesize[plane];
/* luma spans 2x2 8-pixel blocks per MB dimension, chroma 1x1 */
1265 const int bw = plane ? 1 : 2;
1266 for (y = 0; y < s->mb_height * bw; y++) {
1267 for (x = 0; x < s->mb_width * bw; x++) {
/* non-shared internal pictures carry a 16-byte inplace offset */
1268 int off = p->shared ? 0 : 16;
1269 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1270 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1271 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
/* |frame_skip_exp| selects the accumulation norm */
1273 switch (FFABS(s->frame_skip_exp)) {
1274 case 0: score = FFMAX(score, v); break;
1275 case 1: score += FFABS(v); break;
1276 case 2: score64 += v * (int64_t)v; break;
1277 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1278 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
/* negative exponent: normalize by picture area and take the root */
1287 if (s->frame_skip_exp < 0)
1288 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1289 -1.0/s->frame_skip_exp);
1291 if (score64 < s->frame_skip_threshold)
1293 if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
/* Encode a single frame with an auxiliary AVCodecContext (used by the
 * B-frame strategy estimator) via the send/receive API.
 * NOTE(review): extract is elided; the accumulation of the packet size
 * and the returned value are outside the visible lines. */
1298 static int encode_frame(AVCodecContext *c, AVFrame *frame, AVPacket *pkt)
1303 ret = avcodec_send_frame(c, frame);
1308 ret = avcodec_receive_packet(c, pkt);
/* packet consumed for its size only; drop the payload */
1311 av_packet_unref(pkt);
/* EAGAIN/EOF are expected control-flow results, not errors */
1312 } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
/* B-frame strategy 2: trial-encode downscaled versions of the queued
 * input pictures with every candidate B-frame count (0..max_b_frames)
 * using a throwaway encoder context, and return the count that minimizes
 * a rate-distortion cost (bits * lambda2 plus reconstruction error).
 * NOTE(review): extract is elided; rd initialization, loop tails, the
 * best_rd/best_b_count update and some error paths are not visible. */
1319 static int estimate_best_b_count(MpegEncContext *s)
1321 const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
/* trial encodes run at 1/(2^scale) resolution to keep this cheap */
1323 const int scale = s->brd_scale;
1324 int width = s->width >> scale;
1325 int height = s->height >> scale;
1326 int i, j, out_size, p_lambda, b_lambda, lambda2;
1327 int64_t best_rd = INT64_MAX;
1328 int best_b_count = -1;
1331 av_assert0(scale >= 0 && scale <= 3);
1333 pkt = av_packet_alloc();
1335 return AVERROR(ENOMEM);
1338 //s->next_picture_ptr->quality;
1339 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1340 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1341 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1342 if (!b_lambda) // FIXME we should do this somewhere else
1343 b_lambda = p_lambda;
1344 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* downscale the reference (i==0) and the queued inputs into tmp_frames */
1347 for (i = 0; i < s->max_b_frames + 2; i++) {
1348 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1349 s->next_picture_ptr;
1352 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1353 pre_input = *pre_input_ptr;
1354 memcpy(data, pre_input_ptr->f->data, sizeof(data));
/* non-shared input pictures carry the inplace edge offset */
1356 if (!pre_input.shared && i) {
1357 data[0] += INPLACE_OFFSET;
1358 data[1] += INPLACE_OFFSET;
1359 data[2] += INPLACE_OFFSET;
1362 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1363 s->tmp_frames[i]->linesize[0],
1365 pre_input.f->linesize[0],
1367 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1368 s->tmp_frames[i]->linesize[1],
1370 pre_input.f->linesize[1],
1371 width >> 1, height >> 1);
1372 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1373 s->tmp_frames[i]->linesize[2],
1375 pre_input.f->linesize[2],
1376 width >> 1, height >> 1);
/* try each candidate B-frame run length j */
1380 for (j = 0; j < s->max_b_frames + 1; j++) {
1384 if (!s->input_picture[j])
/* fresh context per trial so state does not leak between runs */
1387 c = avcodec_alloc_context3(NULL);
1389 ret = AVERROR(ENOMEM);
1395 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1396 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1397 c->mb_decision = s->avctx->mb_decision;
1398 c->me_cmp = s->avctx->me_cmp;
1399 c->mb_cmp = s->avctx->mb_cmp;
1400 c->me_sub_cmp = s->avctx->me_sub_cmp;
1401 c->pix_fmt = AV_PIX_FMT_YUV420P;
1402 c->time_base = s->avctx->time_base;
1403 c->max_b_frames = s->max_b_frames;
1405 ret = avcodec_open2(c, codec, NULL);
/* frame 0 is an I-frame stand-in for the existing reference */
1410 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1411 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1413 out_size = encode_frame(c, s->tmp_frames[0], pkt);
1419 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1421 for (i = 0; i < s->max_b_frames + 1; i++) {
/* every (j+1)-th frame (and the last) is a P, the rest are B */
1422 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1424 s->tmp_frames[i + 1]->pict_type = is_p ?
1425 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1426 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1428 out_size = encode_frame(c, s->tmp_frames[i + 1], pkt);
1434 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1437 /* get the delayed frames */
1438 out_size = encode_frame(c, NULL, pkt);
1443 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* add accumulated reconstruction error (PSNR bookkeeping) */
1445 rd += c->error[0] + c->error[1] + c->error[2];
1453 avcodec_free_context(&c);
1454 av_packet_unref(pkt);
1461 av_packet_free(&pkt);
1463 return best_b_count;
/* Pick the next picture to encode: applies frame skipping, chooses the
 * B-frame run length per b_frame_strategy, assigns picture types and
 * coded_picture_number, reorders input into s->reordered_input_picture[],
 * and sets up s->new_picture / s->current_picture(_ptr) for encoding.
 * NOTE(review): extract is elided; several closing braces, the
 * two-pass/strategy glue and error paths are outside the visible lines. */
1466 static int select_input_picture(MpegEncContext *s)
/* rotate the reorder queue down by one */
1470 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1471 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1472 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1474 /* set next picture type & ordering */
1475 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1476 if (s->frame_skip_threshold || s->frame_skip_factor) {
1477 if (s->picture_in_gop_number < s->gop_size &&
1478 s->next_picture_ptr &&
1479 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1480 // FIXME check that the gop check above is +-1 correct
1481 av_frame_unref(s->input_picture[0]->f);
/* skipped frame still consumes 0 bits in the VBV model */
1483 ff_vbv_update(s, 0);
/* no reference yet (or intra-only codec): emit an I-frame */
1489 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1490 !s->next_picture_ptr || s->intra_only) {
1491 s->reordered_input_picture[0] = s->input_picture[0];
1492 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1493 s->reordered_input_picture[0]->f->coded_picture_number =
1494 s->coded_picture_number++;
/* two-pass mode: picture types come from the first-pass log */
1498 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1499 for (i = 0; i < s->max_b_frames + 1; i++) {
1500 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1502 if (pict_num >= s->rc_context.num_entries)
1504 if (!s->input_picture[i]) {
1505 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1509 s->input_picture[i]->f->pict_type =
1510 s->rc_context.entry[pict_num].new_pict_type;
/* strategy 0: always use the maximum available B-frame run */
1514 if (s->b_frame_strategy == 0) {
1515 b_frames = s->max_b_frames;
1516 while (b_frames && !s->input_picture[b_frames])
/* strategy 1: shorten the run when frames look intra-heavy */
1518 } else if (s->b_frame_strategy == 1) {
1519 for (i = 1; i < s->max_b_frames + 1; i++) {
1520 if (s->input_picture[i] &&
1521 s->input_picture[i]->b_frame_score == 0) {
1522 s->input_picture[i]->b_frame_score =
1524 s->input_picture[i ]->f->data[0],
1525 s->input_picture[i - 1]->f->data[0],
1529 for (i = 0; i < s->max_b_frames + 1; i++) {
1530 if (!s->input_picture[i] ||
1531 s->input_picture[i]->b_frame_score - 1 >
1532 s->mb_num / s->b_sensitivity)
1536 b_frames = FFMAX(0, i - 1);
/* reset cached scores so they are recomputed next time */
1539 for (i = 0; i < b_frames + 1; i++) {
1540 s->input_picture[i]->b_frame_score = 0;
/* strategy 2: full rate-distortion trial encodes */
1542 } else if (s->b_frame_strategy == 2) {
1543 b_frames = estimate_best_b_count(s);
/* a user-forced non-B type inside the run truncates it */
1550 for (i = b_frames - 1; i >= 0; i--) {
1551 int type = s->input_picture[i]->f->pict_type;
1552 if (type && type != AV_PICTURE_TYPE_B)
1555 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1556 b_frames == s->max_b_frames) {
1557 av_log(s->avctx, AV_LOG_ERROR,
1558 "warning, too many B-frames in a row\n");
/* GOP boundary handling: possibly shorten the run / force an I */
1561 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1562 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1563 s->gop_size > s->picture_in_gop_number) {
1564 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1566 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1568 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1572 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1573 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
/* anchor frame first, then the B-frames, in coding order */
1576 s->reordered_input_picture[0] = s->input_picture[b_frames];
1577 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1578 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1579 s->reordered_input_picture[0]->f->coded_picture_number =
1580 s->coded_picture_number++;
1581 for (i = 0; i < b_frames; i++) {
1582 s->reordered_input_picture[i + 1] = s->input_picture[i];
1583 s->reordered_input_picture[i + 1]->f->pict_type =
1585 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1586 s->coded_picture_number++;
1591 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1593 if (s->reordered_input_picture[0]) {
/* reference flag: non-B pictures are kept as references (3) */
1594 s->reordered_input_picture[0]->reference =
1595 s->reordered_input_picture[0]->f->pict_type !=
1596 AV_PICTURE_TYPE_B ? 3 : 0;
1598 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1601 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1602 // input is a shared pix, so we can't modify it -> allocate a new
1603 // one & ensure that the shared one is reuseable
1606 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1609 pic = &s->picture[i];
1611 pic->reference = s->reordered_input_picture[0]->reference;
1612 if (alloc_picture(s, pic, 0) < 0) {
1616 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1620 /* mark us unused / free shared pic */
1621 av_frame_unref(s->reordered_input_picture[0]->f);
1622 s->reordered_input_picture[0]->shared = 0;
1624 s->current_picture_ptr = pic;
1626 // input is not a shared pix -> reuse buffer for current_pix
1627 s->current_picture_ptr = s->reordered_input_picture[0];
1628 for (i = 0; i < 4; i++) {
1629 if (s->new_picture.f->data[i])
1630 s->new_picture.f->data[i] += INPLACE_OFFSET;
1633 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1634 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1635 s->current_picture_ptr)) < 0)
1638 s->picture_number = s->new_picture.f->display_picture_number;
/* Post-encode bookkeeping for the just-coded frame: draw edge padding on
 * reference pictures (for unrestricted MV prediction), remember the last
 * picture type and lambda, and mirror state into the deprecated
 * coded_frame / error fields behind their FF_API guards.
 * NOTE(review): extract is elided; the enclosing condition's tail and
 * several closing braces are outside the visible lines. */
1643 static void frame_end(MpegEncContext *s)
1645 if (s->unrestricted_mv &&
1646 s->current_picture.reference &&
1648 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1649 int hshift = desc->log2_chroma_w;
1650 int vshift = desc->log2_chroma_h;
/* luma plane: pad all four edges by EDGE_WIDTH */
1651 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1652 s->current_picture.f->linesize[0],
1653 s->h_edge_pos, s->v_edge_pos,
1654 EDGE_WIDTH, EDGE_WIDTH,
1655 EDGE_TOP | EDGE_BOTTOM);
/* chroma planes: dimensions and edge widths scaled by subsampling */
1656 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1657 s->current_picture.f->linesize[1],
1658 s->h_edge_pos >> hshift,
1659 s->v_edge_pos >> vshift,
1660 EDGE_WIDTH >> hshift,
1661 EDGE_WIDTH >> vshift,
1662 EDGE_TOP | EDGE_BOTTOM);
1663 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1664 s->current_picture.f->linesize[2],
1665 s->h_edge_pos >> hshift,
1666 s->v_edge_pos >> vshift,
1667 EDGE_WIDTH >> hshift,
1668 EDGE_WIDTH >> vshift,
1669 EDGE_TOP | EDGE_BOTTOM);
/* remember per-type lambda for future rate-control decisions */
1674 s->last_pict_type = s->pict_type;
1675 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1676 if (s->pict_type!= AV_PICTURE_TYPE_B)
1677 s->last_non_b_pict_type = s->pict_type;
1679 #if FF_API_CODED_FRAME
1680 FF_DISABLE_DEPRECATION_WARNINGS
1681 av_frame_unref(s->avctx->coded_frame)
1682 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1683 FF_ENABLE_DEPRECATION_WARNINGS
1685 #if FF_API_ERROR_FRAME
1686 FF_DISABLE_DEPRECATION_WARNINGS
1687 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1688 sizeof(s->current_picture.encoding_error));
1689 FF_ENABLE_DEPRECATION_WARNINGS
/* Refresh the per-coefficient DCT noise-reduction offsets from the
 * running error statistics, separately for intra and inter blocks.
 * Counters are halved once they exceed 2^16 to keep a decaying average. */
1693 static void update_noise_reduction(MpegEncContext *s)
1697 for (intra = 0; intra < 2; intra++) {
/* decay: halve both error sums and the sample count */
1698 if (s->dct_count[intra] > (1 << 16)) {
1699 for (i = 0; i < 64; i++) {
1700 s->dct_error_sum[intra][i] >>= 1;
1702 s->dct_count[intra] >>= 1;
/* offset ~= noise_reduction * count / error_sum, rounded */
1705 for (i = 0; i < 64; i++) {
1706 s->dct_offset[intra][i] = (s->noise_reduction *
1707 s->dct_count[intra] +
1708 s->dct_error_sum[intra][i] / 2) /
1709 (s->dct_error_sum[intra][i] + 1);
/* Per-frame setup before encoding: rotate last/next/current picture
 * references, adjust plane pointers/strides for field pictures, select
 * the dct_unquantize functions for the target codec, and refresh noise
 * reduction tables.
 * NOTE(review): extract is elided; error-path returns and some closing
 * braces are outside the visible lines. */
1714 static int frame_start(MpegEncContext *s)
1718 /* mark & release old frames */
1719 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1720 s->last_picture_ptr != s->next_picture_ptr &&
1721 s->last_picture_ptr->f->buf[0]) {
1722 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1725 s->current_picture_ptr->f->pict_type = s->pict_type;
1726 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1728 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1729 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1730 s->current_picture_ptr)) < 0)
/* non-B frames advance the reference chain */
1733 if (s->pict_type != AV_PICTURE_TYPE_B) {
1734 s->last_picture_ptr = s->next_picture_ptr;
1736 s->next_picture_ptr = s->current_picture_ptr;
1739 if (s->last_picture_ptr) {
1740 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1741 if (s->last_picture_ptr->f->buf[0] &&
1742 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1743 s->last_picture_ptr)) < 0)
1746 if (s->next_picture_ptr) {
1747 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1748 if (s->next_picture_ptr->f->buf[0] &&
1749 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1750 s->next_picture_ptr)) < 0)
/* field coding: step into the selected field and double the strides */
1754 if (s->picture_structure!= PICT_FRAME) {
1756 for (i = 0; i < 4; i++) {
1757 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1758 s->current_picture.f->data[i] +=
1759 s->current_picture.f->linesize[i];
1761 s->current_picture.f->linesize[i] *= 2;
1762 s->last_picture.f->linesize[i] *= 2;
1763 s->next_picture.f->linesize[i] *= 2;
/* pick the dequantizer matching the bitstream semantics */
1767 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1768 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1769 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1770 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1771 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1772 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1774 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1775 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1778 if (s->dct_error_sum) {
1779 av_assert2(s->noise_reduction && s->encoding);
1780 update_noise_reduction(s);
/* Public entry point: encode one input frame into 'pkt'.
 * Loads/reorders input, allocates the output packet, runs the slice
 * thread contexts, enforces VBV constraints (re-encoding at a higher
 * lambda if the frame overflows the buffer), writes stuffing bits,
 * patches vbv_delay for CBR MPEG-1/2, and fills packet pts/dts/flags.
 * NOTE(review): extract is elided; several error paths, closing braces
 * and the re-encode loop head are outside the visible lines. */
1786 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1787 const AVFrame *pic_arg, int *got_packet)
1789 MpegEncContext *s = avctx->priv_data;
1790 int i, stuffing_count, ret;
1791 int context_count = s->slice_context_count;
1793 s->vbv_ignore_qmax = 0;
1795 s->picture_in_gop_number++;
1797 if (load_input_picture(s, pic_arg) < 0)
1800 if (select_input_picture(s) < 0) {
/* output? — only when a reordered picture is ready to encode */
1805 if (s->new_picture.f->data[0]) {
/* with one slice context and no caller buffer we can grow the
 * internal byte buffer instead of preallocating the worst case */
1806 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1807 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1809 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1810 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1813 s->mb_info_ptr = av_packet_new_side_data(pkt,
1814 AV_PKT_DATA_H263_MB_INFO,
1815 s->mb_width*s->mb_height*12);
1816 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* split the output buffer proportionally among slice threads */
1819 for (i = 0; i < context_count; i++) {
1820 int start_y = s->thread_context[i]->start_mb_y;
1821 int end_y = s->thread_context[i]-> end_mb_y;
1822 int h = s->mb_height;
1823 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1824 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1826 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1829 s->pict_type = s->new_picture.f->pict_type;
1831 ret = frame_start(s);
1835 ret = encode_picture(s, s->picture_number);
1836 if (growing_buffer) {
1837 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1838 pkt->data = s->pb.buf;
1839 pkt->size = avctx->internal->byte_buffer_size;
1846 if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->out_format == FMT_MJPEG)
1847 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
/* VBV check: if the frame is too big, raise lambda and re-encode */
1849 if (avctx->rc_buffer_size) {
1850 RateControlContext *rcc = &s->rc_context;
1851 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1852 int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
1853 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1855 if (put_bits_count(&s->pb) > max_size &&
1856 s->lambda < s->lmax) {
1857 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1858 (s->qscale + 1) / s->qscale);
1859 if (s->adaptive_quant) {
1861 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1862 s->lambda_table[i] =
1863 FFMAX(s->lambda_table[i] + min_step,
1864 s->lambda_table[i] * (s->qscale + 1) /
1867 s->mb_skipped = 0; // done in frame_start()
1868 // done in encode_picture() so we must undo it
1869 if (s->pict_type == AV_PICTURE_TYPE_P) {
1870 if (s->flipflop_rounding ||
1871 s->codec_id == AV_CODEC_ID_H263P ||
1872 s->codec_id == AV_CODEC_ID_MPEG4)
1873 s->no_rounding ^= 1;
1875 if (s->pict_type != AV_PICTURE_TYPE_B) {
1876 s->time_base = s->last_time_base;
1877 s->last_non_b_time = s->time - s->pp_time;
/* reset all slice bit writers for the retry */
1879 for (i = 0; i < context_count; i++) {
1880 PutBitContext *pb = &s->thread_context[i]->pb;
1881 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1883 s->vbv_ignore_qmax = 1;
1884 av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1888 av_assert0(avctx->rc_max_rate);
1891 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1892 ff_write_pass1_stats(s);
/* propagate per-plane encoding error for PSNR reporting */
1894 for (i = 0; i < 4; i++) {
1895 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1896 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1898 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1899 s->current_picture_ptr->encoding_error,
1900 (avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1903 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1904 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1905 s->misc_bits + s->i_tex_bits +
1907 flush_put_bits(&s->pb);
1908 s->frame_bits = put_bits_count(&s->pb);
1910 stuffing_count = ff_vbv_update(s, s->frame_bits);
1911 s->stuffing_bits = 8*stuffing_count;
/* VBV underflow: pad the frame with codec-specific stuffing */
1912 if (stuffing_count) {
1913 if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
1914 av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
1918 switch (s->codec_id) {
1919 case AV_CODEC_ID_MPEG1VIDEO:
1920 case AV_CODEC_ID_MPEG2VIDEO:
1921 while (stuffing_count--) {
1922 put_bits(&s->pb, 8, 0);
1925 case AV_CODEC_ID_MPEG4:
/* MPEG-4 stuffing: start code 0x1C3 then 0xFF bytes */
1926 put_bits(&s->pb, 16, 0);
1927 put_bits(&s->pb, 16, 0x1C3);
1928 stuffing_count -= 4;
1929 while (stuffing_count--) {
1930 put_bits(&s->pb, 8, 0xFF);
1934 av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1936 flush_put_bits(&s->pb);
1937 s->frame_bits = put_bits_count(&s->pb);
1940 /* update MPEG-1/2 vbv_delay for CBR */
1941 if (avctx->rc_max_rate &&
1942 avctx->rc_min_rate == avctx->rc_max_rate &&
1943 s->out_format == FMT_MPEG1 &&
1944 90000LL * (avctx->rc_buffer_size - 1) <=
1945 avctx->rc_max_rate * 0xFFFFLL) {
1946 AVCPBProperties *props;
1949 int vbv_delay, min_delay;
1950 double inbits = avctx->rc_max_rate *
1951 av_q2d(avctx->time_base);
1952 int minbits = s->frame_bits - 8 *
1953 (s->vbv_delay_ptr - s->pb.buf - 1);
1954 double bits = s->rc_context.buffer_index + minbits - inbits;
1957 av_log(avctx, AV_LOG_ERROR,
1958 "Internal error, negative bits\n");
1960 av_assert1(s->repeat_first_field == 0);
/* vbv_delay in 90 kHz ticks, clamped to the minimum feasible delay */
1962 vbv_delay = bits * 90000 / avctx->rc_max_rate;
1963 min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
1966 vbv_delay = FFMAX(vbv_delay, min_delay);
1968 av_assert0(vbv_delay < 0xFFFF);
/* patch the 16-bit vbv_delay field in place in the written header */
1970 s->vbv_delay_ptr[0] &= 0xF8;
1971 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1972 s->vbv_delay_ptr[1] = vbv_delay >> 5;
1973 s->vbv_delay_ptr[2] &= 0x07;
1974 s->vbv_delay_ptr[2] |= vbv_delay << 3;
1976 props = av_cpb_properties_alloc(&props_size);
1978 return AVERROR(ENOMEM);
/* 27 MHz units for CPB side data (90 kHz * 300) */
1979 props->vbv_delay = vbv_delay * 300;
1981 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
1982 (uint8_t*)props, props_size);
1988 #if FF_API_VBV_DELAY
1989 FF_DISABLE_DEPRECATION_WARNINGS
1990 avctx->vbv_delay = vbv_delay * 300;
1991 FF_ENABLE_DEPRECATION_WARNINGS
1994 s->total_bits += s->frame_bits;
1996 pkt->pts = s->current_picture.f->pts;
/* with B-frames, dts lags pts by one reordered frame */
1997 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
1998 if (!s->current_picture.f->coded_picture_number)
1999 pkt->dts = pkt->pts - s->dts_delta;
2001 pkt->dts = s->reordered_pts;
2002 s->reordered_pts = pkt->pts;
2004 pkt->dts = pkt->pts;
2005 if (s->current_picture.f->key_frame)
2006 pkt->flags |= AV_PKT_FLAG_KEY;
2008 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2013 /* release non-reference frames */
2014 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2015 if (!s->picture[i].reference)
2016 ff_mpeg_unref_picture(avctx, &s->picture[i]);
2019 av_assert1((s->frame_bits & 7) == 0);
2021 pkt->size = s->frame_bits / 8;
2022 *got_packet = !!pkt->size;
/* Zero out block 'n' when it contains only a few small coefficients whose
 * perceptual cost (weighted by zig-zag position via 'tab') is below
 * 'threshold' — cheaper to signal "no coefficients" than to code them.
 * A negative threshold additionally protects the DC coefficient.
 * NOTE(review): extract is elided; score accumulation for small levels
 * and the early-return for large levels are outside the visible lines. */
2026 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2027 int n, int threshold)
/* positional weight: low-frequency coefficients cost more to drop */
2029 static const char tab[64] = {
2030 3, 2, 2, 1, 1, 1, 1, 1,
2031 1, 1, 1, 1, 1, 1, 1, 1,
2032 1, 1, 1, 1, 1, 1, 1, 1,
2033 0, 0, 0, 0, 0, 0, 0, 0,
2034 0, 0, 0, 0, 0, 0, 0, 0,
2035 0, 0, 0, 0, 0, 0, 0, 0,
2036 0, 0, 0, 0, 0, 0, 0, 0,
2037 0, 0, 0, 0, 0, 0, 0, 0
2042 int16_t *block = s->block[n];
2043 const int last_index = s->block_last_index[n];
2046 if (threshold < 0) {
2048 threshold = -threshold;
2052 /* Are all we could set to zero already zero? */
2053 if (last_index <= skip_dc - 1)
2056 for (i = 0; i <= last_index; i++) {
2057 const int j = s->intra_scantable.permutated[i];
2058 const int level = FFABS(block[j]);
2060 if (skip_dc && i == 0)
2064 } else if (level > 1) {
2070 if (score >= threshold)
/* eliminate: clear every (non-protected) coefficient */
2072 for (i = skip_dc; i <= last_index; i++) {
2073 const int j = s->intra_scantable.permutated[i];
2077 s->block_last_index[n] = 0;
2079 s->block_last_index[n] = -1;
/* Clamp quantized coefficients of 'block' to the codec's representable
 * range [min_qcoeff, max_qcoeff]; warns once per MB when clipping occurred
 * and macroblock decision is too simple to have avoided the overflow.
 * NOTE(review): extract is elided; the clamping assignments and overflow
 * counting are outside the visible lines. */
2082 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2086 const int maxlevel = s->max_qcoeff;
2087 const int minlevel = s->min_qcoeff;
2091 i = 1; // skip clipping of intra dc
2095 for (; i <= last_index; i++) {
2096 const int j = s->intra_scantable.permutated[i];
2097 int level = block[j];
2099 if (level > maxlevel) {
2102 } else if (level < minlevel) {
2110 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2111 av_log(s->avctx, AV_LOG_INFO,
2112 "warning, clipping %d dct coefficients to %d..%d\n",
2113 overflow, minlevel, maxlevel);
/* Compute an 8x8 perceptual weight table from local pixel variance in a
 * 3x3 neighborhood: flat areas get larger weights (errors more visible).
 * NOTE(review): extract is elided; count/sum/sqr initialization per pixel
 * is outside the visible lines. */
2116 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2120 for (y = 0; y < 8; y++) {
2121 for (x = 0; x < 8; x++) {
/* clamp the 3x3 neighborhood to the 8x8 block bounds */
2127 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2128 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2129 int v = ptr[x2 + y2 * stride];
/* weight ∝ local standard deviation (sqrt of variance*count) */
2135 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2140 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2141 int motion_x, int motion_y,
2142 int mb_block_height,
2146 int16_t weight[12][64];
2147 int16_t orig[12][64];
2148 const int mb_x = s->mb_x;
2149 const int mb_y = s->mb_y;
2152 int dct_offset = s->linesize * 8; // default for progressive frames
2153 int uv_dct_offset = s->uvlinesize * 8;
2154 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2155 ptrdiff_t wrap_y, wrap_c;
2157 for (i = 0; i < mb_block_count; i++)
2158 skip_dct[i] = s->skipdct;
2160 if (s->adaptive_quant) {
2161 const int last_qp = s->qscale;
2162 const int mb_xy = mb_x + mb_y * s->mb_stride;
2164 s->lambda = s->lambda_table[mb_xy];
2167 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2168 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2169 s->dquant = s->qscale - last_qp;
2171 if (s->out_format == FMT_H263) {
2172 s->dquant = av_clip(s->dquant, -2, 2);
2174 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2176 if (s->pict_type == AV_PICTURE_TYPE_B) {
2177 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2180 if (s->mv_type == MV_TYPE_8X8)
2186 ff_set_qscale(s, last_qp + s->dquant);
2187 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2188 ff_set_qscale(s, s->qscale + s->dquant);
2190 wrap_y = s->linesize;
2191 wrap_c = s->uvlinesize;
2192 ptr_y = s->new_picture.f->data[0] +
2193 (mb_y * 16 * wrap_y) + mb_x * 16;
2194 ptr_cb = s->new_picture.f->data[1] +
2195 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2196 ptr_cr = s->new_picture.f->data[2] +
2197 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2199 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2200 uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2201 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2202 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2203 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2205 16, 16, mb_x * 16, mb_y * 16,
2206 s->width, s->height);
2208 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2210 mb_block_width, mb_block_height,
2211 mb_x * mb_block_width, mb_y * mb_block_height,
2213 ptr_cb = ebuf + 16 * wrap_y;
2214 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2216 mb_block_width, mb_block_height,
2217 mb_x * mb_block_width, mb_y * mb_block_height,
2219 ptr_cr = ebuf + 16 * wrap_y + 16;
2223 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2224 int progressive_score, interlaced_score;
2226 s->interlaced_dct = 0;
2227 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2228 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2229 NULL, wrap_y, 8) - 400;
2231 if (progressive_score > 0) {
2232 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2233 NULL, wrap_y * 2, 8) +
2234 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2235 NULL, wrap_y * 2, 8);
2236 if (progressive_score > interlaced_score) {
2237 s->interlaced_dct = 1;
2239 dct_offset = wrap_y;
2240 uv_dct_offset = wrap_c;
2242 if (s->chroma_format == CHROMA_422 ||
2243 s->chroma_format == CHROMA_444)
2249 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2250 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2251 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2252 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2254 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2258 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2259 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2260 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2261 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2262 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2263 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2264 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2265 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2266 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2267 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2268 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2269 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2273 op_pixels_func (*op_pix)[4];
2274 qpel_mc_func (*op_qpix)[16];
2275 uint8_t *dest_y, *dest_cb, *dest_cr;
2277 dest_y = s->dest[0];
2278 dest_cb = s->dest[1];
2279 dest_cr = s->dest[2];
2281 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2282 op_pix = s->hdsp.put_pixels_tab;
2283 op_qpix = s->qdsp.put_qpel_pixels_tab;
2285 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2286 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2289 if (s->mv_dir & MV_DIR_FORWARD) {
2290 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2291 s->last_picture.f->data,
2293 op_pix = s->hdsp.avg_pixels_tab;
2294 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2296 if (s->mv_dir & MV_DIR_BACKWARD) {
2297 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2298 s->next_picture.f->data,
2302 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2303 int progressive_score, interlaced_score;
2305 s->interlaced_dct = 0;
2306 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2307 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2311 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2312 progressive_score -= 400;
2314 if (progressive_score > 0) {
2315 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2317 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2321 if (progressive_score > interlaced_score) {
2322 s->interlaced_dct = 1;
2324 dct_offset = wrap_y;
2325 uv_dct_offset = wrap_c;
2327 if (s->chroma_format == CHROMA_422)
2333 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2334 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2335 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2336 dest_y + dct_offset, wrap_y);
2337 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2338 dest_y + dct_offset + 8, wrap_y);
2340 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2344 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2345 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2346 if (!s->chroma_y_shift) { /* 422 */
2347 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2348 dest_cb + uv_dct_offset, wrap_c);
2349 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2350 dest_cr + uv_dct_offset, wrap_c);
2353 /* pre quantization */
2354 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2355 2 * s->qscale * s->qscale) {
2357 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2359 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2361 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2362 wrap_y, 8) < 20 * s->qscale)
2364 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2365 wrap_y, 8) < 20 * s->qscale)
2367 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2369 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2371 if (!s->chroma_y_shift) { /* 422 */
2372 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2373 dest_cb + uv_dct_offset,
2374 wrap_c, 8) < 20 * s->qscale)
2376 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2377 dest_cr + uv_dct_offset,
2378 wrap_c, 8) < 20 * s->qscale)
2384 if (s->quantizer_noise_shaping) {
2386 get_visual_weight(weight[0], ptr_y , wrap_y);
2388 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2390 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2392 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2394 get_visual_weight(weight[4], ptr_cb , wrap_c);
2396 get_visual_weight(weight[5], ptr_cr , wrap_c);
2397 if (!s->chroma_y_shift) { /* 422 */
2399 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2402 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2405 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2408 /* DCT & quantize */
2409 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2411 for (i = 0; i < mb_block_count; i++) {
2414 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2415 // FIXME we could decide to change to quantizer instead of
2417 // JS: I don't think that would be a good idea it could lower
2418 // quality instead of improve it. Just INTRADC clipping
2419 // deserves changes in quantizer
2421 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2423 s->block_last_index[i] = -1;
2425 if (s->quantizer_noise_shaping) {
2426 for (i = 0; i < mb_block_count; i++) {
2428 s->block_last_index[i] =
2429 dct_quantize_refine(s, s->block[i], weight[i],
2430 orig[i], i, s->qscale);
2435 if (s->luma_elim_threshold && !s->mb_intra)
2436 for (i = 0; i < 4; i++)
2437 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2438 if (s->chroma_elim_threshold && !s->mb_intra)
2439 for (i = 4; i < mb_block_count; i++)
2440 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2442 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2443 for (i = 0; i < mb_block_count; i++) {
2444 if (s->block_last_index[i] == -1)
2445 s->coded_score[i] = INT_MAX / 256;
2450 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2451 s->block_last_index[4] =
2452 s->block_last_index[5] = 0;
2454 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2455 if (!s->chroma_y_shift) { /* 422 / 444 */
2456 for (i=6; i<12; i++) {
2457 s->block_last_index[i] = 0;
2458 s->block[i][0] = s->block[4][0];
2463 // non c quantize code returns incorrect block_last_index FIXME
2464 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2465 for (i = 0; i < mb_block_count; i++) {
2467 if (s->block_last_index[i] > 0) {
2468 for (j = 63; j > 0; j--) {
2469 if (s->block[i][s->intra_scantable.permutated[j]])
2472 s->block_last_index[i] = j;
2477 /* huffman encode */
2478 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2479 case AV_CODEC_ID_MPEG1VIDEO:
2480 case AV_CODEC_ID_MPEG2VIDEO:
2481 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2482 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2484 case AV_CODEC_ID_MPEG4:
2485 if (CONFIG_MPEG4_ENCODER)
2486 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2488 case AV_CODEC_ID_MSMPEG4V2:
2489 case AV_CODEC_ID_MSMPEG4V3:
2490 case AV_CODEC_ID_WMV1:
2491 if (CONFIG_MSMPEG4_ENCODER)
2492 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2494 case AV_CODEC_ID_WMV2:
2495 if (CONFIG_WMV2_ENCODER)
2496 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2498 case AV_CODEC_ID_H261:
2499 if (CONFIG_H261_ENCODER)
2500 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2502 case AV_CODEC_ID_H263:
2503 case AV_CODEC_ID_H263P:
2504 case AV_CODEC_ID_FLV1:
2505 case AV_CODEC_ID_RV10:
2506 case AV_CODEC_ID_RV20:
2507 if (CONFIG_H263_ENCODER)
2508 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2510 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
2511 case AV_CODEC_ID_MJPEG:
2512 case AV_CODEC_ID_AMV:
2513 ff_mjpeg_encode_mb(s, s->block);
2516 case AV_CODEC_ID_SPEEDHQ:
2517 if (CONFIG_SPEEDHQ_ENCODER)
2518 ff_speedhq_encode_mb(s, s->block);
/* Encode one macroblock: dispatch to encode_mb_internal() with the block
 * geometry matching the stream's chroma subsampling —
 * 4:2:0 -> 8x8 chroma, 6 blocks/MB; 4:2:2 -> 16x8 chroma, 8 blocks/MB;
 * otherwise (4:4:4) -> 16x16 chroma, 12 blocks/MB.
 * NOTE(review): the '{' / '}' lines of this function are elided in this
 * excerpt. */
2525 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2527 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2528 else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2529 else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/* Snapshot (into d) the encoder-state fields that a trial encode of one
 * macroblock mutates, so the state can be restored before re-encoding the
 * same MB with different parameters (used by encode_mb_hq()).
 * NOTE(review): several lines (braces, the mv memcpy, the last_dc loop
 * header) are elided in this excerpt. */
2532 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2535 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
    /* bitstream prediction state */
2538 d->mb_skip_run= s->mb_skip_run;
2540 d->last_dc[i] = s->last_dc[i];
    /* per-frame bit-usage statistics */
2543 d->mv_bits= s->mv_bits;
2544 d->i_tex_bits= s->i_tex_bits;
2545 d->p_tex_bits= s->p_tex_bits;
2546 d->i_count= s->i_count;
2547 d->f_count= s->f_count;
2548 d->b_count= s->b_count;
2549 d->skip_count= s->skip_count;
2550 d->misc_bits= s->misc_bits;
    /* quantizer state */
2554 d->qscale= s->qscale;
2555 d->dquant= s->dquant;
2557 d->esc3_level_length= s->esc3_level_length;
/* Counterpart of copy_context_before_encode(): copy (into d) the state a
 * trial encode produced — prediction state, bit statistics, MB decision
 * results and (when data partitioning is on) the partition PutBitContexts —
 * so the winning candidate's state can be adopted.
 * NOTE(review): braces, loop headers and a few assignments are elided in
 * this excerpt. */
2560 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
    /* motion vector state */
2563 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2564 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
    /* bitstream prediction state */
2567 d->mb_skip_run= s->mb_skip_run;
2569 d->last_dc[i] = s->last_dc[i];
    /* per-frame bit-usage statistics */
2572 d->mv_bits= s->mv_bits;
2573 d->i_tex_bits= s->i_tex_bits;
2574 d->p_tex_bits= s->p_tex_bits;
2575 d->i_count= s->i_count;
2576 d->f_count= s->f_count;
2577 d->b_count= s->b_count;
2578 d->skip_count= s->skip_count;
2579 d->misc_bits= s->misc_bits;
    /* MB decision results of the trial encode */
2581 d->mb_intra= s->mb_intra;
2582 d->mb_skipped= s->mb_skipped;
2583 d->mv_type= s->mv_type;
2584 d->mv_dir= s->mv_dir;
2586 if(s->data_partitioning){
2588 d->tex_pb= s->tex_pb;
2592 d->block_last_index[i]= s->block_last_index[i];
2593 d->interlaced_dct= s->interlaced_dct;
2594 d->qscale= s->qscale;
2596 d->esc3_level_length= s->esc3_level_length;
/* Trial-encode the current MB as candidate `type` into the scratch
 * bitstream pb[*next_block] (writing pixels into the rd_scratchpad instead
 * of the real destination), compute its score — bit count, or
 * lambda-weighted rate + SSE distortion under FF_MB_DECISION_RD — and, if
 * it beats *dmin, record it as the new best candidate via
 * copy_context_after_encode(best, ...).
 * NOTE(review): the score-comparison / *next_block toggle lines are elided
 * in this excerpt. */
2599 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2600 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2601 int *dmin, int *next_block, int motion_x, int motion_y)
2604 uint8_t *dest_backup[3];
    /* restore the pre-trial encoder state saved by the caller */
2606 copy_context_before_encode(s, backup, type);
2608 s->block= s->blocks[*next_block];
2609 s->pb= pb[*next_block];
2610 if(s->data_partitioning){
2611 s->pb2 = pb2 [*next_block];
2612 s->tex_pb= tex_pb[*next_block];
    /* redirect reconstruction output into the RD scratchpad */
2616 memcpy(dest_backup, s->dest, sizeof(s->dest));
2617 s->dest[0] = s->sc.rd_scratchpad;
2618 s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2619 s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2620 av_assert0(s->linesize >= 32); //FIXME
2623 encode_mb(s, motion_x, motion_y);
    /* rate in bits, summed over all partitions when data partitioning */
2625 score= put_bits_count(&s->pb);
2626 if(s->data_partitioning){
2627 score+= put_bits_count(&s->pb2);
2628 score+= put_bits_count(&s->tex_pb);
2631 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2632 ff_mpv_reconstruct_mb(s, s->block);
    /* RD cost: rate*lambda2 + distortion << FF_LAMBDA_SHIFT */
2634 score *= s->lambda2;
2635 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2639 memcpy(s->dest, dest_backup, sizeof(s->dest));
2646 copy_context_after_encode(best, s, type);
/* Sum of squared errors between two w x h pixel regions.
 * Uses the optimized mecc.sse functions for the common 16x16 and 8x8
 * sizes; otherwise falls back to a scalar loop using ff_square_tab
 * (offset by 256 so negative differences index correctly).
 * NOTE(review): the 16x16 `if`, the fallback loop headers and the final
 * return are elided in this excerpt. */
2650 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2651 const uint32_t *sq = ff_square_tab + 256;
2656 return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2657 else if(w==8 && h==8)
2658 return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2662 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/* Distortion of the current macroblock: compare the reconstructed MB
 * (s->dest) against the source picture (s->new_picture). Full 16x16 MBs
 * use the fast mecc.sse / mecc.nsse (noise-shaped SSE, per mb_cmp)
 * functions; edge MBs clipped by picture width/height fall back to the
 * generic sse() with the clipped w/h.
 * NOTE(review): the `w`/`h` declarations and the full-MB size check are
 * elided in this excerpt. */
2671 static int sse_mb(MpegEncContext *s){
2675 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2676 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2679 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2680 return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2681 s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2682 s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2684 return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2685 s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2686 s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
    /* clipped (picture-edge) macroblock: generic path, chroma at half size */
2689 return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2690 +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2691 +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/* Slice-thread worker: run the pre-pass P-frame motion estimation
 * (ff_pre_estimate_p_frame_motion) over this slice context's MB rows.
 * Iterates bottom-up / right-to-left, with the ME search window set from
 * avctx->pre_dia_size.
 * NOTE(review): braces and the return statement are elided in this
 * excerpt. */
2694 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2695 MpegEncContext *s= *(void**)arg;
2699 s->me.dia_size= s->avctx->pre_dia_size;
2700 s->first_slice_line=1;
2701 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2702 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2703 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2705 s->first_slice_line=0;
/* Slice-thread worker: full motion estimation for this slice context.
 * For each MB, advances the block indices and calls the B- or P-frame
 * estimator, which stores the chosen MVs and candidate MB types in the
 * context tables for the later encode pass.
 * NOTE(review): closing braces and the return statement are elided in this
 * excerpt. */
2713 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2714 MpegEncContext *s= *(void**)arg;
2716 s->me.dia_size= s->avctx->dia_size;
2717 s->first_slice_line=1;
2718 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2719 s->mb_x=0; //for block init below
2720 ff_init_block_index(s);
2721 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2722 s->block_index[0]+=2;
2723 s->block_index[1]+=2;
2724 s->block_index[2]+=2;
2725 s->block_index[3]+=2;
2727 /* compute motion vector & mb_type and store in context */
2728 if(s->pict_type==AV_PICTURE_TYPE_B)
2729 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y)
2731 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2733 s->first_slice_line=0;
/* Slice-thread worker: compute per-macroblock spatial statistics of the
 * source luma — variance (mb_var) and mean (mb_mean) — and accumulate the
 * slice's variance sum into me.mb_var_sum_temp (merged across slices
 * later). Used by rate control / adaptive quantization.
 * NOTE(review): the xx/yy coordinate computations and closing braces are
 * elided in this excerpt. */
2738 static int mb_var_thread(AVCodecContext *c, void *arg){
2739 MpegEncContext *s= *(void**)arg;
2742 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2743 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2746 uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2748 int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
    /* var = E[x^2] - E[x]^2 over the 16x16 block, with rounding bias */
2750 varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2751 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2753 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2754 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2755 s->me.mb_var_sum_temp += varc;
/* Finish the current slice: emit codec-specific trailing data (MPEG-4
 * partition merge + stuffing, MJPEG stuffing, SpeedHQ slice end), then
 * byte-align the main bitstream. Under 2-pass encoding the alignment bits
 * are charged to misc_bits. */
2761 static void write_slice_end(MpegEncContext *s){
2762 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2763 if(s->partitioned_frame){
2764 ff_mpeg4_merge_partitions(s);
2767 ff_mpeg4_stuffing(&s->pb);
2768 } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
2769 s->out_format == FMT_MJPEG) {
2770 ff_mjpeg_encode_stuffing(s);
2771 } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
2772 ff_speedhq_end_slice(s);
    /* byte-align the output */
2775 flush_put_bits(&s->pb);
2777 if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2778 s->misc_bits+= get_bits_diff(s);
/* Fill the most recently reserved 12-byte record of the AV_PKT_DATA_H263_MB_INFO
 * side data: bit offset of the MB in the packet, quantizer, GOB number,
 * MB address within the GOB, and the H.263 motion vector predictors
 * (hmv1/vmv1; the 4MV predictors are left as 0).
 * NOTE(review): the declaration of pred_x/pred_y is on a line elided from
 * this excerpt. */
2781 static void write_mb_info(MpegEncContext *s)
    /* last reserved 12-byte slot in the side-data buffer */
2783 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2784 int offset = put_bits_count(&s->pb);
2785 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2786 int gobn = s->mb_y / s->gob_index;
2788 if (CONFIG_H263_ENCODER)
2789 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2790 bytestream_put_le32(&ptr, offset);
2791 bytestream_put_byte(&ptr, s->qscale);
2792 bytestream_put_byte(&ptr, gobn);
2793 bytestream_put_le16(&ptr, mba);
2794 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2795 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2796 /* 4MV not implemented */
2797 bytestream_put_byte(&ptr, 0); /* hmv2 */
2798 bytestream_put_byte(&ptr, 0); /* vmv2 */
/* Track mb_info side-data slots: once at least s->mb_info bytes have been
 * written since the last record, reserve a new 12-byte slot (startcode==0
 * path) or, at a start code (startcode!=0), fill the pending slot via
 * write_mb_info().
 * NOTE(review): the early-return guards and the write_mb_info() call are
 * on lines elided from this excerpt. */
2801 static void update_mb_info(MpegEncContext *s, int startcode)
2805 if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
2806 s->mb_info_size += 12;
2807 s->prev_mb_info = s->last_mb_info;
2810 s->prev_mb_info = put_bytes_count(&s->pb, 0);
2811 /* This might have incremented mb_info_size above, and we return without
2812 * actually writing any info into that slot yet. But in that case,
2813 * this will be called again after writing the
2814 * start code, actually writing the mb info. */
2818 s->last_mb_info = put_bytes_count(&s->pb, 0);
2819 if (!s->mb_info_size)
2820 s->mb_info_size += 12;
/* Grow the shared output bitstream buffer when fewer than `threshold`
 * bytes remain. Only possible with a single slice context and when the
 * PutBitContext writes into avctx->internal->byte_buffer; the
 * ptr_lastgob and vbv_delay_ptr pointers into the old buffer are saved as
 * offsets and rebased onto the new buffer.
 * Returns 0 on success (elided line), AVERROR(ENOMEM) on allocation
 * failure/overflow, AVERROR(EINVAL) if still below threshold afterwards. */
2824 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2826 if (put_bytes_left(&s->pb, 0) < threshold
2827 && s->slice_context_count == 1
2828 && s->pb.buf == s->avctx->internal->byte_buffer) {
    /* save pointers into the old buffer as offsets before reallocating */
2829 int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2830 int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2832 uint8_t *new_buffer = NULL;
2833 int new_buffer_size = 0;
2835 if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2836 av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2837 return AVERROR(ENOMEM);
2842 av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2843 s->avctx->internal->byte_buffer_size + size_increase);
2845 return AVERROR(ENOMEM);
2847 memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2848 av_free(s->avctx->internal->byte_buffer);
2849 s->avctx->internal->byte_buffer = new_buffer;
2850 s->avctx->internal->byte_buffer_size = new_buffer_size;
2851 rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2852 s->ptr_lastgob = s->pb.buf + lastgob_pos;
2853 s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2855 if (put_bytes_left(&s->pb, 0) < threshold)
2856 return AVERROR(EINVAL);
/* Slice-thread worker: the main macroblock encoding loop for one slice
 * context. For each MB it handles resync/GOB/slice headers and RTP packet
 * boundaries, then either trial-encodes every candidate MB type (and
 * optionally nearby quantizers under FF_MPV_FLAG_QP_RD) keeping the
 * rate(-distortion) best via encode_mb_hq(), or — when exactly one
 * candidate type exists — encodes it directly with encode_mb().
 * Also accumulates PSNR error sums and runs the H.263 loop filter.
 * NOTE(review): this listing is an excerpt; many original lines (braces,
 * loop headers, declarations of i/j/dmin/next_block/qp/dc/ac etc.) are
 * elided, so comments below describe only what is visible. */
2860 static int encode_thread(AVCodecContext *c, void *arg){
2861 MpegEncContext *s= *(void**)arg;
2862 int mb_x, mb_y, mb_y_order;
2863 int chr_h= 16>>s->chroma_y_shift;
2865 MpegEncContext best_s = { 0 }, backup_s;
    /* double-buffered scratch bitstreams for trial encodes (main, partition
     * 2, texture partition) */
2866 uint8_t bit_buf[2][MAX_MB_BYTES];
2867 uint8_t bit_buf2[2][MAX_MB_BYTES];
2868 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2869 PutBitContext pb[2], pb2[2], tex_pb[2];
2872 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2873 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2874 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2877 s->last_bits= put_bits_count(&s->pb);
2888 /* init last dc values */
2889 /* note: quant matrix value (8) is implied here */
2890 s->last_dc[i] = 128 << s->intra_dc_precision;
2892 s->current_picture.encoding_error[i] = 0;
2894 if(s->codec_id==AV_CODEC_ID_AMV){
2895 s->last_dc[0] = 128*8/13;
2896 s->last_dc[1] = 128*8/14;
2897 s->last_dc[2] = 128*8/14;
2900 memset(s->last_mv, 0, sizeof(s->last_mv));
2904 switch(s->codec_id){
2905 case AV_CODEC_ID_H263:
2906 case AV_CODEC_ID_H263P:
2907 case AV_CODEC_ID_FLV1:
2908 if (CONFIG_H263_ENCODER)
2909 s->gob_index = H263_GOB_HEIGHT(s->height);
2911 case AV_CODEC_ID_MPEG4:
2912 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2913 ff_mpeg4_init_partitions(s);
    /* main loop over macroblock rows; SpeedHQ visits rows in slice order */
2919 s->first_slice_line = 1;
2920 s->ptr_lastgob = s->pb.buf;
2921 for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
2922 if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
2924 mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
2925 if (first_in_slice && mb_y_order != s->start_mb_y)
2926 ff_speedhq_end_slice(s);
2927 s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
2934 ff_set_qscale(s, s->qscale);
2935 ff_init_block_index(s);
2937 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2938 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2939 int mb_type= s->mb_type[xy];
    /* make sure there is always room for one worst-case MB */
2943 int size_increase = s->avctx->internal->byte_buffer_size/4
2944 + s->mb_width*MAX_MB_BYTES;
2946 ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
2947 if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
2948 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2951 if(s->data_partitioning){
2952 if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
2953 put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
2954 av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2960 s->mb_y = mb_y; // moved into loop, can get changed by H.261
2961 ff_update_block_index(s);
2963 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2964 ff_h261_reorder_mb_index(s);
2965 xy= s->mb_y*s->mb_stride + s->mb_x;
2966 mb_type= s->mb_type[xy];
2969 /* write gob / video packet header */
2971 int current_packet_size, is_gob_start;
2973 current_packet_size = put_bytes_count(&s->pb, 1)
2974 - (s->ptr_lastgob - s->pb.buf);
2976 is_gob_start = s->rtp_payload_size &&
2977 current_packet_size >= s->rtp_payload_size &&
2980 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
    /* codec-specific constraints on where a GOB/slice may start */
2982 switch(s->codec_id){
2983 case AV_CODEC_ID_H263:
2984 case AV_CODEC_ID_H263P:
2985 if(!s->h263_slice_structured)
2986 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2988 case AV_CODEC_ID_MPEG2VIDEO:
2989 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2990 case AV_CODEC_ID_MPEG1VIDEO:
2991 if(s->mb_skip_run) is_gob_start=0;
2993 case AV_CODEC_ID_MJPEG:
2994 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2999 if(s->start_mb_y != mb_y || mb_x!=0){
3002 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3003 ff_mpeg4_init_partitions(s);
3007 av_assert2((put_bits_count(&s->pb)&7) == 0);
3008 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
    /* error_rate debug option: pseudo-randomly drop packets */
3010 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3011 int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->mb_x + s->mb_y;
3012 int d = 100 / s->error_rate;
3014 current_packet_size=0;
3015 s->pb.buf_ptr= s->ptr_lastgob;
3016 av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3020 #if FF_API_RTP_CALLBACK
3021 FF_DISABLE_DEPRECATION_WARNINGS
3022 if (s->avctx->rtp_callback){
3023 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3024 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3026 FF_ENABLE_DEPRECATION_WARNINGS
3028 update_mb_info(s, 1);
    /* emit the codec-specific resync/slice/GOB header */
3030 switch(s->codec_id){
3031 case AV_CODEC_ID_MPEG4:
3032 if (CONFIG_MPEG4_ENCODER) {
3033 ff_mpeg4_encode_video_packet_header(s);
3034 ff_mpeg4_clean_buffers(s);
3037 case AV_CODEC_ID_MPEG1VIDEO:
3038 case AV_CODEC_ID_MPEG2VIDEO:
3039 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3040 ff_mpeg1_encode_slice_header(s);
3041 ff_mpeg1_clean_buffers(s);
3044 case AV_CODEC_ID_H263:
3045 case AV_CODEC_ID_H263P:
3046 if (CONFIG_H263_ENCODER)
3047 ff_h263_encode_gob_header(s, mb_y);
3051 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3052 int bits= put_bits_count(&s->pb);
3053 s->misc_bits+= bits - s->last_bits;
3057 s->ptr_lastgob += current_packet_size;
3058 s->first_slice_line=1;
3059 s->resync_mb_x=mb_x;
3060 s->resync_mb_y=mb_y;
3064 if( (s->resync_mb_x == s->mb_x)
3065 && s->resync_mb_y+1 == s->mb_y){
3066 s->first_slice_line=0;
3070 s->dquant=0; //only for QP_RD
3072 update_mb_info(s, 0);
    /* multiple candidate MB types (or QP-RD): trial-encode each candidate
     * with encode_mb_hq() and keep the rate(-distortion) best */
3074 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3076 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3078 copy_context_before_encode(&backup_s, s, -1);
3080 best_s.data_partitioning= s->data_partitioning;
3081 best_s.partitioned_frame= s->partitioned_frame;
3082 if(s->data_partitioning){
3083 backup_s.pb2= s->pb2;
3084 backup_s.tex_pb= s->tex_pb;
3087 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3088 s->mv_dir = MV_DIR_FORWARD;
3089 s->mv_type = MV_TYPE_16X16;
3091 s->mv[0][0][0] = s->p_mv_table[xy][0];
3092 s->mv[0][0][1] = s->p_mv_table[xy][1];
3093 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3094 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3096 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3097 s->mv_dir = MV_DIR_FORWARD;
3098 s->mv_type = MV_TYPE_FIELD;
3101 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3102 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3103 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3105 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3106 &dmin, &next_block, 0, 0);
3108 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3109 s->mv_dir = MV_DIR_FORWARD;
3110 s->mv_type = MV_TYPE_16X16;
3114 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3115 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3117 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3118 s->mv_dir = MV_DIR_FORWARD;
3119 s->mv_type = MV_TYPE_8X8;
3122 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3123 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3125 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3126 &dmin, &next_block, 0, 0);
3128 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3129 s->mv_dir = MV_DIR_FORWARD;
3130 s->mv_type = MV_TYPE_16X16;
3132 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3133 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3134 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3135 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3137 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3138 s->mv_dir = MV_DIR_BACKWARD;
3139 s->mv_type = MV_TYPE_16X16;
3141 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3142 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3143 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3144 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3146 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3147 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3148 s->mv_type = MV_TYPE_16X16;
3150 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3151 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3152 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3153 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3154 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3155 &dmin, &next_block, 0, 0);
3157 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3158 s->mv_dir = MV_DIR_FORWARD;
3159 s->mv_type = MV_TYPE_FIELD;
3162 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3163 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3164 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3166 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3167 &dmin, &next_block, 0, 0);
3169 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3170 s->mv_dir = MV_DIR_BACKWARD;
3171 s->mv_type = MV_TYPE_FIELD;
3174 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3175 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3176 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3178 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3179 &dmin, &next_block, 0, 0);
3181 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3182 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3183 s->mv_type = MV_TYPE_FIELD;
3185 for(dir=0; dir<2; dir++){
3187 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3188 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3189 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3192 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3193 &dmin, &next_block, 0, 0);
3195 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3197 s->mv_type = MV_TYPE_16X16;
3201 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3202 &dmin, &next_block, 0, 0);
3203 if(s->h263_pred || s->h263_aic){
3205 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3207 ff_clean_intra_table_entries(s); //old mode?
    /* QP-RD: additionally retry the best 16x16 candidate at nearby
     * quantizers (last_qp +/- 1, +/- 2), restoring DC/AC prediction
     * state when a trial loses */
3211 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3212 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3213 const int last_qp= backup_s.qscale;
3216 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3217 static const int dquant_tab[4]={-1,1,-2,2};
3218 int storecoefs = s->mb_intra && s->dc_val[0];
3220 av_assert2(backup_s.dquant == 0);
3223 s->mv_dir= best_s.mv_dir;
3224 s->mv_type = MV_TYPE_16X16;
3225 s->mb_intra= best_s.mb_intra;
3226 s->mv[0][0][0] = best_s.mv[0][0][0];
3227 s->mv[0][0][1] = best_s.mv[0][0][1];
3228 s->mv[1][0][0] = best_s.mv[1][0][0];
3229 s->mv[1][0][1] = best_s.mv[1][0][1];
3231 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3232 for(; qpi<4; qpi++){
3233 int dquant= dquant_tab[qpi];
3234 qp= last_qp + dquant;
3235 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3237 backup_s.dquant= dquant;
3240 dc[i]= s->dc_val[0][ s->block_index[i] ];
3241 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3245 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3246 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3247 if(best_s.qscale != qp){
3250 s->dc_val[0][ s->block_index[i] ]= dc[i];
3251 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3258 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3259 int mx= s->b_direct_mv_table[xy][0];
3260 int my= s->b_direct_mv_table[xy][1];
3262 backup_s.dquant = 0;
3263 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3265 ff_mpeg4_set_direct_mv(s, mx, my);
3266 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3267 &dmin, &next_block, mx, my);
3269 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3270 backup_s.dquant = 0;
3271 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3273 ff_mpeg4_set_direct_mv(s, 0, 0);
3274 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3275 &dmin, &next_block, 0, 0);
    /* SKIP-RD: if the best inter candidate coded no coefficients, also try
     * the MB as skipped */
3277 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3280 coded |= s->block_last_index[i];
3283 memcpy(s->mv, best_s.mv, sizeof(s->mv));
3284 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3285 mx=my=0; //FIXME find the one we actually used
3286 ff_mpeg4_set_direct_mv(s, mx, my);
3287 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3295 s->mv_dir= best_s.mv_dir;
3296 s->mv_type = best_s.mv_type;
3298 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3299 s->mv[0][0][1] = best_s.mv[0][0][1];
3300 s->mv[1][0][0] = best_s.mv[1][0][0];
3301 s->mv[1][0][1] = best_s.mv[1][0][1];*/
3304 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3305 &dmin, &next_block, mx, my);
    /* adopt the winning candidate: copy its state back and splice its
     * scratch bitstream(s) into the real output */
3310 s->current_picture.qscale_table[xy] = best_s.qscale;
3312 copy_context_after_encode(s, &best_s, -1);
3314 pb_bits_count= put_bits_count(&s->pb);
3315 flush_put_bits(&s->pb);
3316 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3319 if(s->data_partitioning){
3320 pb2_bits_count= put_bits_count(&s->pb2);
3321 flush_put_bits(&s->pb2);
3322 ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3323 s->pb2= backup_s.pb2;
3325 tex_pb_bits_count= put_bits_count(&s->tex_pb);
3326 flush_put_bits(&s->tex_pb);
3327 ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3328 s->tex_pb= backup_s.tex_pb;
3330 s->last_bits= put_bits_count(&s->pb);
3332 if (CONFIG_H263_ENCODER &&
3333 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3334 ff_h263_update_motion_val(s);
3336 if(next_block==0){ //FIXME 16 vs linesize16
3337 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3338 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3339 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3342 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3343 ff_mpv_reconstruct_mb(s, s->block);
    /* single candidate MB type: set up MVs from the ME tables and encode
     * directly, no trial encoding needed */
3345 int motion_x = 0, motion_y = 0;
3346 s->mv_type=MV_TYPE_16X16;
3347 // only one MB-Type possible
3350 case CANDIDATE_MB_TYPE_INTRA:
3353 motion_x= s->mv[0][0][0] = 0;
3354 motion_y= s->mv[0][0][1] = 0;
3356 case CANDIDATE_MB_TYPE_INTER:
3357 s->mv_dir = MV_DIR_FORWARD;
3359 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3360 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3362 case CANDIDATE_MB_TYPE_INTER_I:
3363 s->mv_dir = MV_DIR_FORWARD;
3364 s->mv_type = MV_TYPE_FIELD;
3367 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3368 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3369 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3372 case CANDIDATE_MB_TYPE_INTER4V:
3373 s->mv_dir = MV_DIR_FORWARD;
3374 s->mv_type = MV_TYPE_8X8;
3377 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3378 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3381 case CANDIDATE_MB_TYPE_DIRECT:
3382 if (CONFIG_MPEG4_ENCODER) {
3383 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3385 motion_x=s->b_direct_mv_table[xy][0];
3386 motion_y=s->b_direct_mv_table[xy][1];
3387 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3390 case CANDIDATE_MB_TYPE_DIRECT0:
3391 if (CONFIG_MPEG4_ENCODER) {
3392 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3394 ff_mpeg4_set_direct_mv(s, 0, 0);
3397 case CANDIDATE_MB_TYPE_BIDIR:
3398 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3400 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3401 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3402 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3403 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3405 case CANDIDATE_MB_TYPE_BACKWARD:
3406 s->mv_dir = MV_DIR_BACKWARD;
3408 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3409 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3411 case CANDIDATE_MB_TYPE_FORWARD:
3412 s->mv_dir = MV_DIR_FORWARD;
3414 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3415 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3417 case CANDIDATE_MB_TYPE_FORWARD_I:
3418 s->mv_dir = MV_DIR_FORWARD;
3419 s->mv_type = MV_TYPE_FIELD;
3422 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3423 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3424 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3427 case CANDIDATE_MB_TYPE_BACKWARD_I:
3428 s->mv_dir = MV_DIR_BACKWARD;
3429 s->mv_type = MV_TYPE_FIELD;
3432 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3433 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3434 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3437 case CANDIDATE_MB_TYPE_BIDIR_I:
3438 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3439 s->mv_type = MV_TYPE_FIELD;
3441 for(dir=0; dir<2; dir++){
3443 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3444 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3445 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3450 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3453 encode_mb(s, motion_x, motion_y);
3455 // RAL: Update last macroblock type
3456 s->last_mv_dir = s->mv_dir;
3458 if (CONFIG_H263_ENCODER &&
3459 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3460 ff_h263_update_motion_val(s);
3462 ff_mpv_reconstruct_mb(s, s->block);
3465 /* clean the MV table in IPS frames for direct mode in B-frames */
3466 if(s->mb_intra /* && I,P,S_TYPE */){
3467 s->p_mv_table[xy][0]=0;
3468 s->p_mv_table[xy][1]=0;
    /* accumulate per-plane SSE for PSNR reporting */
3471 if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3475 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3476 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3478 s->current_picture.encoding_error[0] += sse(
3479 s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3480 s->dest[0], w, h, s->linesize);
3481 s->current_picture.encoding_error[1] += sse(
3482 s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3483 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3484 s->current_picture.encoding_error[2] += sse(
3485 s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3486 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3489 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3490 ff_h263_loop_filter(s);
3492 ff_dlog(s->avctx, "MB %d %d bits\n",
3493 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3497 //not beautiful here but we must write it before flushing so it has to be here
3498 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3499 ff_msmpeg4_encode_ext_header(s);
3503 #if FF_API_RTP_CALLBACK
3504 FF_DISABLE_DEPRECATION_WARNINGS
3505 /* Send the last GOB if RTP */
3506 if (s->avctx->rtp_callback) {
3507 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3508 int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3509 /* Call the RTP callback to send the last GOB */
3511 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3513 FF_ENABLE_DEPRECATION_WARNINGS
/* Move-and-reset merge: add src->field into dst->field, then zero the source
 * so a repeated merge of the same slice context cannot double-count. */
3519 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold the motion-estimation statistics gathered by one slice-thread
 * context back into the main encoder context after the ME pass. */
3520 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3521     MERGE(me.scene_change_score);
3522     MERGE(me.mc_mb_var_sum_temp);
3523     MERGE(me.mb_var_sum_temp);
/* Merge per-slice encode results (statistics and the written bitstream)
 * from a slice-thread context (src) into the main context (dst).
 * NOTE(review): interior lines are missing from this view; the visible code
 * merges counters via MERGE() and then appends src's byte-aligned bit
 * buffer onto dst's. */
3526 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3529     MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3530     MERGE(dct_count[1]);
3539     MERGE(er.error_count);
3540     MERGE(padding_bug_score);
3541     MERGE(current_picture.encoding_error[0]);
3542     MERGE(current_picture.encoding_error[1]);
3543     MERGE(current_picture.encoding_error[2]);
     /* Noise-reduction state is merged per DCT coefficient (64 per table). */
3545     if (dst->noise_reduction){
3546         for(i=0; i<64; i++){
3547             MERGE(dct_error_sum[0][i]);
3548             MERGE(dct_error_sum[1][i]);
     /* Both bit buffers must be byte-aligned before concatenation,
      * otherwise ff_copy_bits() would splice mid-byte. */
3552     av_assert1(put_bits_count(&src->pb) % 8 ==0);
3553     av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3554     ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3555     flush_put_bits(&dst->pb);
/* Pick the quantiser / lambda for the current picture.
 * Priority: a pending s->next_lambda wins; otherwise, unless qscale is
 * fixed, ask the rate controller (ff_rate_estimate_qscale). With adaptive
 * quant, per-codec qscale clean-up is applied and the per-MB qscale table
 * initialised. dry_run avoids consuming state (next_lambda is kept). */
3558 static int estimate_qp(MpegEncContext *s, int dry_run){
3559     if (s->next_lambda){
3560         s->current_picture_ptr->f->quality =
3561         s->current_picture.f->quality = s->next_lambda;
3562         if(!dry_run) s->next_lambda= 0;
3563     } else if (!s->fixed_qscale) {
3564         int quality = ff_rate_estimate_qscale(s, dry_run);
3565         s->current_picture_ptr->f->quality =
3566         s->current_picture.f->quality = quality;
3567         if (s->current_picture.f->quality < 0)
     /* NOTE(review): the error-return path for negative quality is not
      * visible in this excerpt. */
3571     if(s->adaptive_quant){
3572         switch(s->codec_id){
3573         case AV_CODEC_ID_MPEG4:
3574             if (CONFIG_MPEG4_ENCODER)
3575                 ff_clean_mpeg4_qscales(s);
3577         case AV_CODEC_ID_H263:
3578         case AV_CODEC_ID_H263P:
3579         case AV_CODEC_ID_FLV1:
3580             if (CONFIG_H263_ENCODER)
3581                 ff_clean_h263_qscales(s);
3584         ff_init_qscale_tab(s);
3587         s->lambda= s->lambda_table[0];
3590         s->lambda = s->current_picture.f->quality;
3595 /* must be called before writing the header */
/* Derive temporal distances from the current PTS: for B-frames compute
 * pb_time (distance to the previous non-B picture inside the enclosing
 * P..P interval); for non-B pictures update pp_time and remember this
 * picture's time as last_non_b_time. */
3596 static void set_frame_distances(MpegEncContext * s){
3597     av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3598     s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3600     if(s->pict_type==AV_PICTURE_TYPE_B){
3601         s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3602         av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
3604         s->pp_time= s->time - s->last_non_b_time;
3605         s->last_non_b_time= s->time;
3606         av_assert1(s->picture_number==0 || s->pp_time > 0);
/* Encode one picture: set up timing/rounding/lambda, run motion estimation
 * across the slice-thread contexts, choose f_code/b_code and clean up long
 * MVs, build the (possibly qscale-dependent) quant matrices, write the
 * codec-specific picture header, then run encode_thread per slice and merge
 * the results. NOTE(review): many interior lines are missing from this
 * excerpt; comments below describe only the visible code. */
3610 static int encode_picture(MpegEncContext *s, int picture_number)
3614     int context_count = s->slice_context_count;
3616     s->picture_number = picture_number;
3618     /* Reset the average MB variance */
3619     s->me.mb_var_sum_temp    =
3620     s->me.mc_mb_var_sum_temp = 0;
3622     /* we need to initialize some time vars before we can encode B-frames */
3623     // RAL: Condition added for MPEG1VIDEO
3624     if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3625         set_frame_distances(s);
3626     if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3627         ff_set_mpeg4_time(s);
3629     s->me.scene_change_score=0;
3631 //   s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
     /* Rounding control: I-frames reset it; P/S frames toggle it when the
      * codec uses flip-flop rounding. */
3633     if(s->pict_type==AV_PICTURE_TYPE_I){
3634         if(s->msmpeg4_version >= 3) s->no_rounding=1;
3635         else                        s->no_rounding=0;
3636     }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3637         if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3638             s->no_rounding ^= 1;
     /* In 2-pass mode take qp/fcode from the stats file; otherwise reuse the
      * last lambda of the matching picture type for the ME pass. */
3641     if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3642         if (estimate_qp(s,1) < 0)
3644         ff_get_2pass_fcode(s);
3645     } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3646         if(s->pict_type==AV_PICTURE_TYPE_B)
3647             s->lambda= s->last_lambda_for[s->pict_type];
3649             s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
     /* Outside MJPEG/AMV the chroma intra matrices alias the luma ones;
      * free any previously separate chroma matrices before aliasing. */
3653     if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3654         if(s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
3655         if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3656         s->q_chroma_intra_matrix   = s->q_intra_matrix;
3657         s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3660     s->mb_intra=0; //for the rate distortion & bit compare functions
3661     for(i=1; i<context_count; i++){
3662         ret = ff_update_duplicate_context(s->thread_context[i], s);
3670     /* Estimate motion for every MB */
3671     if(s->pict_type != AV_PICTURE_TYPE_I){
3672         s->lambda  = (s->lambda  * s->me_penalty_compensation + 128) >> 8;
3673         s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3674         if (s->pict_type != AV_PICTURE_TYPE_B) {
3675             if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3677                 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3681         s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3682     }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3684         for(i=0; i<s->mb_stride*s->mb_height; i++)
3685             s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3687         if(!s->fixed_qscale){
3688             /* finding spatial complexity for I-frame rate control */
3689             s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3692     for(i=1; i<context_count; i++){
3693         merge_context_after_me(s, s->thread_context[i]);
3695     s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3696     s->current_picture.   mb_var_sum= s->current_picture_ptr->   mb_var_sum= s->me.   mb_var_sum_temp;
     /* Scene-change: re-classify the whole P picture as intra. */
3699     if (s->me.scene_change_score > s->scenechange_threshold &&
3700         s->pict_type == AV_PICTURE_TYPE_P) {
3701         s->pict_type= AV_PICTURE_TYPE_I;
3702         for(i=0; i<s->mb_stride*s->mb_height; i++)
3703             s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3704         if(s->msmpeg4_version >= 3)
3706         ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3707                 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
     /* Choose the P-frame f_code from frame (and, if interlaced ME, field)
      * MV tables, then clamp MVs that exceed the chosen range. */
3711     if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3712         s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3714         if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3716             a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3717             b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3718             s->f_code= FFMAX3(s->f_code, a, b);
3721         ff_fix_long_p_mvs(s, s->intra_penalty ? CANDIDATE_MB_TYPE_INTER : CANDIDATE_MB_TYPE_INTRA);
3722         ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3723         if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3727                 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3728                                 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
     /* B-frames need both f_code (forward) and b_code (backward). */
3733     if(s->pict_type==AV_PICTURE_TYPE_B){
3736         a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3737         b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3738         s->f_code = FFMAX(a, b);
3740         a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3741         b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3742         s->b_code = FFMAX(a, b);
3744         ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3745         ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3746         ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3747         ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3748         if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3750             for(dir=0; dir<2; dir++){
3753                     int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3754                               : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3755                     ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3756                                     s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3764     if (estimate_qp(s, 0) < 0)
3767     if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3768         s->pict_type == AV_PICTURE_TYPE_I &&
3769         !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3770         s->qscale= 3; //reduce clipping problems
     /* MJPEG bakes qscale directly into the quant matrices (scaled by 8). */
3772     if (s->out_format == FMT_MJPEG) {
3773         const uint16_t *  luma_matrix = ff_mpeg1_default_intra_matrix;
3774         const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3776         if (s->avctx->intra_matrix) {
3778             luma_matrix = s->avctx->intra_matrix;
3780         if (s->avctx->chroma_intra_matrix)
3781             chroma_matrix = s->avctx->chroma_intra_matrix;
3783         /* for mjpeg, we do include qscale in the matrix */
3785             int j = s->idsp.idct_permutation[i];
3787             s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3788             s->       intra_matrix[j] = av_clip_uint8((  luma_matrix[i] * s->qscale) >> 3);
3790         s->y_dc_scale_table=
3791         s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3792         s->chroma_intra_matrix[0] =
3793         s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3794         ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3795                        s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3796         ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3797                        s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
     /* AMV uses fixed SP5x quant tables with constant DC scale (13/14). */
3800     if(s->codec_id == AV_CODEC_ID_AMV){
3801         static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3802         static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3804             int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3806             s->intra_matrix[j]        = sp5x_qscale_five_quant_table[0][i];
3807             s->chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3809         s->y_dc_scale_table= y;
3810         s->c_dc_scale_table= c;
3811         s->intra_matrix[0] = 13;
3812         s->chroma_intra_matrix[0] = 14;
3813         ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3814                        s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3815         ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3816                        s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3820     if (s->out_format == FMT_SPEEDHQ) {
3821         s->y_dc_scale_table=
3822         s->c_dc_scale_table= ff_mpeg2_dc_scale_table[3];
3825     //FIXME var duplication
3826     s->current_picture_ptr->f->key_frame =
3827     s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3828     s->current_picture_ptr->f->pict_type =
3829     s->current_picture.f->pict_type = s->pict_type;
3831     if (s->current_picture.f->key_frame)
3832         s->picture_in_gop_number=0;
3834     s->mb_x = s->mb_y = 0;
3835     s->last_bits= put_bits_count(&s->pb);
     /* Dispatch to the per-format picture-header writer. */
3836     switch(s->out_format) {
3837 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
3839         /* s->huffman == HUFFMAN_TABLE_OPTIMAL can only be true for MJPEG. */
3840         if (!CONFIG_MJPEG_ENCODER || s->huffman != HUFFMAN_TABLE_OPTIMAL)
3841             ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3842                                            s->pred, s->intra_matrix, s->chroma_intra_matrix);
3846         if (CONFIG_SPEEDHQ_ENCODER)
3847             ff_speedhq_encode_picture_header(s);
3850         if (CONFIG_H261_ENCODER)
3851             ff_h261_encode_picture_header(s, picture_number);
3854         if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3855             ff_wmv2_encode_picture_header(s, picture_number);
3856         else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3857             ff_msmpeg4_encode_picture_header(s, picture_number);
3858         else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3859             ret = ff_mpeg4_encode_picture_header(s, picture_number);
3862         } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3863             ret = ff_rv10_encode_picture_header(s, picture_number);
3867         else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3868             ff_rv20_encode_picture_header(s, picture_number);
3869         else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3870             ff_flv_encode_picture_header(s, picture_number);
3871         else if (CONFIG_H263_ENCODER)
3872             ff_h263_encode_picture_header(s, picture_number);
3875         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3876             ff_mpeg1_encode_picture_header(s, picture_number);
3881     bits= put_bits_count(&s->pb);
3882     s->header_bits= bits - s->last_bits;
3884     for(i=1; i<context_count; i++){
3885         update_duplicate_context_after_me(s->thread_context[i], s);
     /* Encode slices in parallel, then merge each thread's output; grow the
      * main bit buffer when a thread's buffer is adjacent to it. */
3887     s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3888     for(i=1; i<context_count; i++){
3889         if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3890             set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3891         merge_context_after_encode(s, s->thread_context[i]);
/* Noise-reduction filter on a DCT block (C reference implementation):
 * accumulates per-coefficient error statistics (dct_error_sum) and shrinks
 * each coefficient toward zero by the running dct_offset, clamping at zero
 * so a coefficient never changes sign. Separate statistics are kept for
 * intra and inter blocks. */
3897 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3898     const int intra= s->mb_intra;
3901     s->dct_count[intra]++;
3903     for(i=0; i<64; i++){
3904         int level= block[i];
     /* Positive branch: record the error, subtract the offset, floor at 0. */
3908             s->dct_error_sum[intra][i] += level;
3909             level -= s->dct_offset[intra][i];
3910             if(level<0) level=0;
     /* Negative branch: mirror of the above, ceiling at 0. */
3912             s->dct_error_sum[intra][i] -= level;
3913             level += s->dct_offset[intra][i];
3914             if(level>0) level=0;
/* Trellis (rate-distortion optimal) quantisation of one 8x8 block.
 * Forward-DCTs the block, builds up to two candidate quantised levels per
 * coefficient, then runs a Viterbi-style dynamic program over scan
 * positions: score_tab[] holds the best rate+lambda*distortion cost ending
 * at each position, survivor[] the states worth extending. Finally the best
 * path is traced back into block[] in permuted order. Returns the index of
 * the last nonzero coefficient (or -1 / <start_i when the block is empty),
 * and sets *overflow when a level exceeded s->max_qcoeff.
 * NOTE(review): many interior lines are missing from this excerpt. */
3921 static int dct_quantize_trellis_c(MpegEncContext *s,
3922                                   int16_t *block, int n,
3923                                   int qscale, int *overflow){
3925     const uint16_t *matrix;
3926     const uint8_t *scantable;
3927     const uint8_t *perm_scantable;
3929     unsigned int threshold1, threshold2;
3941     int coeff_count[64];
3942     int qmul, qadd, start_i, last_non_zero, i, dc;
3943     const int esc_length= s->ac_esc_length;
3945     uint8_t * last_length;
3946     const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3949     s->fdsp.fdct(block);
3951     if(s->dct_error_sum)
3952         s->denoise_dct(s, block);
3954     qadd= ((qscale-1)|1)*8;
     /* MPEG-2 optionally uses the non-linear qscale table. */
3956     if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3957     else                 mpeg2_qscale = qscale << 1;
3961         scantable= s->intra_scantable.scantable;
3962         perm_scantable= s->intra_scantable.permutated;
3970             /* For AIC we skip quant/dequant of INTRADC */
3975         /* note: block[0] is assumed to be positive */
3976         block[0] = (block[0] + (q >> 1)) / q;
     /* Blocks 0-3 are luma, 4+ chroma: pick the matching quant matrix. */
3979         qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
3980         matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
3981         if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
3982             bias= 1<<(QMAT_SHIFT-1);
3984         if (n > 3 && s->intra_chroma_ac_vlc_length) {
3985             length     = s->intra_chroma_ac_vlc_length;
3986             last_length= s->intra_chroma_ac_vlc_last_length;
3988             length     = s->intra_ac_vlc_length;
3989             last_length= s->intra_ac_vlc_last_length;
3992         scantable= s->inter_scantable.scantable;
3993         perm_scantable= s->inter_scantable.permutated;
3996         qmat = s->q_inter_matrix[qscale];
3997         matrix = s->inter_matrix;
3998         length     = s->inter_ac_vlc_length;
3999         last_length= s->inter_ac_vlc_last_length;
4003     threshold1= (1<<QMAT_SHIFT) - bias - 1;
4004     threshold2= (threshold1<<1);
     /* Scan backwards to find the last coefficient that quantises nonzero. */
4006     for(i=63; i>=start_i; i--) {
4007         const int j = scantable[i];
4008         int level = block[j] * qmat[j];
4010         if(((unsigned)(level+threshold1))>threshold2){
     /* Build candidate levels (level and level-1 toward zero) per position. */
4016     for(i=start_i; i<=last_non_zero; i++) {
4017         const int j = scantable[i];
4018         int level = block[j] * qmat[j];
4020 //        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
4021 //           || bias-level >= (1<<(QMAT_SHIFT - 3))){
4022         if(((unsigned)(level+threshold1))>threshold2){
4024                 level= (bias + level)>>QMAT_SHIFT;
4026                 coeff[1][i]= level-1;
4027 //                coeff[2][k]= level-2;
4029                 level= (bias - level)>>QMAT_SHIFT;
4030                 coeff[0][i]= -level;
4031                 coeff[1][i]= -level+1;
4032 //                coeff[2][k]= -level+2;
4034             coeff_count[i]= FFMIN(level, 2);
4035             av_assert2(coeff_count[i]);
4038             coeff[0][i]= (level>>31)|1;
4043     *overflow= s->max_qcoeff < max; //overflow might have happened
4045     if(last_non_zero < start_i){
4046         memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4047         return last_non_zero;
     /* Dynamic program over scan positions, starting from an empty block. */
4050     score_tab[start_i]= 0;
4051     survivor[0]= start_i;
4054     for(i=start_i; i<=last_non_zero; i++){
4055         int level_index, j, zero_distortion;
4056         int dct_coeff= FFABS(block[ scantable[i] ]);
4057         int best_score=256*256*256*120;
     /* The ifast FDCT leaves AAN scale factors in; undo them for distortion. */
4059         if (s->fdsp.fdct == ff_fdct_ifast)
4060             dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4061         zero_distortion= dct_coeff*dct_coeff;
4063         for(level_index=0; level_index < coeff_count[i]; level_index++){
4065             int level= coeff[level_index][i];
4066             const int alevel= FFABS(level);
     /* Reconstruct the dequantised value exactly as the decoder would,
      * per output format, to measure true distortion. */
4071             if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4072                 unquant_coeff= alevel*qmul + qadd;
4073             } else if(s->out_format == FMT_MJPEG) {
4074                 j = s->idsp.idct_permutation[scantable[i]];
4075                 unquant_coeff = alevel * matrix[j] * 8;
4077                 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4079                     unquant_coeff = (int)(  alevel  * mpeg2_qscale * matrix[j]) >> 4;
4080                     unquant_coeff =   (unquant_coeff - 1) | 1;
4082                     unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4083                     unquant_coeff =   (unquant_coeff - 1) | 1;
4088             distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
     /* Levels representable without escape: try extending each survivor. */
4090             if((level&(~127)) == 0){
4091                 for(j=survivor_count-1; j>=0; j--){
4092                     int run= i - survivor[j];
4093                     int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4094                     score += score_tab[i-run];
4096                     if(score < best_score){
4099                         level_tab[i+1]= level-64;
     /* H.263/H.261 use a distinct "last coefficient" code length. */
4103                 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4104                     for(j=survivor_count-1; j>=0; j--){
4105                         int run= i - survivor[j];
4106                         int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4107                         score += score_tab[i-run];
4108                         if(score < last_score){
4111                             last_level= level-64;
     /* Escape-coded levels: fixed esc_length cost, independent of run. */
4117                 distortion += esc_length*lambda;
4118                 for(j=survivor_count-1; j>=0; j--){
4119                     int run= i - survivor[j];
4120                     int score= distortion + score_tab[i-run];
4122                     if(score < best_score){
4125                         level_tab[i+1]= level-64;
4129                 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4130                     for(j=survivor_count-1; j>=0; j--){
4131                         int run= i - survivor[j];
4132                         int score= distortion + score_tab[i-run];
4133                         if(score < last_score){
4136                             last_level= level-64;
4144         score_tab[i+1]= best_score;
4146         // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4147         if(last_non_zero <= 27){
4148             for(; survivor_count; survivor_count--){
4149                 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4153             for(; survivor_count; survivor_count--){
4154                 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4159         survivor[ survivor_count++ ]= i+1;
     /* For formats without a dedicated "last" code, pick the best stop
      * position (each additional coded coefficient costs ~2*lambda). */
4162     if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4163         last_score= 256*256*256*120;
4164         for(i= survivor[0]; i<=last_non_zero + 1; i++){
4165             int score= score_tab[i];
4167                 score += lambda * 2; // FIXME more exact?
4169             if(score < last_score){
4172                 last_level= level_tab[i];
4173                 last_run= run_tab[i];
4178     s->coded_score[n] = last_score;
4180     dc= FFABS(block[0]);
4181     last_non_zero= last_i - 1;
4182     memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4184     if(last_non_zero < start_i)
4185         return last_non_zero;
     /* Special case: only the first coefficient survives — re-evaluate its
      * candidates against the raw DC value directly. */
4187     if(last_non_zero == 0 && start_i == 0){
4189         int best_score= dc * dc;
4191         for(i=0; i<coeff_count[0]; i++){
4192             int level= coeff[i][0];
4193             int alevel= FFABS(level);
4194             int unquant_coeff, score, distortion;
4196             if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4197                     unquant_coeff= (alevel*qmul + qadd)>>3;
4199                     unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4200                     unquant_coeff =   (unquant_coeff - 1) | 1;
4202                 unquant_coeff = (unquant_coeff + 4) >> 3;
4203                 unquant_coeff<<= 3 + 3;
4205             distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4207             if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4208             else                    score= distortion + esc_length*lambda;
4210             if(score < best_score){
4212                 best_level= level - 64;
4215         block[0]= best_level;
4216         s->coded_score[n] = best_score - dc*dc;
4217         if(best_level == 0) return -1;
4218         else                return last_non_zero;
     /* Trace the winning path back into block[] (permuted order);
      * level_tab entries are stored biased by +64. */
4222     av_assert2(last_level);
4224     block[ perm_scantable[last_non_zero] ]= last_level;
4227     for(; i>start_i; i -= run_tab[i] + 1){
4228         block[ perm_scantable[i-1] ]= level_tab[i];
4231     return last_non_zero;
/* Fixed-point 2D DCT basis functions (BASIS_SHIFT precision), indexed by
 * the IDCT permutation; filled lazily by build_basis(). */
4234 static int16_t basis[64][64];
/* Precompute the 64 DCT basis patterns, each an 8x8 spatial image, with the
 * standard 1/sqrt(2) normalisation for the i==0 / j==0 rows, stored under
 * the given coefficient permutation. */
4236 static void build_basis(uint8_t *perm){
4243                 double s= 0.25*(1<<BASIS_SHIFT);
4245                 int perm_index= perm[index];
4246                 if(i==0) s*= sqrt(0.5);
4247                 if(j==0) s*= sqrt(0.5);
4248                 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Noise-shaping refinement of an already-quantised block: iteratively tries
 * +/-1 changes to each coefficient, scoring each candidate by weighted
 * spatial-domain reconstruction error (try_8x8basis on the residual rem[])
 * plus the VLC rate delta, and applies the best change via add_8x8basis.
 * Returns the (possibly updated) index of the last nonzero coefficient.
 * NOTE(review): many interior lines are missing from this excerpt; comments
 * describe only the visible code. */
4255 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4256                         int16_t *block, int16_t *weight, int16_t *orig,
4259     LOCAL_ALIGNED_16(int16_t, d1, [64]);
4260     const uint8_t *scantable;
4261     const uint8_t *perm_scantable;
4262 //    unsigned int threshold1, threshold2;
4267     int qmul, qadd, start_i, last_non_zero, i, dc;
4269     uint8_t * last_length;
4271     int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
     /* Basis tables are built lazily on first use. */
4273     if(basis[0][0] == 0)
4274         build_basis(s->idsp.idct_permutation);
4279         scantable= s->intra_scantable.scantable;
4280         perm_scantable= s->intra_scantable.permutated;
4287             /* For AIC we skip quant/dequant of INTRADC */
4291         q <<= RECON_SHIFT-3;
4292         /* note: block[0] is assumed to be positive */
4294 //        block[0] = (block[0] + (q >> 1)) / q;
4296 //        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4297 //            bias= 1<<(QMAT_SHIFT-1);
4298         if (n > 3 && s->intra_chroma_ac_vlc_length) {
4299             length     = s->intra_chroma_ac_vlc_length;
4300             last_length= s->intra_chroma_ac_vlc_last_length;
4302             length     = s->intra_ac_vlc_length;
4303             last_length= s->intra_ac_vlc_last_length;
4306         scantable= s->inter_scantable.scantable;
4307         perm_scantable= s->inter_scantable.permutated;
4310         length     = s->inter_ac_vlc_length;
4311         last_length= s->inter_ac_vlc_last_length;
4313     last_non_zero = s->block_last_index[n];
     /* rem[] = current reconstruction error vs the original pixels, in
      * RECON_SHIFT fixed point. */
4315     dc += (1<<(RECON_SHIFT-1));
4316     for(i=0; i<64; i++){
4317         rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
     /* Map the caller-provided weights into the 16..63 range used below. */
4321         for(i=0; i<64; i++){
4326             w= FFABS(weight[i]) + qns*one;
4327             w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4330 //            w=weight[i] = (63*qns + (w/2)) / w;
4333             av_assert2(w<(1<<6));
4336     lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
     /* Build the run-length index of existing nonzero coefficients and add
      * their dequantised contribution into the residual. */
4340     for(i=start_i; i<=last_non_zero; i++){
4341         int j= perm_scantable[i];
4342         const int level= block[j];
4346             if(level<0) coeff= qmul*level - qadd;
4347             else        coeff= qmul*level + qadd;
4348             run_tab[rle_index++]=run;
4351             s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
     /* Main refinement loop: best_score starts as the cost of no change. */
4358         int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4361         int run2, best_unquant_change=0, analyze_gradient;
4362         analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
4364         if(analyze_gradient){
4365             for(i=0; i<64; i++){
4368                 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
     /* Intra DC: try +/-1 with the simple scale q, keeping the value in
      * the decodable 0..2047 range. */
4374             const int level= block[0];
4375             int change, old_coeff;
4377             av_assert2(s->mb_intra);
4381             for(change=-1; change<=1; change+=2){
4382                 int new_level= level + change;
4383                 int score, new_coeff;
4385                 new_coeff= q*new_level;
4386                 if(new_coeff >= 2048 || new_coeff < 0)
4389                 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4390                                                   new_coeff - old_coeff);
4391                 if(score<best_score){
4394                     best_change= change;
4395                     best_unquant_change= new_coeff - old_coeff;
4402             run2= run_tab[rle_index++];
     /* AC coefficients: score every +/-1 change by rate delta + weighted
      * reconstruction error delta. */
4406         for(i=start_i; i<64; i++){
4407             int j= perm_scantable[i];
4408             const int level= block[j];
4409             int change, old_coeff;
4411             if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4415                 if(level<0) old_coeff= qmul*level - qadd;
4416                 else        old_coeff= qmul*level + qadd;
4417                 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4421             av_assert2(run2>=0 || i >= last_non_zero );
4424             for(change=-1; change<=1; change+=2){
4425                 int new_level= level + change;
4426                 int score, new_coeff, unquant_change;
     /* Conservative mode never grows a coefficient's magnitude. */
4429                 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4433                     if(new_level<0) new_coeff= qmul*new_level - qadd;
4434                     else            new_coeff= qmul*new_level + qadd;
4435                     if(new_coeff >= 2048 || new_coeff <= -2048)
4437                     //FIXME check for overflow
     /* Existing nonzero coefficient changing value: rate delta is the
      * VLC length difference (last_length for the final coefficient). */
4440                     if(level < 63 && level > -63){
4441                         if(i < last_non_zero)
4442                             score +=  length[UNI_AC_ENC_INDEX(run, new_level+64)]
4443                                      - length[UNI_AC_ENC_INDEX(run, level+64)];
4445                             score +=  last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4446                                      - last_length[UNI_AC_ENC_INDEX(run, level+64)];
     /* Zero coefficient becoming +/-1: inserting it splits a run. */
4449                     av_assert2(FFABS(new_level)==1);
4451                     if(analyze_gradient){
4452                         int g= d1[ scantable[i] ];
4453                         if(g && (g^new_level) >= 0)
4457                     if(i < last_non_zero){
4458                         int next_i= i + run2 + 1;
4459                         int next_level= block[ perm_scantable[next_i] ] + 64;
4461                         if(next_level&(~127))
4464                         if(next_i < last_non_zero)
4465                             score +=   length[UNI_AC_ENC_INDEX(run, 65)]
4466                                      + length[UNI_AC_ENC_INDEX(run2, next_level)]
4467                                      - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4469                             score +=  length[UNI_AC_ENC_INDEX(run, 65)]
4470                                      + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4471                                      - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4473                         score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4475                             score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4476                                     - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
     /* A +/-1 coefficient being removed: runs around it merge. */
4482                     av_assert2(FFABS(level)==1);
4484                     if(i < last_non_zero){
4485                         int next_i= i + run2 + 1;
4486                         int next_level= block[ perm_scantable[next_i] ] + 64;
4488                         if(next_level&(~127))
4491                         if(next_i < last_non_zero)
4492                             score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4493                                      - length[UNI_AC_ENC_INDEX(run2, next_level)]
4494                                      - length[UNI_AC_ENC_INDEX(run, 65)];
4496                             score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4497                                      - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4498                                      - length[UNI_AC_ENC_INDEX(run, 65)];
4500                         score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4502                             score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4503                                     - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4510                 unquant_change= new_coeff - old_coeff;
4511                 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4513                 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4515                 if(score<best_score){
4518                     best_change= change;
4519                     best_unquant_change= unquant_change;
4523                 prev_level= level + 64;
4524                 if(prev_level&(~127))
     /* Apply the winning change; extend or shrink last_non_zero to the new
      * trailing nonzero coefficient and rebuild the run-length index. */
4534             int j= perm_scantable[ best_coeff ];
4536             block[j] += best_change;
4538             if(best_coeff > last_non_zero){
4539                 last_non_zero= best_coeff;
4540                 av_assert2(block[j]);
4542                 for(; last_non_zero>=start_i; last_non_zero--){
4543                     if(block[perm_scantable[last_non_zero]])
4550             for(i=start_i; i<=last_non_zero; i++){
4551                 int j= perm_scantable[i];
4552                 const int level= block[j];
4555                     run_tab[rle_index++]=run;
4562             s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4568     return last_non_zero;
4572  * Permute an 8x8 block according to permutation.
4573  * @param block the block which will be permuted according to
4574  *              the given permutation vector
4575  * @param permutation the permutation vector
4576  * @param last the last non zero coefficient in scantable order, used to
4577  *             speed the permutation up
4578  * @param scantable the used scantable, this is only used to speed the
4579  *                  permutation up, the block is not (inverse) permutated
4580  *                  to scantable order!
4582 void ff_block_permute(int16_t *block, uint8_t *permutation,
4583                       const uint8_t *scantable, int last)
4590 //FIXME it is ok but not clean and might fail for some permutations
4591 // if (permutation[1] == 1)
    /* Copy the (up to last+1) nonzero coefficients aside in scan order...
     * NOTE(review): the temp buffer declaration and the zeroing of block[]
     * are on lines missing from this excerpt. */
4594     for (i = 0; i <= last; i++) {
4595         const int j = scantable[i];
    /* ...then scatter them back through the permutation vector. */
4600     for (i = 0; i <= last; i++) {
4601         const int j = scantable[i];
4602         const int perm_j = permutation[j];
4603         block[perm_j] = temp[j];
/* Plain (non-trellis) quantisation of one 8x8 block: forward DCT, optional
 * denoising, threshold-based dead-zone quantisation with the per-qscale
 * matrix, then IDCT-permutation of the surviving coefficients. Returns the
 * index of the last nonzero coefficient in scan order; sets *overflow when
 * a quantised level exceeded s->max_qcoeff. */
4607 int ff_dct_quantize_c(MpegEncContext *s,
4608                         int16_t *block, int n,
4609                         int qscale, int *overflow)
4611     int i, j, level, last_non_zero, q, start_i;
4613     const uint8_t *scantable;
4616     unsigned int threshold1, threshold2;
4618     s->fdsp.fdct(block);
4620     if(s->dct_error_sum)
4621         s->denoise_dct(s, block);
4624         scantable= s->intra_scantable.scantable;
4632             /* For AIC we skip quant/dequant of INTRADC */
4635         /* note: block[0] is assumed to be positive */
4636         block[0] = (block[0] + (q >> 1)) / q;
     /* Intra: luma (n<4) vs chroma matrices; bias rescaled from
      * QUANT_BIAS_SHIFT to QMAT_SHIFT fixed point. */
4639         qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4640         bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4642         scantable= s->inter_scantable.scantable;
4645         qmat = s->q_inter_matrix[qscale];
4646         bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4648     threshold1= (1<<QMAT_SHIFT) - bias - 1;
4649     threshold2= (threshold1<<1);
     /* Backward scan: locate the last coefficient surviving the dead zone
      * (single unsigned compare covers both signs). */
4650     for(i=63;i>=start_i;i--) {
4652         level = block[j] * qmat[j];
4654         if(((unsigned)(level+threshold1))>threshold2){
     /* Forward pass: quantise everything up to last_non_zero. */
4661     for(i=start_i; i<=last_non_zero; i++) {
4663         level = block[j] * qmat[j];
4665 //        if(   bias+level >= (1<<QMAT_SHIFT)
4666 //           || bias-level >= (1<<QMAT_SHIFT)){
4667         if(((unsigned)(level+threshold1))>threshold2){
4669                 level= (bias + level)>>QMAT_SHIFT;
4672                 level= (bias - level)>>QMAT_SHIFT;
4680     *overflow= s->max_qcoeff < max; //overflow might have happened
4682     /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4683     if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4684         ff_block_permute(block, s->idsp.idct_permutation,
4685                       scantable, last_non_zero);
4687     return last_non_zero;
/* AVOption plumbing: OFFSET maps an option name to its field in
 * MpegEncContext; VE marks options as video+encoding parameters. */
4690 #define OFFSET(x) offsetof(MpegEncContext, x)
4691 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options of the baseline H.263 encoder. */
4692 static const AVOption h263_options[] = {
4693     { "obmc",         "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4694     { "mb_info",      "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4696 #if FF_API_MPEGVIDEO_OPTS
4697     FF_MPV_DEPRECATED_MPEG_QUANT_OPT
4698     FF_MPV_DEPRECATED_A53_CC_OPT
4699     FF_MPV_DEPRECATED_MATRIX_OPT
4700     FF_MPV_DEPRECATED_BFRAME_OPTS
4705 static const AVClass h263_class = {
4706     .class_name = "H.263 encoder",
4707     .item_name  = av_default_item_name,
4708     .option     = h263_options,
4709     .version    = LIBAVUTIL_VERSION_INT,
/* Codec registration for the H.263 encoder; shares the generic mpegvideo
 * init/encode/close entry points. */
4712 AVCodec ff_h263_encoder = {
4714     .long_name      = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4715     .type           = AVMEDIA_TYPE_VIDEO,
4716     .id             = AV_CODEC_ID_H263,
4717     .priv_data_size = sizeof(MpegEncContext),
4718     .init           = ff_mpv_encode_init,
4719     .encode2        = ff_mpv_encode_picture,
4720     .close          = ff_mpv_encode_end,
4721     .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
4722     .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4723     .priv_class     = &h263_class,
/* Private options of the H.263+ (H.263-1998) encoder. */
4726 static const AVOption h263p_options[] = {
4727     { "umv",        "Use unlimited motion vectors.",    OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4728     { "aiv",        "Use alternative inter VLC.",       OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4729     { "obmc",       "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4730     { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
4732 #if FF_API_MPEGVIDEO_OPTS
4733     FF_MPV_DEPRECATED_MPEG_QUANT_OPT
4734     FF_MPV_DEPRECATED_A53_CC_OPT
4735     FF_MPV_DEPRECATED_MATRIX_OPT
4736     FF_MPV_DEPRECATED_BFRAME_OPTS
4740 static const AVClass h263p_class = {
4741     .class_name = "H.263p encoder",
4742     .item_name  = av_default_item_name,
4743     .option     = h263p_options,
4744     .version    = LIBAVUTIL_VERSION_INT,
/* Codec registration for H.263+; additionally advertises slice threading. */
4747 AVCodec ff_h263p_encoder = {
4749     .long_name      = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4750     .type           = AVMEDIA_TYPE_VIDEO,
4751     .id             = AV_CODEC_ID_H263P,
4752     .priv_data_size = sizeof(MpegEncContext),
4753     .init           = ff_mpv_encode_init,
4754     .encode2        = ff_mpv_encode_picture,
4755     .close          = ff_mpv_encode_end,
4756     .capabilities   = AV_CODEC_CAP_SLICE_THREADS,
4757     .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
4758     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4759     .priv_class     = &h263p_class,
4762 static const AVClass msmpeg4v2_class = {
4763 .class_name = "msmpeg4v2 encoder",
4764 .item_name = av_default_item_name,
4765 .option = ff_mpv_generic_options,
4766 .version = LIBAVUTIL_VERSION_INT,
4769 AVCodec ff_msmpeg4v2_encoder = {
4770 .name = "msmpeg4v2",
4771 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4772 .type = AVMEDIA_TYPE_VIDEO,
4773 .id = AV_CODEC_ID_MSMPEG4V2,
4774 .priv_data_size = sizeof(MpegEncContext),
4775 .init = ff_mpv_encode_init,
4776 .encode2 = ff_mpv_encode_picture,
4777 .close = ff_mpv_encode_end,
4778 .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
4779 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4780 .priv_class = &msmpeg4v2_class,
4783 static const AVClass msmpeg4v3_class = {
4784 .class_name = "msmpeg4v3 encoder",
4785 .item_name = av_default_item_name,
4786 .option = ff_mpv_generic_options,
4787 .version = LIBAVUTIL_VERSION_INT,
4790 AVCodec ff_msmpeg4v3_encoder = {
4792 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4793 .type = AVMEDIA_TYPE_VIDEO,
4794 .id = AV_CODEC_ID_MSMPEG4V3,
4795 .priv_data_size = sizeof(MpegEncContext),
4796 .init = ff_mpv_encode_init,
4797 .encode2 = ff_mpv_encode_picture,
4798 .close = ff_mpv_encode_end,
4799 .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
4800 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4801 .priv_class = &msmpeg4v3_class,
4804 static const AVClass wmv1_class = {
4805 .class_name = "wmv1 encoder",
4806 .item_name = av_default_item_name,
4807 .option = ff_mpv_generic_options,
4808 .version = LIBAVUTIL_VERSION_INT,
4811 AVCodec ff_wmv1_encoder = {
4813 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4814 .type = AVMEDIA_TYPE_VIDEO,
4815 .id = AV_CODEC_ID_WMV1,
4816 .priv_data_size = sizeof(MpegEncContext),
4817 .init = ff_mpv_encode_init,
4818 .encode2 = ff_mpv_encode_picture,
4819 .close = ff_mpv_encode_end,
4820 .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
4821 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4822 .priv_class = &wmv1_class,