2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/mem_internal.h"
40 #include "libavutil/pixdesc.h"
41 #include "libavutil/opt.h"
42 #include "libavutil/thread.h"
47 #include "mpegvideo.h"
48 #include "mpegvideodata.h"
52 #include "mjpegenc_common.h"
54 #include "mpegutils.h"
56 #include "speedhqenc.h"
58 #include "pixblockdsp.h"
62 #include "aandcttab.h"
64 #include "mpeg4video.h"
66 #include "bytestream.h"
69 #include "packet_internal.h"
/* Fixed-point precision (in bits) used for the intra/inter quantizer bias. */
73 #define QUANT_BIAS_SHIFT 8
/* Fixed-point shift for the 16-bit quantization tables (qmat16, SIMD path). */
75 #define QMAT_SHIFT_MMX 16
/* Forward declarations of encoder-internal helpers defined later in this file. */
78 static int encode_picture(MpegEncContext *s, int picture_number);
79 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
80 static int sse_mb(MpegEncContext *s);
81 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
82 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
/* Shared default MV-penalty / fcode tables; installed into each context by
 * mpv_encode_defaults().  default_fcode_tab is filled once in
 * mpv_encode_init_static() under ff_thread_once(). */
84 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
85 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
/* Generic AVOption table shared by the mpegvideo-based encoders.
 * NOTE(review): this listing is truncated — the deprecated-option entries are
 * guarded by FF_API_MPEGVIDEO_OPTS; the table terminator and closing brace
 * are not visible in this chunk. */
87 const AVOption ff_mpv_generic_options[] = {
89 #if FF_API_MPEGVIDEO_OPTS
90 FF_MPV_DEPRECATED_MPEG_QUANT_OPT
91 FF_MPV_DEPRECATED_A53_CC_OPT
92 FF_MPV_DEPRECATED_MATRIX_OPT
93 FF_MPV_DEPRECATED_BFRAME_OPTS
/**
 * Build fixed-point quantization tables for every qscale in [qmin, qmax]
 * from the given quant_matrix.
 *
 * qmat receives 32-bit reciprocal entries; qmat16 receives 16-bit
 * magnitude ([0]) and rounding-bias ([1]) pairs for the 16-bit quantizer
 * path.  The table contents depend on which forward DCT is in use, because
 * the "ifast" DCT leaves the AAN scale factors in its output and they must
 * be folded into the matrix here.
 *
 * NOTE(review): this listing is truncated — variable declarations
 * (qscale, i, shift, max), some #if/else branches, and the closing braces
 * of the original function are not visible in this chunk.
 */
98 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
99 uint16_t (*qmat16)[2][64],
100 const uint16_t *quant_matrix,
101 int bias, int qmin, int qmax, int intra)
103 FDCTDSPContext *fdsp = &s->fdsp;
107 for (qscale = qmin; qscale <= qmax; qscale++) {
/* MPEG-2 style non-linear qscale maps through a lookup table; otherwise
 * the effective scale is simply 2*qscale. */
111 if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
112 else qscale2 = qscale << 1;
/* Branch 1: "islow"/faan DCTs produce unscaled coefficients. */
114 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
116 fdsp->fdct == ff_faandct ||
117 #endif /* CONFIG_FAANDCT */
118 fdsp->fdct == ff_jpeg_fdct_islow_10) {
119 for (i = 0; i < 64; i++) {
/* idct_permutation maps natural order to the IDCT's coefficient order. */
120 const int j = s->idsp.idct_permutation[i];
121 int64_t den = (int64_t) qscale2 * quant_matrix[j];
122 /* 16 <= qscale * quant_matrix[i] <= 7905
123 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
124 * 19952 <= x <= 249205026
125 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
126 * 3444240 >= (1 << 36) / (x) >= 275 */
128 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
/* Branch 2: ifast DCT — fold the AAN scale factors into the divisor. */
130 } else if (fdsp->fdct == ff_fdct_ifast) {
131 for (i = 0; i < 64; i++) {
132 const int j = s->idsp.idct_permutation[i];
133 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
134 /* 16 <= qscale * quant_matrix[i] <= 7905
135 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
136 * 19952 <= x <= 249205026
137 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
138 * 3444240 >= (1 << 36) / (x) >= 275 */
/* Extra +14 in the shift compensates for the 2^14 scale in ff_aanscales. */
140 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
/* Branch 3 (presumably the default/else case — opening not visible here):
 * fills both the 32-bit and the 16-bit tables. */
143 for (i = 0; i < 64; i++) {
144 const int j = s->idsp.idct_permutation[i];
145 int64_t den = (int64_t) qscale2 * quant_matrix[j];
146 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
147 * Assume x = qscale * quant_matrix[i]
149 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
150 * so 32768 >= (1 << 19) / (x) >= 67 */
151 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
152 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
153 // (qscale * quant_matrix[i]);
154 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
/* Clamp the 16-bit magnitude away from 0 and from 32768 so it always fits
 * and never becomes a zero divisor for the bias below. */
156 if (qmat16[qscale][0][i] == 0 ||
157 qmat16[qscale][0][i] == 128 * 256)
158 qmat16[qscale][0][i] = 128 * 256 - 1;
159 qmat16[qscale][1][i] =
160 ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
161 qmat16[qscale][0][i]);
/* Overflow guard: widen the post-quantization shift while the worst-case
 * product would exceed INT_MAX (logs a warning if QMAT_SHIFT grew). */
165 for (i = intra; i < 64; i++) {
167 if (fdsp->fdct == ff_fdct_ifast) {
168 max = (8191LL * ff_aanscales[i]) >> 14;
170 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
176 av_log(s->avctx, AV_LOG_INFO,
177 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/* Derive s->qscale (and s->lambda2) from the current rate-control lambda.
 * NOTE(review): the first branch is guarded by "== 1 && 0", i.e. the
 * non-linear-qscale search below is deliberately compiled out (dead code);
 * left untouched here since intent cannot be confirmed from this truncated
 * view. */
182 static inline void update_qscale(MpegEncContext *s)
184 if (s->q_scale_type == 1 && 0) {
186 int bestdiff=INT_MAX;
/* Search the MPEG-2 non-linear qscale table for the entry whose implied
 * lambda is closest to s->lambda, honoring qmin/qmax (qmax may be ignored
 * when vbv_ignore_qmax is set). */
189 for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
190 int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
191 if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
192 (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
194 if (diff < bestdiff) {
/* Linear mapping: qscale ≈ lambda * 139 / 2^(FF_LAMBDA_SHIFT+7), rounded. */
201 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
202 (FF_LAMBDA_SHIFT + 7);
203 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
/* Keep lambda2 consistent with the (possibly clipped) lambda. */
206 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/* Write all 64 matrix entries to the bitstream in zigzag scan order,
 * 8 bits each. */
210 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
216 for (i = 0; i < 64; i++) {
217 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
224 * init s->current_picture.qscale_table from s->lambda_table
/* Converts each macroblock's lambda to a qscale value (same linear mapping
 * as update_qscale: lambda*139 >> (FF_LAMBDA_SHIFT+7)), clipped to the
 * configured qmin (upper clip bound not visible in this truncated chunk). */
226 void ff_init_qscale_tab(MpegEncContext *s)
228 int8_t * const qscale_table = s->current_picture.qscale_table;
231 for (i = 0; i < s->mb_num; i++) {
/* mb_index2xy translates the sequential MB index into the table offset. */
232 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
233 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
234 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/* Copy the fields that motion estimation / header encoding may have changed
 * from the master context (src) back into a slice-thread duplicate (dst). */
239 static void update_duplicate_context_after_me(MpegEncContext *dst,
242 #define COPY(a) dst->a= src->a
244 COPY(current_picture);
250 COPY(picture_in_gop_number);
251 COPY(gop_picture_number);
252 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
253 COPY(progressive_frame); // FIXME don't set in encode_header
254 COPY(partitioned_frame); // FIXME don't set in encode_header
/* One-time init of the shared static tables (run under ff_thread_once()).
 * Marks the small motion-vector range [-16, 15] as usable with fcode 1. */
258 static void mpv_encode_init_static(void)
260 for (int i = -16; i < 16; i++)
261 default_fcode_tab[i + MAX_MV] = 1;
265 * Set the given MpegEncContext to defaults for encoding.
266 * the changed fields will not depend upon the prior state of the MpegEncContext.
268 static void mpv_encode_defaults(MpegEncContext *s)
270 static AVOnce init_static_once = AV_ONCE_INIT;
/* Generic (decoder+encoder) defaults first, then encoder-only fields. */
272 ff_mpv_common_defaults(s);
/* Thread-safe one-time init of the shared static tables. */
274 ff_thread_once(&init_static_once, mpv_encode_init_static);
276 s->me.mv_penalty = default_mv_penalty;
277 s->fcode_tab = default_fcode_tab;
279 s->input_picture_number = 0;
280 s->picture_in_gop_number = 0;
/* Set up the DCT quantization function pointers: x86 SIMD overrides first,
 * C fallback otherwise, and the trellis quantizer when requested.
 * fast_dct_quantize keeps the non-trellis variant for speed-critical paths. */
283 av_cold int ff_dct_encode_init(MpegEncContext *s)
286 ff_dct_encode_init_x86(s);
288 if (CONFIG_H263_ENCODER)
289 ff_h263dsp_init(&s->h263dsp);
290 if (!s->dct_quantize)
291 s->dct_quantize = ff_dct_quantize_c;
293 s->denoise_dct = denoise_dct_c;
/* Remember the plain quantizer before trellis possibly replaces it. */
294 s->fast_dct_quantize = s->dct_quantize;
295 if (s->avctx->trellis)
296 s->dct_quantize = dct_quantize_trellis_c;
301 /* init video encoder */
/**
 * Initialize the mpegvideo encoder: validate AVCodecContext options against
 * the selected codec's capabilities, configure per-codec output format and
 * feature flags, allocate quantization/picture tables, and set up rate
 * control and CPB side data.
 *
 * NOTE(review): this listing is truncated — many lines of the original
 * function (declarations, break statements, closing braces, and the final
 * "return 0") are not visible in this chunk.
 */
302 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
304 MpegEncContext *s = avctx->priv_data;
305 AVCPBProperties *cpb_props;
308 mpv_encode_defaults(s);
/* --- chroma format from the input pixel format --- */
310 switch (avctx->pix_fmt) {
311 case AV_PIX_FMT_YUVJ444P:
312 case AV_PIX_FMT_YUV444P:
313 s->chroma_format = CHROMA_444;
315 case AV_PIX_FMT_YUVJ422P:
316 case AV_PIX_FMT_YUV422P:
317 s->chroma_format = CHROMA_422;
319 case AV_PIX_FMT_YUVJ420P:
320 case AV_PIX_FMT_YUV420P:
322 s->chroma_format = CHROMA_420;
326 avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
/* --- import deprecated public options into the private context --- */
328 #if FF_API_PRIVATE_OPT
329 FF_DISABLE_DEPRECATION_WARNINGS
330 if (avctx->rtp_payload_size)
331 s->rtp_payload_size = avctx->rtp_payload_size;
332 if (avctx->me_penalty_compensation)
333 s->me_penalty_compensation = avctx->me_penalty_compensation;
335 s->me_pre = avctx->pre_me;
336 FF_ENABLE_DEPRECATION_WARNINGS
/* --- basic stream parameters --- */
339 s->bit_rate = avctx->bit_rate;
340 s->width = avctx->width;
341 s->height = avctx->height;
/* Cap the GOP size at 600 unless experimental compliance allows more. */
342 if (avctx->gop_size > 600 &&
343 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
344 av_log(avctx, AV_LOG_WARNING,
345 "keyframe interval too large!, reducing it from %d to %d\n",
346 avctx->gop_size, 600);
347 avctx->gop_size = 600;
349 s->gop_size = avctx->gop_size;
351 if (avctx->max_b_frames > MAX_B_FRAMES) {
352 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
353 "is %d.\n", MAX_B_FRAMES);
354 avctx->max_b_frames = MAX_B_FRAMES;
356 s->max_b_frames = avctx->max_b_frames;
357 s->codec_id = avctx->codec->id;
358 s->strict_std_compliance = avctx->strict_std_compliance;
359 s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
360 s->rtp_mode = !!s->rtp_payload_size;
361 s->intra_dc_precision = avctx->intra_dc_precision;
363 // workaround some differences between how applications specify dc precision
364 if (s->intra_dc_precision < 0) {
365 s->intra_dc_precision += 8;
366 } else if (s->intra_dc_precision >= 8)
367 s->intra_dc_precision -= 8;
369 if (s->intra_dc_precision < 0) {
370 av_log(avctx, AV_LOG_ERROR,
371 "intra dc precision must be positive, note some applications use"
372 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
373 return AVERROR(EINVAL);
376 if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))
/* Only MPEG-2 supports DC precision above 8 bits (up to 11, i.e. value 3). */
379 if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
380 av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
381 return AVERROR(EINVAL);
383 s->user_specified_pts = AV_NOPTS_VALUE;
385 if (s->gop_size <= 1) {
393 s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
/* Adaptive quantization is enabled when any masking option or QP_RD asks
 * for per-MB qscale decisions. */
395 s->adaptive_quant = (avctx->lumi_masking ||
396 avctx->dark_masking ||
397 avctx->temporal_cplx_masking ||
398 avctx->spatial_cplx_masking ||
401 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
404 s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
/* --- VBV buffer auto-sizing when only a max rate was given --- */
406 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
407 switch(avctx->codec_id) {
408 case AV_CODEC_ID_MPEG1VIDEO:
409 case AV_CODEC_ID_MPEG2VIDEO:
410 avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
412 case AV_CODEC_ID_MPEG4:
413 case AV_CODEC_ID_MSMPEG4V1:
414 case AV_CODEC_ID_MSMPEG4V2:
415 case AV_CODEC_ID_MSMPEG4V3:
/* Piecewise-linear vbv size (in 16 KiB units) interpolated between the
 * MPEG-4 profile rate points. */
416 if (avctx->rc_max_rate >= 15000000) {
417 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
418 } else if(avctx->rc_max_rate >= 2000000) {
419 avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
420 } else if(avctx->rc_max_rate >= 384000) {
421 avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
423 avctx->rc_buffer_size = 40;
424 avctx->rc_buffer_size *= 16384;
427 if (avctx->rc_buffer_size) {
428 av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
/* --- rate-control option consistency checks --- */
432 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
433 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
434 return AVERROR(EINVAL);
437 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
438 av_log(avctx, AV_LOG_INFO,
439 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
442 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
443 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
444 return AVERROR(EINVAL);
447 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
448 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
449 return AVERROR(EINVAL);
452 if (avctx->rc_max_rate &&
453 avctx->rc_max_rate == avctx->bit_rate &&
454 avctx->rc_max_rate != avctx->rc_min_rate) {
455 av_log(avctx, AV_LOG_INFO,
456 "impossible bitrate constraints, this will fail\n");
459 if (avctx->rc_buffer_size &&
460 avctx->bit_rate * (int64_t)avctx->time_base.num >
461 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
462 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
463 return AVERROR(EINVAL);
/* Widen an unusably small bitrate tolerance to 5 frames' worth of bits. */
466 if (!s->fixed_qscale &&
467 avctx->bit_rate * av_q2d(avctx->time_base) >
468 avctx->bit_rate_tolerance) {
469 av_log(avctx, AV_LOG_WARNING,
470 "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
471 avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
/* vbv_delay is a 16-bit 90 kHz field in MPEG-1/2; warn when the buffer is
 * too large to express and VBR signalling (0xFFFF) will be used instead. */
474 if (avctx->rc_max_rate &&
475 avctx->rc_min_rate == avctx->rc_max_rate &&
476 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
477 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
478 90000LL * (avctx->rc_buffer_size - 1) >
479 avctx->rc_max_rate * 0xFFFFLL) {
480 av_log(avctx, AV_LOG_INFO,
481 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
482 "specified vbv buffer is too large for the given bitrate!\n");
/* --- codec-capability checks for requested features --- */
485 if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
486 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
487 s->codec_id != AV_CODEC_ID_FLV1) {
488 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
489 return AVERROR(EINVAL);
492 if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
493 av_log(avctx, AV_LOG_ERROR,
494 "OBMC is only supported with simple mb decision\n");
495 return AVERROR(EINVAL);
498 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
499 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
500 return AVERROR(EINVAL);
503 if (s->max_b_frames &&
504 s->codec_id != AV_CODEC_ID_MPEG4 &&
505 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
506 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
507 av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
508 return AVERROR(EINVAL);
510 if (s->max_b_frames < 0) {
511 av_log(avctx, AV_LOG_ERROR,
512 "max b frames must be 0 or positive for mpegvideo based encoders\n");
513 return AVERROR(EINVAL);
/* These codecs carry the pixel aspect ratio in 8-bit fields. */
516 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
517 s->codec_id == AV_CODEC_ID_H263 ||
518 s->codec_id == AV_CODEC_ID_H263P) &&
519 (avctx->sample_aspect_ratio.num > 255 ||
520 avctx->sample_aspect_ratio.den > 255)) {
521 av_log(avctx, AV_LOG_WARNING,
522 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
523 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
524 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
525 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
/* --- per-codec resolution restrictions --- */
528 if ((s->codec_id == AV_CODEC_ID_H263 ||
529 s->codec_id == AV_CODEC_ID_H263P) &&
530 (avctx->width > 2048 ||
531 avctx->height > 1152 )) {
532 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
533 return AVERROR(EINVAL);
535 if ((s->codec_id == AV_CODEC_ID_H263 ||
536 s->codec_id == AV_CODEC_ID_H263P ||
537 s->codec_id == AV_CODEC_ID_RV20) &&
538 ((avctx->width &3) ||
539 (avctx->height&3) )) {
540 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
541 return AVERROR(EINVAL);
544 if (s->codec_id == AV_CODEC_ID_RV10 &&
546 avctx->height&15 )) {
547 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
548 return AVERROR(EINVAL);
551 if ((s->codec_id == AV_CODEC_ID_WMV1 ||
552 s->codec_id == AV_CODEC_ID_WMV2) &&
554 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
555 return AVERROR(EINVAL);
558 if ((avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
559 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
560 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
561 return AVERROR(EINVAL);
564 #if FF_API_PRIVATE_OPT
565 FF_DISABLE_DEPRECATION_WARNINGS
566 if (avctx->mpeg_quant)
568 FF_ENABLE_DEPRECATION_WARNINGS
570 if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
571 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
572 av_log(avctx, AV_LOG_ERROR,
573 "mpeg2 style quantization not supported by codec\n");
574 return AVERROR(EINVAL);
578 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
579 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
580 return AVERROR(EINVAL);
583 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
584 avctx->mb_decision != FF_MB_DECISION_RD) {
585 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
586 return AVERROR(EINVAL);
589 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
590 (s->codec_id == AV_CODEC_ID_AMV ||
591 s->codec_id == AV_CODEC_ID_MJPEG)) {
592 // Used to produce garbage with MJPEG.
593 av_log(avctx, AV_LOG_ERROR,
594 "QP RD is no longer compatible with MJPEG or AMV\n");
595 return AVERROR(EINVAL);
598 #if FF_API_PRIVATE_OPT
599 FF_DISABLE_DEPRECATION_WARNINGS
600 if (avctx->scenechange_threshold)
601 s->scenechange_threshold = avctx->scenechange_threshold;
602 FF_ENABLE_DEPRECATION_WARNINGS
605 if (s->scenechange_threshold < 1000000000 &&
606 (avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
607 av_log(avctx, AV_LOG_ERROR,
608 "closed gop with scene change detection are not supported yet, "
609 "set threshold to 1000000000\n");
610 return AVERROR_PATCHWELCOME;
613 if (avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
614 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
615 s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
616 av_log(avctx, AV_LOG_ERROR,
617 "low delay forcing is only available for mpeg2, "
618 "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
619 return AVERROR(EINVAL);
621 if (s->max_b_frames != 0) {
622 av_log(avctx, AV_LOG_ERROR,
623 "B-frames cannot be used with low delay\n");
624 return AVERROR(EINVAL);
628 if (s->q_scale_type == 1) {
629 if (avctx->qmax > 28) {
630 av_log(avctx, AV_LOG_ERROR,
631 "non linear quant only supports qmax <= 28 currently\n");
632 return AVERROR_PATCHWELCOME;
636 if (avctx->slices > 1 &&
637 (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
638 av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
639 return AVERROR(EINVAL);
642 if (avctx->thread_count > 1 &&
643 s->codec_id != AV_CODEC_ID_MPEG4 &&
644 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
645 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
646 s->codec_id != AV_CODEC_ID_MJPEG &&
647 (s->codec_id != AV_CODEC_ID_H263P)) {
648 av_log(avctx, AV_LOG_ERROR,
649 "multi threaded encoding not supported by codec\n");
650 return AVERROR_PATCHWELCOME;
653 if (avctx->thread_count < 1) {
654 av_log(avctx, AV_LOG_ERROR,
655 "automatic thread number detection not supported by codec, "
657 return AVERROR_PATCHWELCOME;
660 #if FF_API_PRIVATE_OPT
661 FF_DISABLE_DEPRECATION_WARNINGS
662 if (avctx->b_frame_strategy)
663 s->b_frame_strategy = avctx->b_frame_strategy;
664 if (avctx->b_sensitivity != 40)
665 s->b_sensitivity = avctx->b_sensitivity;
666 FF_ENABLE_DEPRECATION_WARNINGS
669 if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
670 av_log(avctx, AV_LOG_INFO,
671 "notice: b_frame_strategy only affects the first pass\n");
672 s->b_frame_strategy = 0;
/* Reduce the timebase to lowest terms before the per-codec range checks. */
675 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
677 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
678 avctx->time_base.den /= i;
679 avctx->time_base.num /= i;
/* --- default quantizer biases (depend on quantizer style) --- */
683 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
684 // (a + x * 3 / 8) / x
685 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
686 s->inter_quant_bias = 0;
688 s->intra_quant_bias = 0;
690 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
693 if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
694 av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
695 return AVERROR(EINVAL);
698 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
/* MPEG-4 stores the timebase denominator in a 16-bit field. */
700 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
701 avctx->time_base.den > (1 << 16) - 1) {
702 av_log(avctx, AV_LOG_ERROR,
703 "timebase %d/%d not supported by MPEG 4 standard, "
704 "the maximum admitted value for the timebase denominator "
705 "is %d\n", avctx->time_base.num, avctx->time_base.den,
707 return AVERROR(EINVAL);
709 s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
/* --- per-codec output format and feature configuration --- */
711 switch (avctx->codec->id) {
712 case AV_CODEC_ID_MPEG1VIDEO:
713 s->out_format = FMT_MPEG1;
714 s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
715 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
717 case AV_CODEC_ID_MPEG2VIDEO:
718 s->out_format = FMT_MPEG1;
719 s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
720 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
723 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
724 case AV_CODEC_ID_MJPEG:
725 case AV_CODEC_ID_AMV:
726 s->out_format = FMT_MJPEG;
727 s->intra_only = 1; /* force intra only for jpeg */
728 if ((ret = ff_mjpeg_encode_init(s)) < 0)
734 case AV_CODEC_ID_SPEEDHQ:
735 s->out_format = FMT_SPEEDHQ;
736 s->intra_only = 1; /* force intra only for SHQ */
737 if (!CONFIG_SPEEDHQ_ENCODER)
738 return AVERROR_ENCODER_NOT_FOUND;
739 if ((ret = ff_speedhq_encode_init(s)) < 0)
744 case AV_CODEC_ID_H261:
745 if (!CONFIG_H261_ENCODER)
746 return AVERROR_ENCODER_NOT_FOUND;
747 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
748 av_log(avctx, AV_LOG_ERROR,
749 "The specified picture size of %dx%d is not valid for the "
750 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
751 s->width, s->height);
752 return AVERROR(EINVAL);
754 s->out_format = FMT_H261;
757 s->rtp_mode = 0; /* Sliced encoding not supported */
759 case AV_CODEC_ID_H263:
760 if (!CONFIG_H263_ENCODER)
761 return AVERROR_ENCODER_NOT_FOUND;
762 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
763 s->width, s->height) == 8) {
764 av_log(avctx, AV_LOG_ERROR,
765 "The specified picture size of %dx%d is not valid for "
766 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
767 "352x288, 704x576, and 1408x1152. "
768 "Try H.263+.\n", s->width, s->height);
769 return AVERROR(EINVAL);
771 s->out_format = FMT_H263;
775 case AV_CODEC_ID_H263P:
776 s->out_format = FMT_H263;
779 s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
780 s->modified_quant = s->h263_aic;
781 s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
782 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
785 /* These are just to be sure */
789 case AV_CODEC_ID_FLV1:
790 s->out_format = FMT_H263;
791 s->h263_flv = 2; /* format = 1; 11-bit codes */
792 s->unrestricted_mv = 1;
793 s->rtp_mode = 0; /* don't allow GOB */
797 case AV_CODEC_ID_RV10:
798 s->out_format = FMT_H263;
802 case AV_CODEC_ID_RV20:
803 s->out_format = FMT_H263;
806 s->modified_quant = 1;
810 s->unrestricted_mv = 0;
812 case AV_CODEC_ID_MPEG4:
813 s->out_format = FMT_H263;
815 s->unrestricted_mv = 1;
816 s->low_delay = s->max_b_frames ? 0 : 1;
817 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
819 case AV_CODEC_ID_MSMPEG4V2:
820 s->out_format = FMT_H263;
822 s->unrestricted_mv = 1;
823 s->msmpeg4_version = 2;
827 case AV_CODEC_ID_MSMPEG4V3:
828 s->out_format = FMT_H263;
830 s->unrestricted_mv = 1;
831 s->msmpeg4_version = 3;
832 s->flipflop_rounding = 1;
836 case AV_CODEC_ID_WMV1:
837 s->out_format = FMT_H263;
839 s->unrestricted_mv = 1;
840 s->msmpeg4_version = 4;
841 s->flipflop_rounding = 1;
845 case AV_CODEC_ID_WMV2:
846 s->out_format = FMT_H263;
848 s->unrestricted_mv = 1;
849 s->msmpeg4_version = 5;
850 s->flipflop_rounding = 1;
855 return AVERROR(EINVAL);
858 #if FF_API_PRIVATE_OPT
859 FF_DISABLE_DEPRECATION_WARNINGS
860 if (avctx->noise_reduction)
861 s->noise_reduction = avctx->noise_reduction;
862 FF_ENABLE_DEPRECATION_WARNINGS
865 avctx->has_b_frames = !s->low_delay;
869 s->progressive_frame =
870 s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
871 AV_CODEC_FLAG_INTERLACED_ME) ||
/* --- common context and DSP init --- */
876 if ((ret = ff_mpv_common_init(s)) < 0)
879 ff_fdctdsp_init(&s->fdsp, avctx);
880 ff_me_cmp_init(&s->mecc, avctx);
881 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
882 ff_pixblockdsp_init(&s->pdsp, avctx);
883 ff_qpeldsp_init(&s->qdsp);
885 if (s->msmpeg4_version) {
886 int ac_stats_size = 2 * 2 * (MAX_LEVEL + 1) * (MAX_RUN + 1) * 2 * sizeof(int);
887 if (!(s->ac_stats = av_mallocz(ac_stats_size)))
888 return AVERROR(ENOMEM);
/* --- table allocations (freed in ff_mpv_encode_end) --- */
891 if (!(avctx->stats_out = av_mallocz(256)) ||
892 !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix, 32) ||
893 !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix, 32) ||
894 !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix, 32) ||
895 !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16, 32) ||
896 !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
897 !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16, 32) ||
898 !FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_PICTURE_COUNT) ||
899 !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_PICTURE_COUNT))
900 return AVERROR(ENOMEM);
902 if (s->noise_reduction) {
903 if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
904 return AVERROR(ENOMEM);
907 ff_dct_encode_init(s);
909 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
910 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
912 if (s->slice_context_count > 1) {
915 if (avctx->codec_id == AV_CODEC_ID_H263P)
916 s->h263_slice_structured = 1;
919 s->quant_precision = 5;
921 #if FF_API_PRIVATE_OPT
922 FF_DISABLE_DEPRECATION_WARNINGS
923 if (avctx->frame_skip_threshold)
924 s->frame_skip_threshold = avctx->frame_skip_threshold;
925 if (avctx->frame_skip_factor)
926 s->frame_skip_factor = avctx->frame_skip_factor;
927 if (avctx->frame_skip_exp)
928 s->frame_skip_exp = avctx->frame_skip_exp;
929 if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
930 s->frame_skip_cmp = avctx->frame_skip_cmp;
931 FF_ENABLE_DEPRECATION_WARNINGS
934 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, avctx->ildct_cmp);
935 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
/* --- per-format encoder sub-init --- */
937 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
938 ff_h261_encode_init(s);
939 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
940 ff_h263_encode_init(s);
941 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
942 ff_msmpeg4_encode_init(s);
943 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
944 && s->out_format == FMT_MPEG1)
945 ff_mpeg1_encode_init(s);
/* --- default quantization matrices (in IDCT permutation order) --- */
948 for (i = 0; i < 64; i++) {
949 int j = s->idsp.idct_permutation[i];
950 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
952 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
953 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
954 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
956 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
957 } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
959 s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
962 s->chroma_intra_matrix[j] =
963 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
964 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
/* User-supplied matrices override the defaults. */
966 if (avctx->intra_matrix)
967 s->intra_matrix[j] = avctx->intra_matrix[i];
968 if (avctx->inter_matrix)
969 s->inter_matrix[j] = avctx->inter_matrix[i];
972 /* precompute matrix */
973 /* for mjpeg, we do include qscale in the matrix */
974 if (s->out_format != FMT_MJPEG) {
975 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
976 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
978 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
979 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
983 if ((ret = ff_rate_control_init(s)) < 0)
986 #if FF_API_PRIVATE_OPT
987 FF_DISABLE_DEPRECATION_WARNINGS
988 if (avctx->brd_scale)
989 s->brd_scale = avctx->brd_scale;
991 if (avctx->prediction_method)
992 s->pred = avctx->prediction_method + 1;
993 FF_ENABLE_DEPRECATION_WARNINGS
/* b_frame_strategy 2 needs downscaled temp frames for lookahead decisions. */
996 if (s->b_frame_strategy == 2) {
997 for (i = 0; i < s->max_b_frames + 2; i++) {
998 s->tmp_frames[i] = av_frame_alloc();
999 if (!s->tmp_frames[i])
1000 return AVERROR(ENOMEM);
1002 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1003 s->tmp_frames[i]->width = s->width >> s->brd_scale;
1004 s->tmp_frames[i]->height = s->height >> s->brd_scale;
1006 ret = av_frame_get_buffer(s->tmp_frames[i], 0);
/* --- export CPB properties as stream side data --- */
1012 cpb_props = ff_add_cpb_side_data(avctx);
1014 return AVERROR(ENOMEM);
1015 cpb_props->max_bitrate = avctx->rc_max_rate;
1016 cpb_props->min_bitrate = avctx->rc_min_rate;
1017 cpb_props->avg_bitrate = avctx->bit_rate;
1018 cpb_props->buffer_size = avctx->rc_buffer_size;
/* Tear down the encoder: release rate control, common context, codec-specific
 * state, and every table allocated in ff_mpv_encode_init().  The chroma
 * matrix pointers may alias the luma ones, hence the inequality guards
 * before freeing them. */
1023 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1025 MpegEncContext *s = avctx->priv_data;
1028 ff_rate_control_uninit(s);
1030 ff_mpv_common_end(s);
1031 if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
1032 s->out_format == FMT_MJPEG)
1033 ff_mjpeg_encode_close(s);
1035 av_freep(&avctx->extradata);
1037 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1038 av_frame_free(&s->tmp_frames[i]);
1040 ff_free_picture_tables(&s->new_picture);
1041 ff_mpeg_unref_picture(avctx, &s->new_picture);
1043 av_freep(&avctx->stats_out);
1044 av_freep(&s->ac_stats);
/* Chroma matrices are freed only when they do not alias the luma tables. */
1046 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1047 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1048 s->q_chroma_intra_matrix= NULL;
1049 s->q_chroma_intra_matrix16= NULL;
1050 av_freep(&s->q_intra_matrix);
1051 av_freep(&s->q_inter_matrix);
1052 av_freep(&s->q_intra_matrix16);
1053 av_freep(&s->q_inter_matrix16);
1054 av_freep(&s->input_picture);
1055 av_freep(&s->reordered_input_picture);
1056 av_freep(&s->dct_offset);
/* Sum of absolute errors of a 16x16 block against a constant reference
 * value (used as a flatness/texture measure by get_intra_count()). */
1061 static int get_sae(uint8_t *src, int ref, int stride)
1066 for (y = 0; y < 16; y++) {
1067 for (x = 0; x < 16; x++) {
1068 acc += FFABS(src[x + y * stride] - ref);
/* Count 16x16 blocks for which intra coding looks cheaper than inter:
 * a block votes "intra" when its SAE around its own mean (+500 margin)
 * is below the SAD against the reference frame. */
1075 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1076 uint8_t *ref, int stride)
/* Only full 16-pixel rows/columns are scanned (dimensions rounded down). */
1082 h = s->height & ~15;
1084 for (y = 0; y < h; y += 16) {
1085 for (x = 0; x < w; x += 16) {
1086 int offset = x + y * stride;
1087 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
/* Block mean: pix_sum over 256 pixels, rounded. */
1089 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1090 int sae = get_sae(src + offset, mean, stride);
1092 acc += sae + 500 < sad;
/* Thin wrapper around ff_alloc_picture() that forwards this encoder's
 * geometry and stride state; "shared" selects shared-buffer allocation. */
1098 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1100 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1101 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1102 s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1103 &s->linesize, &s->uvlinesize);
/* Queue one user-supplied frame into the encoder's reorder buffer
 * s->input_picture[].  The frame is either referenced directly (when its
 * layout matches the encoder's internal layout — "direct" path) or copied
 * into a freshly allocated internal Picture.  Also validates/derives the
 * pts and handles the flushing case where fewer frames than the encoding
 * delay were received.
 * NOTE(review): several branches (error paths, the `direct` computation,
 * the per-line copy loop) are elided from this view. */
1106 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1108 Picture *pic = NULL;
1110 int i, display_picture_number = 0, ret;
/* number of frames the output lags behind the input (B-frame reordering) */
1111 int encoding_delay = s->max_b_frames ? s->max_b_frames
1112 : (s->low_delay ? 0 : 1);
1113 int flush_offset = 1;
1118 display_picture_number = s->input_picture_number++;
/* pts handling: user pts must be strictly increasing; remember the delta
 * of the second frame to later derive dts for the first packets */
1120 if (pts != AV_NOPTS_VALUE) {
1121 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1122 int64_t last = s->user_specified_pts;
1125 av_log(s->avctx, AV_LOG_ERROR,
1126 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1128 return AVERROR(EINVAL);
1131 if (!s->low_delay && display_picture_number == 1)
1132 s->dts_delta = pts - last;
1134 s->user_specified_pts = pts;
/* no pts supplied: guess last+1, or fall back to the display number */
1136 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1137 s->user_specified_pts =
1138 pts = s->user_specified_pts + 1;
1139 av_log(s->avctx, AV_LOG_INFO,
1140 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1143 pts = display_picture_number;
/* direct use of the user buffer is only possible when strides, alignment
 * and dimensions exactly match the encoder's internal layout */
1147 if (!pic_arg->buf[0] ||
1148 pic_arg->linesize[0] != s->linesize ||
1149 pic_arg->linesize[1] != s->uvlinesize ||
1150 pic_arg->linesize[2] != s->uvlinesize)
1152 if ((s->width & 15) || (s->height & 15))
1154 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1156 if (s->linesize & (STRIDE_ALIGN-1))
1159 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1160 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1162 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1166 pic = &s->picture[i];
1170 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1173 ret = alloc_picture(s, pic, direct);
/* if the user buffer happens to alias our internal buffer (offset by the
 * edge region) no copy is needed; otherwise copy plane by plane */
1178 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1179 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1180 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1183 int h_chroma_shift, v_chroma_shift;
1184 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1188 for (i = 0; i < 3; i++) {
1189 int src_stride = pic_arg->linesize[i];
1190 int dst_stride = i ? s->uvlinesize : s->linesize;
1191 int h_shift = i ? h_chroma_shift : 0;
1192 int v_shift = i ? v_chroma_shift : 0;
1193 int w = s->width >> h_shift;
1194 int h = s->height >> v_shift;
1195 uint8_t *src = pic_arg->data[i];
1196 uint8_t *dst = pic->f->data[i];
/* interlaced MPEG-2 with large bottom padding needs special vpad
 * handling — presumably set in the elided lines; confirm upstream */
1199 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1200 && !s->progressive_sequence
1201 && FFALIGN(s->height, 32) - s->height > 16)
1204 if (!s->avctx->rc_buffer_size)
1205 dst += INPLACE_OFFSET;
/* fast path: identical strides allow one bulk copy per plane */
1207 if (src_stride == dst_stride)
1208 memcpy(dst, src, src_stride * h);
1211 uint8_t *dst2 = dst;
1213 memcpy(dst2, src, w);
/* pad non-MB-aligned dimensions by replicating the border pixels */
1218 if ((s->width & 15) || (s->height & (vpad-1))) {
1219 s->mpvencdsp.draw_edges(dst, dst_stride,
1229 ret = av_frame_copy_props(pic->f, pic_arg);
1233 pic->f->display_picture_number = display_picture_number;
1234 pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1236 /* Flushing: When we have not received enough input frames,
1237 * ensure s->input_picture[0] contains the first picture */
1238 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1239 if (s->input_picture[flush_offset])
1242 if (flush_offset <= 1)
1245 encoding_delay = encoding_delay - flush_offset + 1;
1248 /* shift buffer entries */
1249 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1250 s->input_picture[i - flush_offset] = s->input_picture[i];
1252 s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether picture `p` is similar enough to the reference `ref`
 * to be skipped entirely (frame-skip feature).  Accumulates a per-8x8
 * comparison score over all three planes; |frame_skip_exp| selects how
 * the per-block values are folded (max, L1, L2, L3, L4), and a negative
 * exponent normalizes the result via pow().
 * NOTE(review): the return statements are elided; presumably returns
 * nonzero when both threshold tests pass — confirm against upstream. */
1257 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1261 int64_t score64 = 0;
1263 for (plane = 0; plane < 3; plane++) {
1264 const int stride = p->f->linesize[plane];
/* luma covers 2x as many 8x8 blocks per MB dimension as chroma */
1265 const int bw = plane ? 1 : 2;
1266 for (y = 0; y < s->mb_height * bw; y++) {
1267 for (x = 0; x < s->mb_width * bw; x++) {
/* non-shared pictures carry a 16-pixel edge offset */
1268 int off = p->shared ? 0 : 16;
1269 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1270 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1271 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
/* fold the 8x8 comparison value according to the chosen exponent */
1273 switch (FFABS(s->frame_skip_exp)) {
1274 case 0: score = FFMAX(score, v); break;
1275 case 1: score += FFABS(v); break;
1276 case 2: score64 += v * (int64_t)v; break;
1277 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1278 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
/* negative exponent: take the per-MB mean to the power 1/|exp| */
1287 if (s->frame_skip_exp < 0)
1288 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1289 -1.0/s->frame_skip_exp);
1291 if (score64 < s->frame_skip_threshold)
1293 if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
/* Encode one frame with the auxiliary context `c` (used by the B-frame
 * strategy-2 probe below) via the send/receive API.  EAGAIN/EOF from
 * receive are not errors; other negative codes propagate.
 * NOTE(review): the loop structure and the returned size accumulation
 * are elided from this view. */
1298 static int encode_frame(AVCodecContext *c, AVFrame *frame, AVPacket *pkt)
1303 ret = avcodec_send_frame(c, frame);
1308 ret = avcodec_receive_packet(c, pkt);
/* packet contents are not needed, only its size — drop the data */
1311 av_packet_unref(pkt);
1312 } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
/* Estimate the best number of B-frames to insert before the next P-frame
 * (b_frame_strategy == 2).  Downscales the queued input pictures by
 * 2^brd_scale, then trial-encodes every candidate B-count `j` with a
 * scratch encoder and picks the one with the lowest rate-distortion cost.
 * NOTE(review): loop closers, the best_rd/best_b_count update and several
 * error paths are elided from this view. */
1319 static int estimate_best_b_count(MpegEncContext *s)
1321 const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1323 const int scale = s->brd_scale;
1324 int width = s->width >> scale;
1325 int height = s->height >> scale;
1326 int i, j, out_size, p_lambda, b_lambda, lambda2;
1327 int64_t best_rd = INT64_MAX;
1328 int best_b_count = -1;
1331 av_assert0(scale >= 0 && scale <= 3);
1333 pkt = av_packet_alloc();
1335 return AVERROR(ENOMEM);
/* reuse the lambdas of the last P/B frames as quality targets */
1338 //s->next_picture_ptr->quality;
1339 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1340 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1341 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1342 if (!b_lambda) // FIXME we should do this somewhere else
1343 b_lambda = p_lambda;
1344 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* build the downscaled frames: slot 0 is the last output picture, the
 * rest come from the input queue */
1347 for (i = 0; i < s->max_b_frames + 2; i++) {
1348 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1349 s->next_picture_ptr;
1352 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1353 pre_input = *pre_input_ptr;
1354 memcpy(data, pre_input_ptr->f->data, sizeof(data));
/* non-shared internal pictures carry the edge offset */
1356 if (!pre_input.shared && i) {
1357 data[0] += INPLACE_OFFSET;
1358 data[1] += INPLACE_OFFSET;
1359 data[2] += INPLACE_OFFSET;
1362 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1363 s->tmp_frames[i]->linesize[0],
1365 pre_input.f->linesize[0],
1367 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1368 s->tmp_frames[i]->linesize[1],
1370 pre_input.f->linesize[1],
1371 width >> 1, height >> 1),
1372 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1373 s->tmp_frames[i]->linesize[2],
1375 pre_input.f->linesize[2],
1376 width >> 1, height >> 1);
/* try every candidate B-frame count j */
1380 for (j = 0; j < s->max_b_frames + 1; j++) {
1384 if (!s->input_picture[j])
/* fresh scratch encoder per candidate, mirroring the user settings */
1387 c = avcodec_alloc_context3(NULL);
1389 ret = AVERROR(ENOMEM);
1395 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1396 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1397 c->mb_decision = s->avctx->mb_decision;
1398 c->me_cmp = s->avctx->me_cmp;
1399 c->mb_cmp = s->avctx->mb_cmp;
1400 c->me_sub_cmp = s->avctx->me_sub_cmp;
1401 c->pix_fmt = AV_PIX_FMT_YUV420P;
1402 c->time_base = s->avctx->time_base;
1403 c->max_b_frames = s->max_b_frames;
1405 ret = avcodec_open2(c, codec, NULL);
/* first frame is always I; its cost is deliberately not counted */
1410 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1411 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1413 out_size = encode_frame(c, s->tmp_frames[0], pkt);
1419 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1421 for (i = 0; i < s->max_b_frames + 1; i++) {
/* pattern: every (j+1)-th frame is P, the rest are B */
1422 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1424 s->tmp_frames[i + 1]->pict_type = is_p ?
1425 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1426 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1428 out_size = encode_frame(c, s->tmp_frames[i + 1], pkt);
1434 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1437 /* get the delayed frames */
1438 out_size = encode_frame(c, NULL, pkt);
1443 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* add the distortion reported by the scratch encoder (PSNR mode) */
1445 rd += c->error[0] + c->error[1] + c->error[2];
1453 avcodec_free_context(&c);
1454 av_packet_unref(pkt);
1461 av_packet_free(&pkt);
1463 return best_b_count;
/* Pick the next picture to encode: decides its coding type (I/P/B),
 * reorders the input queue into s->reordered_input_picture[] (B-frames
 * are emitted after the P/I frame they precede in display order), and
 * sets up s->new_picture / s->current_picture_ptr.
 * NOTE(review): numerous branch closers and error paths are elided. */
1466 static int select_input_picture(MpegEncContext *s)
1470 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1471 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1472 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1474 /* set next picture type & ordering */
1475 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
/* optional frame skipping against the last reference */
1476 if (s->frame_skip_threshold || s->frame_skip_factor) {
1477 if (s->picture_in_gop_number < s->gop_size &&
1478 s->next_picture_ptr &&
1479 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1480 // FIXME check that the gop check above is +-1 correct
1481 av_frame_unref(s->input_picture[0]->f);
1483 ff_vbv_update(s, 0);
/* no reference yet (or intra-only) -> force an I-frame */
1489 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1490 !s->next_picture_ptr || s->intra_only) {
1491 s->reordered_input_picture[0] = s->input_picture[0];
1492 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1493 s->reordered_input_picture[0]->f->coded_picture_number =
1494 s->coded_picture_number++;
/* two-pass mode: picture types come from the first-pass log */
1498 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1499 for (i = 0; i < s->max_b_frames + 1; i++) {
1500 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1502 if (pict_num >= s->rc_context.num_entries)
1504 if (!s->input_picture[i]) {
1505 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1509 s->input_picture[i]->f->pict_type =
1510 s->rc_context.entry[pict_num].new_pict_type;
/* choose the B-frame count according to the configured strategy */
1514 if (s->b_frame_strategy == 0) {
1515 b_frames = s->max_b_frames;
1516 while (b_frames && !s->input_picture[b_frames])
1518 } else if (s->b_frame_strategy == 1) {
/* strategy 1: use the intra-MB count as a scene-change heuristic */
1519 for (i = 1; i < s->max_b_frames + 1; i++) {
1520 if (s->input_picture[i] &&
1521 s->input_picture[i]->b_frame_score == 0) {
1522 s->input_picture[i]->b_frame_score =
1524 s->input_picture[i ]->f->data[0],
1525 s->input_picture[i - 1]->f->data[0],
1529 for (i = 0; i < s->max_b_frames + 1; i++) {
1530 if (!s->input_picture[i] ||
1531 s->input_picture[i]->b_frame_score - 1 >
1532 s->mb_num / s->b_sensitivity)
1536 b_frames = FFMAX(0, i - 1);
/* reset the cached scores of the pictures we are about to consume */
1539 for (i = 0; i < b_frames + 1; i++) {
1540 s->input_picture[i]->b_frame_score = 0;
1542 } else if (s->b_frame_strategy == 2) {
1543 b_frames = estimate_best_b_count(s);
/* user-forced picture types override the heuristic */
1550 for (i = b_frames - 1; i >= 0; i--) {
1551 int type = s->input_picture[i]->f->pict_type;
1552 if (type && type != AV_PICTURE_TYPE_B)
1555 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1556 b_frames == s->max_b_frames) {
1557 av_log(s->avctx, AV_LOG_ERROR,
1558 "warning, too many B-frames in a row\n");
/* GOP boundary handling; closed GOPs must not cross with B-frames */
1561 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1562 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1563 s->gop_size > s->picture_in_gop_number) {
1564 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1566 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1568 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1572 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1573 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
/* emit the anchor (P/I) first, then the preceding B-frames */
1576 s->reordered_input_picture[0] = s->input_picture[b_frames];
1577 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1578 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1579 s->reordered_input_picture[0]->f->coded_picture_number =
1580 s->coded_picture_number++;
1581 for (i = 0; i < b_frames; i++) {
1582 s->reordered_input_picture[i + 1] = s->input_picture[i];
1583 s->reordered_input_picture[i + 1]->f->pict_type =
1585 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1586 s->coded_picture_number++;
1591 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1593 if (s->reordered_input_picture[0]) {
/* reference=3 marks frame references; B-frames are non-references */
1594 s->reordered_input_picture[0]->reference =
1595 s->reordered_input_picture[0]->f->pict_type !=
1596 AV_PICTURE_TYPE_B ? 3 : 0;
1598 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1601 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1602 // input is a shared pix, so we can't modify it -> allocate a new
1603 // one & ensure that the shared one is reuseable
1606 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1609 pic = &s->picture[i];
1611 pic->reference = s->reordered_input_picture[0]->reference;
1612 if (alloc_picture(s, pic, 0) < 0) {
1616 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1620 /* mark us unused / free shared pic */
1621 av_frame_unref(s->reordered_input_picture[0]->f);
1622 s->reordered_input_picture[0]->shared = 0;
1624 s->current_picture_ptr = pic;
1626 // input is not a shared pix -> reuse buffer for current_pix
1627 s->current_picture_ptr = s->reordered_input_picture[0];
1628 for (i = 0; i < 4; i++) {
1629 if (s->new_picture.f->data[i])
1630 s->new_picture.f->data[i] += INPLACE_OFFSET;
1633 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1634 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1635 s->current_picture_ptr)) < 0)
1638 s->picture_number = s->new_picture.f->display_picture_number;
/* Post-encode bookkeeping for the just-coded frame: pad the edges of
 * reference pictures (for unrestricted MV prediction), remember the
 * last picture type/lambda, and maintain the deprecated coded_frame /
 * error-field compatibility copies. */
1643 static void frame_end(MpegEncContext *s)
/* only reference frames need the replicated border for motion search */
1645 if (s->unrestricted_mv &&
1646 s->current_picture.reference &&
1648 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1649 int hshift = desc->log2_chroma_w;
1650 int vshift = desc->log2_chroma_h;
1651 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1652 s->current_picture.f->linesize[0],
1653 s->h_edge_pos, s->v_edge_pos,
1654 EDGE_WIDTH, EDGE_WIDTH,
1655 EDGE_TOP | EDGE_BOTTOM);
/* chroma planes use the same edge width scaled by the subsampling */
1656 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1657 s->current_picture.f->linesize[1],
1658 s->h_edge_pos >> hshift,
1659 s->v_edge_pos >> vshift,
1660 EDGE_WIDTH >> hshift,
1661 EDGE_WIDTH >> vshift,
1662 EDGE_TOP | EDGE_BOTTOM);
1663 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1664 s->current_picture.f->linesize[2],
1665 s->h_edge_pos >> hshift,
1666 s->v_edge_pos >> vshift,
1667 EDGE_WIDTH >> hshift,
1668 EDGE_WIDTH >> vshift,
1669 EDGE_TOP | EDGE_BOTTOM);
/* remember last frame's type and lambda for the rate control / next frame */
1674 s->last_pict_type = s->pict_type;
1675 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1676 if (s->pict_type!= AV_PICTURE_TYPE_B)
1677 s->last_non_b_pict_type = s->pict_type;
/* deprecated API compatibility: mirror into avctx->coded_frame */
1679 #if FF_API_CODED_FRAME
1680 FF_DISABLE_DEPRECATION_WARNINGS
1681 av_frame_unref(s->avctx->coded_frame);
1682 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1683 FF_ENABLE_DEPRECATION_WARNINGS
1685 #if FF_API_ERROR_FRAME
1686 FF_DISABLE_DEPRECATION_WARNINGS
1687 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1688 sizeof(s->current_picture.encoding_error));
1689 FF_ENABLE_DEPRECATION_WARNINGS
/* Refresh the per-coefficient DCT offsets used for noise reduction,
 * separately for intra and inter blocks.  The running error sums are
 * halved once the sample count exceeds 2^16 to keep a sliding average. */
1693 static void update_noise_reduction(MpegEncContext *s)
1697 for (intra = 0; intra < 2; intra++) {
/* decay: halve counts and error sums to bound the accumulators */
1698 if (s->dct_count[intra] > (1 << 16)) {
1699 for (i = 0; i < 64; i++) {
1700 s->dct_error_sum[intra][i] >>= 1;
1702 s->dct_count[intra] >>= 1;
/* offset ~= noise_reduction * count / mean_error (rounded) */
1705 for (i = 0; i < 64; i++) {
1706 s->dct_offset[intra][i] = (s->noise_reduction *
1707 s->dct_count[intra] +
1708 s->dct_error_sum[intra][i] / 2) /
1709 (s->dct_error_sum[intra][i] + 1);
/* Per-frame setup before encoding: rotate the last/next reference
 * pictures, re-reference current/last/next into the working Picture
 * structs, adjust plane pointers for field pictures, select the
 * dequantizers for the current codec, and update noise reduction. */
1714 static int frame_start(MpegEncContext *s)
1718 /* mark & release old frames */
1719 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1720 s->last_picture_ptr != s->next_picture_ptr &&
1721 s->last_picture_ptr->f->buf[0]) {
1722 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1725 s->current_picture_ptr->f->pict_type = s->pict_type;
1726 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1728 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1729 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1730 s->current_picture_ptr)) < 0)
/* non-B frames become the new forward reference */
1733 if (s->pict_type != AV_PICTURE_TYPE_B) {
1734 s->last_picture_ptr = s->next_picture_ptr;
1736 s->next_picture_ptr = s->current_picture_ptr;
1739 if (s->last_picture_ptr) {
1740 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1741 if (s->last_picture_ptr->f->buf[0] &&
1742 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1743 s->last_picture_ptr)) < 0)
1746 if (s->next_picture_ptr) {
1747 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1748 if (s->next_picture_ptr->f->buf[0] &&
1749 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1750 s->next_picture_ptr)) < 0)
/* field coding: point into the chosen field and double the strides */
1754 if (s->picture_structure!= PICT_FRAME) {
1756 for (i = 0; i < 4; i++) {
1757 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1758 s->current_picture.f->data[i] +=
1759 s->current_picture.f->linesize[i];
1761 s->current_picture.f->linesize[i] *= 2;
1762 s->last_picture.f->linesize[i] *= 2;
1763 s->next_picture.f->linesize[i] *= 2;
/* select the matching dequantizer set for the target bitstream */
1767 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1768 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1769 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1770 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1771 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1772 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1774 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1775 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1778 if (s->dct_error_sum) {
1779 av_assert2(s->noise_reduction && s->encoding);
1780 update_noise_reduction(s);
/* Public entry point: encode one input frame into `pkt`.
 * Loads/reorders the input, runs the actual encoding, handles the VBV
 * re-encode loop, stuffing, CBR vbv_delay patching, pts/dts assignment
 * and side data.  *got_packet is set when a packet was produced.
 * NOTE(review): many branch closers, gotos and return statements are
 * elided from this view. */
1786 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1787 const AVFrame *pic_arg, int *got_packet)
1789 MpegEncContext *s = avctx->priv_data;
1790 int i, stuffing_count, ret;
1791 int context_count = s->slice_context_count;
1793 s->vbv_ignore_qmax = 0;
1795 s->picture_in_gop_number++;
1797 if (load_input_picture(s, pic_arg) < 0)
1800 if (select_input_picture(s) < 0) {
/* output? — only when a reordered picture is ready for encoding */
1805 if (s->new_picture.f->data[0]) {
/* single-slice encoders can grow the internal byte buffer on demand */
1806 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1807 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1809 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1810 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
/* optional H.263 macroblock info side data (12 bytes per MB) */
1813 s->mb_info_ptr = av_packet_new_side_data(pkt,
1814 AV_PKT_DATA_H263_MB_INFO,
1815 s->mb_width*s->mb_height*12);
1816 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* give each slice thread a proportional sub-range of the packet */
1819 for (i = 0; i < context_count; i++) {
1820 int start_y = s->thread_context[i]->start_mb_y;
1821 int end_y = s->thread_context[i]-> end_mb_y;
1822 int h = s->mb_height;
1823 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1824 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1826 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1829 s->pict_type = s->new_picture.f->pict_type;
1831 ret = frame_start(s);
1835 ret = encode_picture(s, s->picture_number);
1836 if (growing_buffer) {
1837 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1838 pkt->data = s->pb.buf;
1839 pkt->size = avctx->internal->byte_buffer_size;
/* deprecated per-frame statistics mirrored into avctx */
1844 #if FF_API_STAT_BITS
1845 FF_DISABLE_DEPRECATION_WARNINGS
1846 avctx->header_bits = s->header_bits;
1847 avctx->mv_bits = s->mv_bits;
1848 avctx->misc_bits = s->misc_bits;
1849 avctx->i_tex_bits = s->i_tex_bits;
1850 avctx->p_tex_bits = s->p_tex_bits;
1851 avctx->i_count = s->i_count;
1852 // FIXME f/b_count in avctx
1853 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1854 avctx->skip_count = s->skip_count;
1855 FF_ENABLE_DEPRECATION_WARNINGS
1860 if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->out_format == FMT_MJPEG)
1861 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
/* VBV overflow check: if the frame is too large, raise lambda and
 * re-encode (presumably via an elided goto back to the encode step) */
1863 if (avctx->rc_buffer_size) {
1864 RateControlContext *rcc = &s->rc_context;
1865 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1866 int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
1867 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1869 if (put_bits_count(&s->pb) > max_size &&
1870 s->lambda < s->lmax) {
1871 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1872 (s->qscale + 1) / s->qscale);
1873 if (s->adaptive_quant) {
1875 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1876 s->lambda_table[i] =
1877 FFMAX(s->lambda_table[i] + min_step,
1878 s->lambda_table[i] * (s->qscale + 1) /
1881 s->mb_skipped = 0; // done in frame_start()
1882 // done in encode_picture() so we must undo it
1883 if (s->pict_type == AV_PICTURE_TYPE_P) {
1884 if (s->flipflop_rounding ||
1885 s->codec_id == AV_CODEC_ID_H263P ||
1886 s->codec_id == AV_CODEC_ID_MPEG4)
1887 s->no_rounding ^= 1;
1889 if (s->pict_type != AV_PICTURE_TYPE_B) {
1890 s->time_base = s->last_time_base;
1891 s->last_non_b_time = s->time - s->pp_time;
/* rewind all slice bitstream writers for the re-encode */
1893 for (i = 0; i < context_count; i++) {
1894 PutBitContext *pb = &s->thread_context[i]->pb;
1895 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1897 s->vbv_ignore_qmax = 1;
1898 av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1902 av_assert0(avctx->rc_max_rate);
1905 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1906 ff_write_pass1_stats(s);
/* propagate per-plane encoding error (PSNR) into avctx and side data */
1908 for (i = 0; i < 4; i++) {
1909 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1910 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1912 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1913 s->current_picture_ptr->encoding_error,
1914 (avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1917 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1918 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1919 s->misc_bits + s->i_tex_bits +
1921 flush_put_bits(&s->pb);
1922 s->frame_bits = put_bits_count(&s->pb);
/* rate control tells us how many stuffing bytes are needed for CBR */
1924 stuffing_count = ff_vbv_update(s, s->frame_bits);
1925 s->stuffing_bits = 8*stuffing_count;
1926 if (stuffing_count) {
1927 if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
1928 av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
1932 switch (s->codec_id) {
1933 case AV_CODEC_ID_MPEG1VIDEO:
1934 case AV_CODEC_ID_MPEG2VIDEO:
1935 while (stuffing_count--) {
1936 put_bits(&s->pb, 8, 0);
/* MPEG-4 stuffing: a dedicated start code then 0xFF filler bytes */
1939 case AV_CODEC_ID_MPEG4:
1940 put_bits(&s->pb, 16, 0);
1941 put_bits(&s->pb, 16, 0x1C3);
1942 stuffing_count -= 4;
1943 while (stuffing_count--) {
1944 put_bits(&s->pb, 8, 0xFF);
1948 av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1950 flush_put_bits(&s->pb);
1951 s->frame_bits = put_bits_count(&s->pb);
1954 /* update MPEG-1/2 vbv_delay for CBR */
1955 if (avctx->rc_max_rate &&
1956 avctx->rc_min_rate == avctx->rc_max_rate &&
1957 s->out_format == FMT_MPEG1 &&
1958 90000LL * (avctx->rc_buffer_size - 1) <=
1959 avctx->rc_max_rate * 0xFFFFLL) {
1960 AVCPBProperties *props;
1963 int vbv_delay, min_delay;
1964 double inbits = avctx->rc_max_rate *
1965 av_q2d(avctx->time_base);
1966 int minbits = s->frame_bits - 8 *
1967 (s->vbv_delay_ptr - s->pb.buf - 1);
1968 double bits = s->rc_context.buffer_index + minbits - inbits;
1971 av_log(avctx, AV_LOG_ERROR,
1972 "Internal error, negative bits\n");
1974 av_assert1(s->repeat_first_field == 0);
/* vbv_delay is expressed in 90 kHz clock ticks */
1976 vbv_delay = bits * 90000 / avctx->rc_max_rate;
1977 min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
1980 vbv_delay = FFMAX(vbv_delay, min_delay);
1982 av_assert0(vbv_delay < 0xFFFF);
/* patch the 16-bit vbv_delay field in the already-written header */
1984 s->vbv_delay_ptr[0] &= 0xF8;
1985 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1986 s->vbv_delay_ptr[1] = vbv_delay >> 5;
1987 s->vbv_delay_ptr[2] &= 0x07;
1988 s->vbv_delay_ptr[2] |= vbv_delay << 3;
1990 props = av_cpb_properties_alloc(&props_size);
1992 return AVERROR(ENOMEM);
1993 props->vbv_delay = vbv_delay * 300;
1995 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
1996 (uint8_t*)props, props_size);
2002 #if FF_API_VBV_DELAY
2003 FF_DISABLE_DEPRECATION_WARNINGS
2004 avctx->vbv_delay = vbv_delay * 300;
2005 FF_ENABLE_DEPRECATION_WARNINGS
2008 s->total_bits += s->frame_bits;
2009 #if FF_API_STAT_BITS
2010 FF_DISABLE_DEPRECATION_WARNINGS
2011 avctx->frame_bits = s->frame_bits;
2012 FF_ENABLE_DEPRECATION_WARNINGS
/* pts/dts: B-frame reordering delays dts by one frame (dts_delta) */
2016 pkt->pts = s->current_picture.f->pts;
2017 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2018 if (!s->current_picture.f->coded_picture_number)
2019 pkt->dts = pkt->pts - s->dts_delta;
2021 pkt->dts = s->reordered_pts;
2022 s->reordered_pts = pkt->pts;
2024 pkt->dts = pkt->pts;
2025 if (s->current_picture.f->key_frame)
2026 pkt->flags |= AV_PKT_FLAG_KEY;
2028 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2033 /* release non-reference frames */
2034 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2035 if (!s->picture[i].reference)
2036 ff_mpeg_unref_picture(avctx, &s->picture[i]);
2039 av_assert1((s->frame_bits & 7) == 0);
2041 pkt->size = s->frame_bits / 8;
2042 *got_packet = !!pkt->size;
/* Zero out block `n` when it contains only a few small coefficients whose
 * weighted "importance" score stays below `threshold` — cheaper to code
 * nothing than a near-empty block.  A negative threshold additionally
 * allows the DC coefficient to be skipped (skip_dc path; the skip_dc
 * setup lines are elided from this view). */
2046 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2047 int n, int threshold)
/* per-position weights in scan order: low-frequency coefficients count
 * more toward the "keep the block" score */
2049 static const char tab[64] = {
2050 3, 2, 2, 1, 1, 1, 1, 1,
2051 1, 1, 1, 1, 1, 1, 1, 1,
2052 1, 1, 1, 1, 1, 1, 1, 1,
2053 0, 0, 0, 0, 0, 0, 0, 0,
2054 0, 0, 0, 0, 0, 0, 0, 0,
2055 0, 0, 0, 0, 0, 0, 0, 0,
2056 0, 0, 0, 0, 0, 0, 0, 0,
2057 0, 0, 0, 0, 0, 0, 0, 0
2062 int16_t *block = s->block[n];
2063 const int last_index = s->block_last_index[n];
2066 if (threshold < 0) {
2068 threshold = -threshold;
2072 /* Are all we could set to zero already zero? */
2073 if (last_index <= skip_dc - 1)
/* accumulate the importance score; any |level| > 1 keeps the block */
2076 for (i = 0; i <= last_index; i++) {
2077 const int j = s->intra_scantable.permutated[i];
2078 const int level = FFABS(block[j]);
2080 if (skip_dc && i == 0)
2084 } else if (level > 1) {
2090 if (score >= threshold)
/* below threshold: clear the coefficients and mark the block empty */
2092 for (i = skip_dc; i <= last_index; i++) {
2093 const int j = s->intra_scantable.permutated[i];
2097 s->block_last_index[n] = 0;
2099 s->block_last_index[n] = -1;
/* Clamp quantized coefficients into the codec's representable range
 * [min_qcoeff, max_qcoeff]; the intra DC coefficient is never clipped.
 * Logs a warning when clipping occurred under simple MB decision (with
 * RD decision the overflow would have been avoided by requantizing). */
2102 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2106 const int maxlevel = s->max_qcoeff;
2107 const int minlevel = s->min_qcoeff;
2111 i = 1; // skip clipping of intra dc
2115 for (; i <= last_index; i++) {
2116 const int j = s->intra_scantable.permutated[i];
2117 int level = block[j];
2119 if (level > maxlevel) {
2122 } else if (level < minlevel) {
2130 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2131 av_log(s->avctx, AV_LOG_INFO,
2132 "warning, clipping %d dct coefficients to %d..%d\n",
2133 overflow, minlevel, maxlevel);
/* Compute a perceptual weight for each pixel of an 8x8 block: the local
 * standard deviation over the pixel's 3x3 neighbourhood (clamped at the
 * block border) scaled by 36/count.  Flat areas get high weight (errors
 * there are more visible); used by the noise-shaping quantizer.
 * NOTE(review): sum/sqr/count accumulation lines are elided here. */
2136 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2140 for (y = 0; y < 8; y++) {
2141 for (x = 0; x < 8; x++) {
2147 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2148 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2149 int v = ptr[x2 + y2 * stride];
/* 36 * sqrt(count*E[v^2] - (E[v])^2) / count  (integer std-dev scale) */
2155 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2160 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2161 int motion_x, int motion_y,
2162 int mb_block_height,
2166 int16_t weight[12][64];
2167 int16_t orig[12][64];
2168 const int mb_x = s->mb_x;
2169 const int mb_y = s->mb_y;
2172 int dct_offset = s->linesize * 8; // default for progressive frames
2173 int uv_dct_offset = s->uvlinesize * 8;
2174 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2175 ptrdiff_t wrap_y, wrap_c;
2177 for (i = 0; i < mb_block_count; i++)
2178 skip_dct[i] = s->skipdct;
2180 if (s->adaptive_quant) {
2181 const int last_qp = s->qscale;
2182 const int mb_xy = mb_x + mb_y * s->mb_stride;
2184 s->lambda = s->lambda_table[mb_xy];
2187 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2188 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2189 s->dquant = s->qscale - last_qp;
2191 if (s->out_format == FMT_H263) {
2192 s->dquant = av_clip(s->dquant, -2, 2);
2194 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2196 if (s->pict_type == AV_PICTURE_TYPE_B) {
2197 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2200 if (s->mv_type == MV_TYPE_8X8)
2206 ff_set_qscale(s, last_qp + s->dquant);
2207 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2208 ff_set_qscale(s, s->qscale + s->dquant);
2210 wrap_y = s->linesize;
2211 wrap_c = s->uvlinesize;
2212 ptr_y = s->new_picture.f->data[0] +
2213 (mb_y * 16 * wrap_y) + mb_x * 16;
2214 ptr_cb = s->new_picture.f->data[1] +
2215 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2216 ptr_cr = s->new_picture.f->data[2] +
2217 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2219 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2220 uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2221 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2222 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2223 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2225 16, 16, mb_x * 16, mb_y * 16,
2226 s->width, s->height);
2228 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2230 mb_block_width, mb_block_height,
2231 mb_x * mb_block_width, mb_y * mb_block_height,
2233 ptr_cb = ebuf + 16 * wrap_y;
2234 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2236 mb_block_width, mb_block_height,
2237 mb_x * mb_block_width, mb_y * mb_block_height,
2239 ptr_cr = ebuf + 16 * wrap_y + 16;
2243 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2244 int progressive_score, interlaced_score;
2246 s->interlaced_dct = 0;
2247 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2248 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2249 NULL, wrap_y, 8) - 400;
2251 if (progressive_score > 0) {
2252 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2253 NULL, wrap_y * 2, 8) +
2254 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2255 NULL, wrap_y * 2, 8);
2256 if (progressive_score > interlaced_score) {
2257 s->interlaced_dct = 1;
2259 dct_offset = wrap_y;
2260 uv_dct_offset = wrap_c;
2262 if (s->chroma_format == CHROMA_422 ||
2263 s->chroma_format == CHROMA_444)
2269 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2270 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2271 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2272 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2274 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2278 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2279 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2280 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2281 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2282 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2283 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2284 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2285 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2286 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2287 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2288 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2289 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2293 op_pixels_func (*op_pix)[4];
2294 qpel_mc_func (*op_qpix)[16];
2295 uint8_t *dest_y, *dest_cb, *dest_cr;
2297 dest_y = s->dest[0];
2298 dest_cb = s->dest[1];
2299 dest_cr = s->dest[2];
2301 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2302 op_pix = s->hdsp.put_pixels_tab;
2303 op_qpix = s->qdsp.put_qpel_pixels_tab;
2305 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2306 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2309 if (s->mv_dir & MV_DIR_FORWARD) {
2310 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2311 s->last_picture.f->data,
2313 op_pix = s->hdsp.avg_pixels_tab;
2314 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2316 if (s->mv_dir & MV_DIR_BACKWARD) {
2317 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2318 s->next_picture.f->data,
2322 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2323 int progressive_score, interlaced_score;
2325 s->interlaced_dct = 0;
2326 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2327 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2331 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2332 progressive_score -= 400;
2334 if (progressive_score > 0) {
2335 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2337 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2341 if (progressive_score > interlaced_score) {
2342 s->interlaced_dct = 1;
2344 dct_offset = wrap_y;
2345 uv_dct_offset = wrap_c;
2347 if (s->chroma_format == CHROMA_422)
2353 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2354 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2355 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2356 dest_y + dct_offset, wrap_y);
2357 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2358 dest_y + dct_offset + 8, wrap_y);
2360 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2364 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2365 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2366 if (!s->chroma_y_shift) { /* 422 */
2367 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2368 dest_cb + uv_dct_offset, wrap_c);
2369 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2370 dest_cr + uv_dct_offset, wrap_c);
2373 /* pre quantization */
2374 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2375 2 * s->qscale * s->qscale) {
2377 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2379 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2381 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2382 wrap_y, 8) < 20 * s->qscale)
2384 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2385 wrap_y, 8) < 20 * s->qscale)
2387 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2389 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2391 if (!s->chroma_y_shift) { /* 422 */
2392 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2393 dest_cb + uv_dct_offset,
2394 wrap_c, 8) < 20 * s->qscale)
2396 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2397 dest_cr + uv_dct_offset,
2398 wrap_c, 8) < 20 * s->qscale)
2404 if (s->quantizer_noise_shaping) {
2406 get_visual_weight(weight[0], ptr_y , wrap_y);
2408 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2410 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2412 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2414 get_visual_weight(weight[4], ptr_cb , wrap_c);
2416 get_visual_weight(weight[5], ptr_cr , wrap_c);
2417 if (!s->chroma_y_shift) { /* 422 */
2419 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2422 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2425 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2428 /* DCT & quantize */
2429 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2431 for (i = 0; i < mb_block_count; i++) {
2434 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2435 // FIXME we could decide to change to quantizer instead of
2437 // JS: I don't think that would be a good idea it could lower
2438 // quality instead of improve it. Just INTRADC clipping
2439 // deserves changes in quantizer
2441 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2443 s->block_last_index[i] = -1;
2445 if (s->quantizer_noise_shaping) {
2446 for (i = 0; i < mb_block_count; i++) {
2448 s->block_last_index[i] =
2449 dct_quantize_refine(s, s->block[i], weight[i],
2450 orig[i], i, s->qscale);
2455 if (s->luma_elim_threshold && !s->mb_intra)
2456 for (i = 0; i < 4; i++)
2457 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2458 if (s->chroma_elim_threshold && !s->mb_intra)
2459 for (i = 4; i < mb_block_count; i++)
2460 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2462 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2463 for (i = 0; i < mb_block_count; i++) {
2464 if (s->block_last_index[i] == -1)
2465 s->coded_score[i] = INT_MAX / 256;
2470 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2471 s->block_last_index[4] =
2472 s->block_last_index[5] = 0;
2474 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2475 if (!s->chroma_y_shift) { /* 422 / 444 */
2476 for (i=6; i<12; i++) {
2477 s->block_last_index[i] = 0;
2478 s->block[i][0] = s->block[4][0];
2483 // non c quantize code returns incorrect block_last_index FIXME
2484 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2485 for (i = 0; i < mb_block_count; i++) {
2487 if (s->block_last_index[i] > 0) {
2488 for (j = 63; j > 0; j--) {
2489 if (s->block[i][s->intra_scantable.permutated[j]])
2492 s->block_last_index[i] = j;
2497 /* huffman encode */
2498 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2499 case AV_CODEC_ID_MPEG1VIDEO:
2500 case AV_CODEC_ID_MPEG2VIDEO:
2501 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2502 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2504 case AV_CODEC_ID_MPEG4:
2505 if (CONFIG_MPEG4_ENCODER)
2506 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2508 case AV_CODEC_ID_MSMPEG4V2:
2509 case AV_CODEC_ID_MSMPEG4V3:
2510 case AV_CODEC_ID_WMV1:
2511 if (CONFIG_MSMPEG4_ENCODER)
2512 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2514 case AV_CODEC_ID_WMV2:
2515 if (CONFIG_WMV2_ENCODER)
2516 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2518 case AV_CODEC_ID_H261:
2519 if (CONFIG_H261_ENCODER)
2520 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2522 case AV_CODEC_ID_H263:
2523 case AV_CODEC_ID_H263P:
2524 case AV_CODEC_ID_FLV1:
2525 case AV_CODEC_ID_RV10:
2526 case AV_CODEC_ID_RV20:
2527 if (CONFIG_H263_ENCODER)
2528 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2530 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
2531 case AV_CODEC_ID_MJPEG:
2532 case AV_CODEC_ID_AMV:
2533 ff_mjpeg_encode_mb(s, s->block);
2536 case AV_CODEC_ID_SPEEDHQ:
2537 if (CONFIG_SPEEDHQ_ENCODER)
2538 ff_speedhq_encode_mb(s, s->block);
2545 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2547 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2548 else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2549 else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/* Snapshot into *d the encoder state that a trial macroblock encode mutates
 * (MV predictors, MPEG-1 skip run, DC predictors, per-category bit counts,
 * quantizer state), so the trial can later be restored/compared.
 * Counterpart of copy_context_after_encode(). */
2552 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2555 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
/* MPEG-1 specific run-length state. */
2558 d->mb_skip_run= s->mb_skip_run;
2560 d->last_dc[i] = s->last_dc[i];
/* Statistics counters (used for rate control / 2-pass logs). */
2563 d->mv_bits= s->mv_bits;
2564 d->i_tex_bits= s->i_tex_bits;
2565 d->p_tex_bits= s->p_tex_bits;
2566 d->i_count= s->i_count;
2567 d->f_count= s->f_count;
2568 d->b_count= s->b_count;
2569 d->skip_count= s->skip_count;
2570 d->misc_bits= s->misc_bits;
/* Quantizer state; esc3_level_length is MSMPEG4-specific escape-code state. */
2574 d->qscale= s->qscale;
2575 d->dquant= s->dquant;
2577 d->esc3_level_length= s->esc3_level_length;
/* Copy back from *s into *d the state produced by a (trial) macroblock
 * encode: chosen MVs, skip run, DC predictors, bit statistics, MB decision
 * fields, partition bit writers and block last-index info. Used to commit
 * the best candidate found by encode_mb_hq(). */
2580 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* Motion vectors chosen by the trial encode. */
2583 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2584 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
/* MPEG-1 specific run-length state. */
2587 d->mb_skip_run= s->mb_skip_run;
2589 d->last_dc[i] = s->last_dc[i];
/* Statistics counters. */
2592 d->mv_bits= s->mv_bits;
2593 d->i_tex_bits= s->i_tex_bits;
2594 d->p_tex_bits= s->p_tex_bits;
2595 d->i_count= s->i_count;
2596 d->f_count= s->f_count;
2597 d->b_count= s->b_count;
2598 d->skip_count= s->skip_count;
2599 d->misc_bits= s->misc_bits;
/* Macroblock decision outcome. */
2601 d->mb_intra= s->mb_intra;
2602 d->mb_skipped= s->mb_skipped;
2603 d->mv_type= s->mv_type;
2604 d->mv_dir= s->mv_dir;
/* MPEG-4 data partitioning uses separate bit writers for partitions. */
2606 if(s->data_partitioning){
2608 d->tex_pb= s->tex_pb;
2612 d->block_last_index[i]= s->block_last_index[i];
2613 d->interlaced_dct= s->interlaced_dct;
2614 d->qscale= s->qscale;
/* MSMPEG4-specific escape-code state. */
2616 d->esc3_level_length= s->esc3_level_length;
/* Trial-encode the current macroblock with candidate mode 'type' into one of
 * two ping-pong bit buffers, score the result (bit count, optionally
 * rate-distortion with reconstruction SSE), and keep it in *best if it beats
 * *dmin. State is restored from *backup before each trial. */
2619 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2620 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2621 int *dmin, int *next_block, int motion_x, int motion_y)
2624 uint8_t *dest_backup[3];
/* Restore the pre-trial encoder state. */
2626 copy_context_before_encode(s, backup, type);
/* Encode into the "next" ping-pong block/bit buffer. */
2628 s->block= s->blocks[*next_block];
2629 s->pb= pb[*next_block];
2630 if(s->data_partitioning){
2631 s->pb2 = pb2 [*next_block];
2632 s->tex_pb= tex_pb[*next_block];
/* Redirect reconstruction into a scratchpad so the real picture is only
 * written once the best candidate is known. */
2636 memcpy(dest_backup, s->dest, sizeof(s->dest));
2637 s->dest[0] = s->sc.rd_scratchpad;
2638 s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2639 s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2640 av_assert0(s->linesize >= 32); //FIXME
2643 encode_mb(s, motion_x, motion_y);
/* Base score: bits spent (all partitions). */
2645 score= put_bits_count(&s->pb);
2646 if(s->data_partitioning){
2647 score+= put_bits_count(&s->pb2);
2648 score+= put_bits_count(&s->tex_pb);
/* Full RD mode: add distortion (SSE of the reconstruction) to the rate. */
2651 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2652 ff_mpv_reconstruct_mb(s, s->block);
2654 score *= s->lambda2;
2655 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2659 memcpy(s->dest, dest_backup, sizeof(s->dest));
/* New best candidate: remember its full context. */
2666 copy_context_after_encode(best, s, type);
/* Sum of squared errors between two pixel blocks of size w x h.
 * Uses the optimized mecc.sse kernels for the common 16x16 and 8x8 sizes,
 * otherwise falls back to a scalar loop over the ff_square_tab LUT
 * (indexed at +256 so negative differences map correctly). */
2670 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2671 const uint32_t *sq = ff_square_tab + 256;
2676 return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2677 else if(w==8 && h==8)
2678 return s->mecc.sse[1](NULL, src1, src2, stride, 8);
/* Generic path for edge macroblocks with non-standard dimensions. */
2682 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/* Distortion (SSE, or NSSE if selected via mb_cmp) of the current macroblock:
 * reconstructed pixels in s->dest vs. the source frame s->new_picture.
 * Full 16x16 macroblocks use the optimized mecc kernels; macroblocks clipped
 * at the right/bottom frame edge fall back to the generic sse() helper. */
2691 static int sse_mb(MpegEncContext *s){
/* Clip the compared area to the frame for edge macroblocks. */
2695 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2696 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2699 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2700 return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2701 s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2702 s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2704 return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2705 s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2706 s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
/* Edge-clipped macroblock: generic per-pixel path, chroma at half size. */
2709 return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2710 +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2711 +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/* Slice-thread worker: motion-estimation pre-pass over this slice's
 * macroblock rows, iterating bottom-up / right-to-left so the pre-pass
 * predictors come from not-yet-estimated neighbors.
 * arg is a MpegEncContext* (per-slice context). */
2714 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2715 MpegEncContext *s= *(void**)arg;
/* Use the dedicated pre-pass diamond size. */
2719 s->me.dia_size= s->avctx->pre_dia_size;
2720 s->first_slice_line=1;
2721 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2722 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2723 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2725 s->first_slice_line=0;
/* Slice-thread worker: full motion estimation for this slice.
 * For each macroblock row, block indices are initialized once and advanced
 * by 2 per macroblock; results (MVs and mb_type) are stored in the context
 * tables for the later encode pass. arg is a MpegEncContext*. */
2733 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2734 MpegEncContext *s= *(void**)arg;
2736 s->me.dia_size= s->avctx->dia_size;
2737 s->first_slice_line=1;
2738 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2739 s->mb_x=0; //for block init below
2740 ff_init_block_index(s);
2741 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
/* Advance the 4 luma block indices to the current macroblock. */
2742 s->block_index[0]+=2;
2743 s->block_index[1]+=2;
2744 s->block_index[2]+=2;
2745 s->block_index[3]+=2;
2747 /* compute motion vector & mb_type and store in context */
2748 if(s->pict_type==AV_PICTURE_TYPE_B)
2749 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2751 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2753 s->first_slice_line=0;
/* Slice-thread worker: per-macroblock luma variance and mean of the source
 * frame, stored in current_picture.mb_var/mb_mean and accumulated into
 * me.mb_var_sum_temp (used by rate control / scene analysis).
 * arg is a MpegEncContext*. */
2758 static int mb_var_thread(AVCodecContext *c, void *arg){
2759 MpegEncContext *s= *(void**)arg;
2762 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2763 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2766 uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2768 int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
/* variance = E[x^2] - E[x]^2 over the 16x16 block (256 pixels, hence the
 * >>8 shifts); +500+128 is rounding/bias kept from the original code. */
2770 varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2771 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2773 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2774 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2775 s->me.mb_var_sum_temp += varc;
/* Finish the current slice: merge MPEG-4 data partitions and write stuffing
 * (codec-specific), byte-align/flush the bit writer, and account the padding
 * bits as misc_bits when collecting first-pass statistics. */
2781 static void write_slice_end(MpegEncContext *s){
2782 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2783 if(s->partitioned_frame){
2784 ff_mpeg4_merge_partitions(s);
2787 ff_mpeg4_stuffing(&s->pb);
2788 } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
2789 s->out_format == FMT_MJPEG) {
2790 ff_mjpeg_encode_stuffing(s);
2791 } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
2792 ff_speedhq_end_slice(s);
/* Byte-align the output bitstream. */
2795 flush_put_bits(&s->pb);
2797 if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2798 s->misc_bits+= get_bits_diff(s);
/* Fill the most recently reserved 12-byte mb_info record (H.263 RFC 2190
 * style side data): bit offset of the macroblock, quantizer, GOB number,
 * macroblock address within the GOB and the MV predictors. The 4MV
 * predictor fields are always written as 0. */
2801 static void write_mb_info(MpegEncContext *s)
/* Last reserved record sits at the end of the mb_info buffer. */
2803 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2804 int offset = put_bits_count(&s->pb);
2805 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2806 int gobn = s->mb_y / s->gob_index;
2808 if (CONFIG_H263_ENCODER)
2809 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2810 bytestream_put_le32(&ptr, offset);
2811 bytestream_put_byte(&ptr, s->qscale);
2812 bytestream_put_byte(&ptr, gobn);
2813 bytestream_put_le16(&ptr, mba);
2814 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2815 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2816 /* 4MV not implemented */
2817 bytestream_put_byte(&ptr, 0); /* hmv2 */
2818 bytestream_put_byte(&ptr, 0); /* vmv2 */
/* Maintain the mb_info side-data records: reserve a new 12-byte slot roughly
 * every s->mb_info payload bytes, and record the positions needed to fill it
 * via write_mb_info(). 'startcode' indicates the call happens right after a
 * resync/start code has been written. */
2821 static void update_mb_info(MpegEncContext *s, int startcode)
/* Enough payload since the last record: reserve the next slot. */
2825 if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
2826 s->mb_info_size += 12;
2827 s->prev_mb_info = s->last_mb_info;
2830 s->prev_mb_info = put_bytes_count(&s->pb, 0);
2831 /* This might have incremented mb_info_size above, and we return without
2832 * actually writing any info into that slot yet. But in that case,
2833 * this will be called again at the start of the after writing the
2834 * start code, actually writing the mb info. */
2838 s->last_mb_info = put_bytes_count(&s->pb, 0);
2839 if (!s->mb_info_size)
2840 s->mb_info_size += 12;
/* Grow the shared output bitstream buffer when fewer than 'threshold' bytes
 * remain, enlarging it by at least 'size_increase'. Only applies when a
 * single slice context writes directly into avctx->internal->byte_buffer.
 * Rebases the PutBitContext and the ptr_lastgob / vbv_delay_ptr pointers
 * into the new buffer. Returns 0, or AVERROR(ENOMEM)/AVERROR(EINVAL). */
2844 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2846 if (put_bytes_left(&s->pb, 0) < threshold
2847 && s->slice_context_count == 1
2848 && s->pb.buf == s->avctx->internal->byte_buffer) {
/* Remember pointer offsets so they can be rebased after realloc. */
2849 int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2850 int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2852 uint8_t *new_buffer = NULL;
2853 int new_buffer_size = 0;
/* Guard against int overflow of the grown buffer size. */
2855 if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2856 av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2857 return AVERROR(ENOMEM);
2862 av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2863 s->avctx->internal->byte_buffer_size + size_increase);
2865 return AVERROR(ENOMEM);
/* Copy already-written data, swap buffers, rebase all pointers. */
2867 memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2868 av_free(s->avctx->internal->byte_buffer);
2869 s->avctx->internal->byte_buffer = new_buffer;
2870 s->avctx->internal->byte_buffer_size = new_buffer_size;
2871 rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2872 s->ptr_lastgob = s->pb.buf + lastgob_pos;
2873 s->vbv_delay_ptr = s->pb.buf + vbv_pos;
/* Still too small (e.g. not the growable internal buffer): hard error. */
2875 if (put_bytes_left(&s->pb, 0) < threshold)
2876 return AVERROR(EINVAL);
/* Slice-thread worker: encode all macroblocks of this slice.
 * Two paths per macroblock:
 *  - RD/multi-candidate path: when several candidate MB types are possible
 *    (or QP_RD is enabled), each candidate is trial-encoded via encode_mb_hq()
 *    into ping-pong bit buffers and the cheapest one is committed;
 *  - direct path: exactly one candidate type, encode_mb() is called once.
 * Also handles resync/GOB/slice headers, putbit-buffer growth, mb_info side
 * data, PSNR accumulation and the deprecated RTP callback. */
2880 static int encode_thread(AVCodecContext *c, void *arg){
2881 MpegEncContext *s= *(void**)arg;
2882 int mb_x, mb_y, mb_y_order;
2883 int chr_h= 16>>s->chroma_y_shift;
/* best_s/backup_s plus two sets of scratch bit buffers implement the
 * ping-pong trial encoding used by encode_mb_hq(). */
2885 MpegEncContext best_s = { 0 }, backup_s;
2886 uint8_t bit_buf[2][MAX_MB_BYTES];
2887 uint8_t bit_buf2[2][MAX_MB_BYTES];
2888 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2889 PutBitContext pb[2], pb2[2], tex_pb[2];
2892 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2893 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2894 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2897 s->last_bits= put_bits_count(&s->pb);
2908 /* init last dc values */
2909 /* note: quant matrix value (8) is implied here */
2910 s->last_dc[i] = 128 << s->intra_dc_precision;
2912 s->current_picture.encoding_error[i] = 0;
/* AMV uses non-standard DC predictor resets. */
2914 if(s->codec_id==AV_CODEC_ID_AMV){
2915 s->last_dc[0] = 128*8/13;
2916 s->last_dc[1] = 128*8/14;
2917 s->last_dc[2] = 128*8/14;
2920 memset(s->last_mv, 0, sizeof(s->last_mv));
/* Codec-specific per-slice setup. */
2924 switch(s->codec_id){
2925 case AV_CODEC_ID_H263:
2926 case AV_CODEC_ID_H263P:
2927 case AV_CODEC_ID_FLV1:
2928 if (CONFIG_H263_ENCODER)
2929 s->gob_index = H263_GOB_HEIGHT(s->height);
2931 case AV_CODEC_ID_MPEG4:
2932 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2933 ff_mpeg4_init_partitions(s);
2939 s->first_slice_line = 1;
2940 s->ptr_lastgob = s->pb.buf;
/* SpeedHQ encodes rows in a codec-specific order; mb_y_order is mapped to
 * the actual mb_y below. */
2941 for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
2942 if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
2944 mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
2945 if (first_in_slice && mb_y_order != s->start_mb_y)
2946 ff_speedhq_end_slice(s);
2947 s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
2954 ff_set_qscale(s, s->qscale);
2955 ff_init_block_index(s);
2957 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2958 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2959 int mb_type= s->mb_type[xy];
/* Make sure the output buffer can hold at least one worst-case MB. */
2963 int size_increase = s->avctx->internal->byte_buffer_size/4
2964 + s->mb_width*MAX_MB_BYTES;
2966 ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
2967 if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
2968 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2971 if(s->data_partitioning){
2972 if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
2973 put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
2974 av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2980 s->mb_y = mb_y; // moved into loop, can get changed by H.261
2981 ff_update_block_index(s);
/* H.261 encodes macroblocks in GOB order; recompute xy after reorder. */
2983 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2984 ff_h261_reorder_mb_index(s);
2985 xy= s->mb_y*s->mb_stride + s->mb_x;
2986 mb_type= s->mb_type[xy];
2989 /* write gob / video packet header */
2991 int current_packet_size, is_gob_start;
2993 current_packet_size = put_bytes_count(&s->pb, 1)
2994 - (s->ptr_lastgob - s->pb.buf);
/* Decide whether a resync marker / GOB header starts here. */
2996 is_gob_start = s->rtp_payload_size &&
2997 current_packet_size >= s->rtp_payload_size &&
3000 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3002 switch(s->codec_id){
3003 case AV_CODEC_ID_H263:
3004 case AV_CODEC_ID_H263P:
3005 if(!s->h263_slice_structured)
3006 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3008 case AV_CODEC_ID_MPEG2VIDEO:
3009 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3010 case AV_CODEC_ID_MPEG1VIDEO:
3011 if(s->mb_skip_run) is_gob_start=0;
3013 case AV_CODEC_ID_MJPEG:
3014 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3019 if(s->start_mb_y != mb_y || mb_x!=0){
3022 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3023 ff_mpeg4_init_partitions(s);
3027 av_assert2((put_bits_count(&s->pb)&7) == 0);
3028 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
/* Error-rate simulation: randomly drop the packet just written. */
3030 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3031 int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->mb_x + s->mb_y;
3032 int d = 100 / s->error_rate;
3034 current_packet_size=0;
3035 s->pb.buf_ptr= s->ptr_lastgob;
3036 av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3040 #if FF_API_RTP_CALLBACK
3041 FF_DISABLE_DEPRECATION_WARNINGS
3042 if (s->avctx->rtp_callback){
3043 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3044 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3046 FF_ENABLE_DEPRECATION_WARNINGS
3048 update_mb_info(s, 1);
/* Emit the codec-specific resync/slice/GOB header. */
3050 switch(s->codec_id){
3051 case AV_CODEC_ID_MPEG4:
3052 if (CONFIG_MPEG4_ENCODER) {
3053 ff_mpeg4_encode_video_packet_header(s);
3054 ff_mpeg4_clean_buffers(s);
3057 case AV_CODEC_ID_MPEG1VIDEO:
3058 case AV_CODEC_ID_MPEG2VIDEO:
3059 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3060 ff_mpeg1_encode_slice_header(s);
3061 ff_mpeg1_clean_buffers(s);
3064 case AV_CODEC_ID_H263:
3065 case AV_CODEC_ID_H263P:
3066 if (CONFIG_H263_ENCODER)
3067 ff_h263_encode_gob_header(s, mb_y);
3071 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3072 int bits= put_bits_count(&s->pb);
3073 s->misc_bits+= bits - s->last_bits;
3077 s->ptr_lastgob += current_packet_size;
3078 s->first_slice_line=1;
3079 s->resync_mb_x=mb_x;
3080 s->resync_mb_y=mb_y;
3084 if( (s->resync_mb_x == s->mb_x)
3085 && s->resync_mb_y+1 == s->mb_y){
3086 s->first_slice_line=0;
3090 s->dquant=0; //only for QP_RD
3092 update_mb_info(s, 0);
3094 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
/* Multi-candidate path: trial-encode each candidate MB type and keep the
 * cheapest (encode_mb_hq() updates dmin/best_s). */
3096 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3098 copy_context_before_encode(&backup_s, s, -1);
3100 best_s.data_partitioning= s->data_partitioning;
3101 best_s.partitioned_frame= s->partitioned_frame;
3102 if(s->data_partitioning){
3103 backup_s.pb2= s->pb2;
3104 backup_s.tex_pb= s->tex_pb;
3107 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3108 s->mv_dir = MV_DIR_FORWARD;
3109 s->mv_type = MV_TYPE_16X16;
3111 s->mv[0][0][0] = s->p_mv_table[xy][0];
3112 s->mv[0][0][1] = s->p_mv_table[xy][1];
3113 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3114 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3116 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3117 s->mv_dir = MV_DIR_FORWARD;
3118 s->mv_type = MV_TYPE_FIELD;
3121 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3122 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3123 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3125 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3126 &dmin, &next_block, 0, 0);
3128 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3129 s->mv_dir = MV_DIR_FORWARD;
3130 s->mv_type = MV_TYPE_16X16;
3134 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3135 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3137 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3138 s->mv_dir = MV_DIR_FORWARD;
3139 s->mv_type = MV_TYPE_8X8;
3142 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3143 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3145 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3146 &dmin, &next_block, 0, 0);
3148 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3149 s->mv_dir = MV_DIR_FORWARD;
3150 s->mv_type = MV_TYPE_16X16;
3152 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3153 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3154 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3155 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3157 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3158 s->mv_dir = MV_DIR_BACKWARD;
3159 s->mv_type = MV_TYPE_16X16;
3161 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3162 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3163 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3164 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3166 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3167 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3168 s->mv_type = MV_TYPE_16X16;
3170 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3171 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3172 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3173 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3174 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3175 &dmin, &next_block, 0, 0);
3177 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3178 s->mv_dir = MV_DIR_FORWARD;
3179 s->mv_type = MV_TYPE_FIELD;
3182 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3183 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3184 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3186 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3187 &dmin, &next_block, 0, 0);
3189 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3190 s->mv_dir = MV_DIR_BACKWARD;
3191 s->mv_type = MV_TYPE_FIELD;
3194 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3195 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3196 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3198 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3199 &dmin, &next_block, 0, 0);
3201 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3202 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3203 s->mv_type = MV_TYPE_FIELD;
3205 for(dir=0; dir<2; dir++){
3207 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3208 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3209 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3212 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3213 &dmin, &next_block, 0, 0);
3215 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3217 s->mv_type = MV_TYPE_16X16;
3221 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3222 &dmin, &next_block, 0, 0);
3223 if(s->h263_pred || s->h263_aic){
3225 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3227 ff_clean_intra_table_entries(s); //old mode?
/* QP_RD: also try neighbouring quantizers around the winning mode,
 * saving/restoring the DC/AC prediction state for intra MBs. */
3231 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3232 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3233 const int last_qp= backup_s.qscale;
3236 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3237 static const int dquant_tab[4]={-1,1,-2,2};
3238 int storecoefs = s->mb_intra && s->dc_val[0];
3240 av_assert2(backup_s.dquant == 0);
3243 s->mv_dir= best_s.mv_dir;
3244 s->mv_type = MV_TYPE_16X16;
3245 s->mb_intra= best_s.mb_intra;
3246 s->mv[0][0][0] = best_s.mv[0][0][0];
3247 s->mv[0][0][1] = best_s.mv[0][0][1];
3248 s->mv[1][0][0] = best_s.mv[1][0][0];
3249 s->mv[1][0][1] = best_s.mv[1][0][1];
3251 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3252 for(; qpi<4; qpi++){
3253 int dquant= dquant_tab[qpi];
3254 qp= last_qp + dquant;
3255 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3257 backup_s.dquant= dquant;
3260 dc[i]= s->dc_val[0][ s->block_index[i] ];
3261 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3265 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3266 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3267 if(best_s.qscale != qp){
3270 s->dc_val[0][ s->block_index[i] ]= dc[i];
3271 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3278 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3279 int mx= s->b_direct_mv_table[xy][0];
3280 int my= s->b_direct_mv_table[xy][1];
3282 backup_s.dquant = 0;
3283 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3285 ff_mpeg4_set_direct_mv(s, mx, my);
3286 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3287 &dmin, &next_block, mx, my);
3289 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3290 backup_s.dquant = 0;
3291 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3293 ff_mpeg4_set_direct_mv(s, 0, 0);
3294 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3295 &dmin, &next_block, 0, 0);
/* SKIP_RD: if the best inter mode coded nothing, also try explicit skip. */
3297 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3300 coded |= s->block_last_index[i];
3303 memcpy(s->mv, best_s.mv, sizeof(s->mv));
3304 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3305 mx=my=0; //FIXME find the one we actually used
3306 ff_mpeg4_set_direct_mv(s, mx, my);
3307 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3315 s->mv_dir= best_s.mv_dir;
3316 s->mv_type = best_s.mv_type;
3318 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3319 s->mv[0][0][1] = best_s.mv[0][0][1];
3320 s->mv[1][0][0] = best_s.mv[1][0][0];
3321 s->mv[1][0][1] = best_s.mv[1][0][1];*/
3324 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3325 &dmin, &next_block, mx, my);
3330 s->current_picture.qscale_table[xy] = best_s.qscale;
/* Commit the winning candidate: restore its context and copy its bits from
 * the scratch ping-pong buffer into the real bitstream. */
3332 copy_context_after_encode(s, &best_s, -1);
3334 pb_bits_count= put_bits_count(&s->pb);
3335 flush_put_bits(&s->pb);
3336 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3339 if(s->data_partitioning){
3340 pb2_bits_count= put_bits_count(&s->pb2);
3341 flush_put_bits(&s->pb2);
3342 ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3343 s->pb2= backup_s.pb2;
3345 tex_pb_bits_count= put_bits_count(&s->tex_pb);
3346 flush_put_bits(&s->tex_pb);
3347 ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3348 s->tex_pb= backup_s.tex_pb;
3350 s->last_bits= put_bits_count(&s->pb);
3352 if (CONFIG_H263_ENCODER &&
3353 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3354 ff_h263_update_motion_val(s);
/* Copy the winning reconstruction out of the RD scratchpad. */
3356 if(next_block==0){ //FIXME 16 vs linesize16
3357 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3358 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3359 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3362 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3363 ff_mpv_reconstruct_mb(s, s->block);
/* Direct path: exactly one candidate MB type, no trial encoding. */
3365 int motion_x = 0, motion_y = 0;
3366 s->mv_type=MV_TYPE_16X16;
3367 // only one MB-Type possible
3370 case CANDIDATE_MB_TYPE_INTRA:
3373 motion_x= s->mv[0][0][0] = 0;
3374 motion_y= s->mv[0][0][1] = 0;
3376 case CANDIDATE_MB_TYPE_INTER:
3377 s->mv_dir = MV_DIR_FORWARD;
3379 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3380 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3382 case CANDIDATE_MB_TYPE_INTER_I:
3383 s->mv_dir = MV_DIR_FORWARD;
3384 s->mv_type = MV_TYPE_FIELD;
3387 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3388 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3389 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3392 case CANDIDATE_MB_TYPE_INTER4V:
3393 s->mv_dir = MV_DIR_FORWARD;
3394 s->mv_type = MV_TYPE_8X8;
3397 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3398 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3401 case CANDIDATE_MB_TYPE_DIRECT:
3402 if (CONFIG_MPEG4_ENCODER) {
3403 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3405 motion_x=s->b_direct_mv_table[xy][0];
3406 motion_y=s->b_direct_mv_table[xy][1];
3407 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3410 case CANDIDATE_MB_TYPE_DIRECT0:
3411 if (CONFIG_MPEG4_ENCODER) {
3412 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3414 ff_mpeg4_set_direct_mv(s, 0, 0);
3417 case CANDIDATE_MB_TYPE_BIDIR:
3418 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3420 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3421 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3422 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3423 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3425 case CANDIDATE_MB_TYPE_BACKWARD:
3426 s->mv_dir = MV_DIR_BACKWARD;
3428 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3429 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3431 case CANDIDATE_MB_TYPE_FORWARD:
3432 s->mv_dir = MV_DIR_FORWARD;
3434 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3435 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3437 case CANDIDATE_MB_TYPE_FORWARD_I:
3438 s->mv_dir = MV_DIR_FORWARD;
3439 s->mv_type = MV_TYPE_FIELD;
3442 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3443 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3444 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3447 case CANDIDATE_MB_TYPE_BACKWARD_I:
3448 s->mv_dir = MV_DIR_BACKWARD;
3449 s->mv_type = MV_TYPE_FIELD;
3452 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3453 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3454 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3457 case CANDIDATE_MB_TYPE_BIDIR_I:
3458 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3459 s->mv_type = MV_TYPE_FIELD;
3461 for(dir=0; dir<2; dir++){
3463 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3464 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3465 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3470 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3473 encode_mb(s, motion_x, motion_y);
3475 // RAL: Update last macroblock type
3476 s->last_mv_dir = s->mv_dir;
3478 if (CONFIG_H263_ENCODER &&
3479 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3480 ff_h263_update_motion_val(s);
3482 ff_mpv_reconstruct_mb(s, s->block);
3485 /* clean the MV table in IPS frames for direct mode in B-frames */
3486 if(s->mb_intra /* && I,P,S_TYPE */){
3487 s->p_mv_table[xy][0]=0;
3488 s->p_mv_table[xy][1]=0;
/* Accumulate per-plane SSE for PSNR reporting (clipped at frame edges). */
3491 if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3495 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3496 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3498 s->current_picture.encoding_error[0] += sse(
3499 s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3500 s->dest[0], w, h, s->linesize);
3501 s->current_picture.encoding_error[1] += sse(
3502 s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3503 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3504 s->current_picture.encoding_error[2] += sse(
3505 s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3506 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3509 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3510 ff_h263_loop_filter(s);
3512 ff_dlog(s->avctx, "MB %d %d bits\n",
3513 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3517 //not beautiful here but we must write it before flushing so it has to be here
3518 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3519 ff_msmpeg4_encode_ext_header(s);
3523 #if FF_API_RTP_CALLBACK
3524 FF_DISABLE_DEPRECATION_WARNINGS
3525 /* Send the last GOB if RTP */
3526 if (s->avctx->rtp_callback) {
3527 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3528 int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3529 /* Call the RTP callback to send the last GOB */
3531 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3533 FF_ENABLE_DEPRECATION_WARNINGS
/* MERGE(field): fold src->field into dst->field and zero the source, so a
 * second merge of the same slice context is a no-op. Used to collect
 * per-slice-thread statistics back into the main context. */
3539 #define MERGE(field) dst->field += src->field; src->field=0
3540 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
/* Accumulate the motion-estimation statistics gathered by one slice thread:
 * scene-change score and the temporary (MC) macroblock variance sums. */
3541 MERGE(me.scene_change_score);
3542 MERGE(me.mc_mb_var_sum_temp);
3543 MERGE(me.mb_var_sum_temp);
/* Fold the per-slice encoding results of one slice thread (src) back into the
 * main context (dst): bit counters, error-concealment/PSNR statistics, the
 * noise-reduction DCT error sums, and finally the slice's bitstream itself.
 * (Additional MERGE()d counters exist in the full source; elided here.) */
3546 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3549 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3550 MERGE(dct_count[1]);
3559 MERGE(er.error_count);
3560 MERGE(padding_bug_score);
/* Per-plane SSE totals used for the AV_CODEC_FLAG_PSNR report. */
3561 MERGE(current_picture.encoding_error[0]);
3562 MERGE(current_picture.encoding_error[1]);
3563 MERGE(current_picture.encoding_error[2]);
3565 if (dst->noise_reduction){
3566 for(i=0; i<64; i++){
/* 64 per-coefficient error accumulators for intra (0) and inter (1). */
3567 MERGE(dct_error_sum[0][i]);
3568 MERGE(dct_error_sum[1][i]);
/* Both bitstreams must be byte-aligned before concatenation. */
3572 av_assert1(put_bits_count(&src->pb) % 8 ==0);
3573 av_assert1(put_bits_count(&dst->pb) % 8 ==0);
/* Append the slice thread's bitstream to the main one. */
3574 ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3575 flush_put_bits(&dst->pb);
/* Choose the quantizer/lambda for the current picture.
 * dry_run != 0 means "probe only": rate control is queried but persistent
 * state (next_lambda) is not consumed. Returns < 0 on rate-control failure
 * (error path elided in this view). */
3578 static int estimate_qp(MpegEncContext *s, int dry_run){
3579 if (s->next_lambda){
/* A lambda was pre-selected (e.g. by a previous pass/decision); use it. */
3580 s->current_picture_ptr->f->quality =
3581 s->current_picture.f->quality = s->next_lambda;
3582 if(!dry_run) s->next_lambda= 0;
3583 } else if (!s->fixed_qscale) {
/* Ask the rate controller for a quality estimate. */
3584 int quality = ff_rate_estimate_qscale(s, dry_run);
3585 s->current_picture_ptr->f->quality =
3586 s->current_picture.f->quality = quality;
3587 if (s->current_picture.f->quality < 0)
3591 if(s->adaptive_quant){
/* Adaptive quantization: each codec constrains how much qscale may vary
 * between macroblocks, so clean up the per-MB qscale table first. */
3592 switch(s->codec_id){
3593 case AV_CODEC_ID_MPEG4:
3594 if (CONFIG_MPEG4_ENCODER)
3595 ff_clean_mpeg4_qscales(s);
3597 case AV_CODEC_ID_H263:
3598 case AV_CODEC_ID_H263P:
3599 case AV_CODEC_ID_FLV1:
3600 if (CONFIG_H263_ENCODER)
3601 ff_clean_h263_qscales(s);
3604 ff_init_qscale_tab(s);
/* With adaptive quant, lambda comes from the per-MB table... */
3607 s->lambda= s->lambda_table[0];
/* ...otherwise directly from the picture-level quality chosen above. */
3610 s->lambda = s->current_picture.f->quality;
3615 /* must be called before writing the header */
/* Derive the temporal distances used by B-frame prediction from the frame
 * PTS: pp_time = distance between the two surrounding non-B frames,
 * pb_time = distance from the previous non-B frame to this B frame. */
3616 static void set_frame_distances(MpegEncContext * s){
3617 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
/* Convert PTS into the encoder's internal time units. */
3618 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3620 if(s->pict_type==AV_PICTURE_TYPE_B){
3621 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
/* A B-frame must lie strictly between its two reference frames. */
3622 av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
3624 s->pp_time= s->time - s->last_non_b_time;
3625 s->last_non_b_time= s->time;
/* Monotonic timestamps required except for the very first picture. */
3626 av_assert1(s->picture_number==0 || s->pp_time > 0);
/* Encode one complete picture: set up timing and rounding, run motion
 * estimation across all slice threads, pick pict_type/f_code/b_code,
 * choose the quantizer, build codec-specific quant matrices, write the
 * picture header, and finally run encode_thread() on every slice context
 * and merge the results. Returns 0 on success, <0 on error (error paths
 * elided in this view). */
3630 static int encode_picture(MpegEncContext *s, int picture_number)
3634 int context_count = s->slice_context_count;
3636 s->picture_number = picture_number;
3638 /* Reset the average MB variance */
3639 s->me.mb_var_sum_temp =
3640 s->me.mc_mb_var_sum_temp = 0;
3642 /* we need to initialize some time vars before we can encode B-frames */
3643 // RAL: Condition added for MPEG1VIDEO
3644 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3645 set_frame_distances(s);
3646 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3647 ff_set_mpeg4_time(s);
3649 s->me.scene_change_score=0;
3651 //    s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* Rounding control: MSMPEG4v3+ always uses no_rounding on I-frames;
 * codecs with flip-flop rounding toggle it on each non-B reference frame. */
3653 if(s->pict_type==AV_PICTURE_TYPE_I){
3654 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3655 else s->no_rounding=0;
3656 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3657 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3658 s->no_rounding ^= 1;
/* Pass-2 rate control supplies qp and fcode from the first-pass log;
 * otherwise reuse the last lambda of the matching picture type for ME. */
3661 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3662 if (estimate_qp(s,1) < 0)
3664 ff_get_2pass_fcode(s);
3665 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3666 if(s->pict_type==AV_PICTURE_TYPE_B)
3667 s->lambda= s->last_lambda_for[s->pict_type];
3669 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* For everything except (A)MJPEG the chroma quant matrices alias the luma
 * ones; free any previously-separate chroma matrices before aliasing. */
3673 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3674 if(s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
3675 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3676 s->q_chroma_intra_matrix   = s->q_intra_matrix;
3677 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3680 s->mb_intra=0; //for the rate distortion & bit compare functions
/* Propagate the main context's state to every slice-thread context. */
3681 for(i=1; i<context_count; i++){
3682 ret = ff_update_duplicate_context(s->thread_context[i], s);
3690 /* Estimate motion for every MB */
3691 if(s->pict_type != AV_PICTURE_TYPE_I){
/* Scale lambda by the ME penalty compensation (Q8 fixed point). */
3692 s->lambda  = (s->lambda  * s->me_penalty_compensation + 128) >> 8;
3693 s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3694 if (s->pict_type != AV_PICTURE_TYPE_B) {
3695 if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3697 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3701 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3702 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3704 for(i=0; i<s->mb_stride*s->mb_height; i++)
3705 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3707 if(!s->fixed_qscale){
3708 /* finding spatial complexity for I-frame rate control */
3709 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
/* Collect ME statistics from the slice threads into the main context. */
3712 for(i=1; i<context_count; i++){
3713 merge_context_after_me(s, s->thread_context[i]);
3715 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3716 s->current_picture.   mb_var_sum= s->current_picture_ptr->   mb_var_sum= s->me.   mb_var_sum_temp;
/* Scene-change detection: promote a P frame to an all-intra I frame. */
3719 if (s->me.scene_change_score > s->scenechange_threshold &&
3720 s->pict_type == AV_PICTURE_TYPE_P) {
3721 s->pict_type= AV_PICTURE_TYPE_I;
3722 for(i=0; i<s->mb_stride*s->mb_height; i++)
3723 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3724 if(s->msmpeg4_version >= 3)
3726 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3727 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* P/S frames: pick the smallest f_code covering all MVs, then clip/repair
 * motion vectors that exceed the representable range. */
3731 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3732 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3734 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3736 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3737 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3738 s->f_code= FFMAX3(s->f_code, a, b);
3741 ff_fix_long_p_mvs(s, s->intra_penalty ? CANDIDATE_MB_TYPE_INTER : CANDIDATE_MB_TYPE_INTRA);
3742 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3743 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3747 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3748 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
/* B frames: choose f_code (forward) and b_code (backward) over all
 * candidate MV tables, then repair out-of-range vectors per direction. */
3753 if(s->pict_type==AV_PICTURE_TYPE_B){
3756 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3757 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3758 s->f_code = FFMAX(a, b);
3760 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3761 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3762 s->b_code = FFMAX(a, b);
3764 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3765 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3766 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3767 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3768 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3770 for(dir=0; dir<2; dir++){
3773 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3774 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3775 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3776 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* Final (non-dry-run) quantizer selection. */
3784 if (estimate_qp(s, 0) < 0)
3787 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3788 s->pict_type == AV_PICTURE_TYPE_I &&
3789 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3790 s->qscale= 3; //reduce clipping problems
/* MJPEG: bake qscale into the (possibly user-supplied) intra matrices and
 * rebuild the quantizer multiplier tables. */
3792 if (s->out_format == FMT_MJPEG) {
3793 const uint16_t *  luma_matrix = ff_mpeg1_default_intra_matrix;
3794 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3796 if (s->avctx->intra_matrix) {
3798 luma_matrix = s->avctx->intra_matrix;
3800 if (s->avctx->chroma_intra_matrix)
3801 chroma_matrix = s->avctx->chroma_intra_matrix;
3803 /* for mjpeg, we do include qscale in the matrix */
3805 int j = s->idsp.idct_permutation[i];
3807 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3808 s->       intra_matrix[j] = av_clip_uint8((  luma_matrix[i] * s->qscale) >> 3);
3810 s->y_dc_scale_table=
3811 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3812 s->chroma_intra_matrix[0] =
3813 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3814 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3815 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3816 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3817 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV uses fixed DC scales (13/14) and the sp5x quant tables. */
3820 if(s->codec_id == AV_CODEC_ID_AMV){
3821 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3822 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3824 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3826 s->intra_matrix[j]        = sp5x_qscale_five_quant_table[0][i];
3827 s->chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3829 s->y_dc_scale_table= y;
3830 s->c_dc_scale_table= c;
3831 s->intra_matrix[0] = 13;
3832 s->chroma_intra_matrix[0] = 14;
3833 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3834 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3835 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3836 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3840 if (s->out_format == FMT_SPEEDHQ) {
3841 s->y_dc_scale_table=
3842 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[3];
3845 //FIXME var duplication
3846 s->current_picture_ptr->f->key_frame =
3847 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3848 s->current_picture_ptr->f->pict_type =
3849 s->current_picture.f->pict_type = s->pict_type;
3851 if (s->current_picture.f->key_frame)
3852 s->picture_in_gop_number=0;
/* Write the codec-specific picture header. */
3854 s->mb_x = s->mb_y = 0;
3855 s->last_bits= put_bits_count(&s->pb);
3856 switch(s->out_format) {
3857 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
3859 /* s->huffman == HUFFMAN_TABLE_OPTIMAL can only be true for MJPEG. */
3860 if (!CONFIG_MJPEG_ENCODER || s->huffman != HUFFMAN_TABLE_OPTIMAL)
3861 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3862 s->pred, s->intra_matrix, s->chroma_intra_matrix);
3866 if (CONFIG_SPEEDHQ_ENCODER)
3867 ff_speedhq_encode_picture_header(s);
3870 if (CONFIG_H261_ENCODER)
3871 ff_h261_encode_picture_header(s, picture_number);
3874 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3875 ff_wmv2_encode_picture_header(s, picture_number);
3876 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3877 ff_msmpeg4_encode_picture_header(s, picture_number);
3878 else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3879 ret = ff_mpeg4_encode_picture_header(s, picture_number);
3882 } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3883 ret = ff_rv10_encode_picture_header(s, picture_number);
3887 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3888 ff_rv20_encode_picture_header(s, picture_number);
3889 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3890 ff_flv_encode_picture_header(s, picture_number);
3891 else if (CONFIG_H263_ENCODER)
3892 ff_h263_encode_picture_header(s, picture_number);
3895 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3896 ff_mpeg1_encode_picture_header(s, picture_number);
3901 bits= put_bits_count(&s->pb);
3902 s->header_bits= bits - s->last_bits;
/* Refresh the slice-thread contexts, encode all slices in parallel, then
 * merge bitstreams and statistics back into the main context. */
3904 for(i=1; i<context_count; i++){
3905 update_duplicate_context_after_me(s->thread_context[i], s);
3907 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3908 for(i=1; i<context_count; i++){
3909 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3910 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3911 merge_context_after_encode(s, s->thread_context[i]);
/* Noise-reduction filter applied to a DCT block before quantization:
 * accumulate each coefficient's magnitude into dct_error_sum[] (separately
 * for intra/inter) and shrink the coefficient toward zero by the running
 * per-coefficient offset, clamping at zero so the sign never flips. */
3917 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3918 const int intra= s->mb_intra;
/* Count blocks processed per mode; used to normalize dct_error_sum. */
3921 s->dct_count[intra]++;
3923 for(i=0; i<64; i++){
3924 int level= block[i];
/* Positive branch: record the level, subtract the offset, clamp at 0. */
3928 s->dct_error_sum[intra][i] += level;
3929 level -= s->dct_offset[intra][i];
3930 if(level<0) level=0;
/* Negative branch (elided condition): mirror of the above. */
3932 s->dct_error_sum[intra][i] -= level;
3933 level += s->dct_offset[intra][i];
3934 if(level>0) level=0;
/* Trellis (rate-distortion optimal) quantization of one 8x8 block.
 * Performs the forward DCT, optional denoising, then for each coefficient
 * considers up to two candidate quantized levels and runs a Viterbi-style
 * search over (run, level) paths, scoring distortion + lambda * bits.
 * Returns the index of the last non-zero coefficient (or <0 if the block
 * quantizes to all-zero); sets *overflow if a level exceeded max_qcoeff. */
3941 static int dct_quantize_trellis_c(MpegEncContext *s,
3942 int16_t *block, int n,
3943 int qscale, int *overflow){
3945 const uint16_t *matrix;
3946 const uint8_t *scantable;
3947 const uint8_t *perm_scantable;
3949 unsigned int threshold1, threshold2;
3961 int coeff_count[64];
3962 int qmul, qadd, start_i, last_non_zero, i, dc;
3963 const int esc_length= s->ac_esc_length;
3965 uint8_t * last_length;
/* lambda in the same fixed-point scale as the distortion terms below. */
3966 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3969 s->fdsp.fdct(block);
3971 if(s->dct_error_sum)
3972 s->denoise_dct(s, block);
3974 qadd= ((qscale-1)|1)*8;
/* MPEG-2 may use the non-linear qscale mapping. */
3976 if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3977 else                 mpeg2_qscale = qscale << 1;
/* Intra path: scantables, DC quantization, matrices and VLC length tables. */
3981 scantable= s->intra_scantable.scantable;
3982 perm_scantable= s->intra_scantable.permutated;
3990 /* For AIC we skip quant/dequant of INTRADC */
3995 /* note: block[0] is assumed to be positive */
3996 block[0] = (block[0] + (q >> 1)) / q;
/* n<4 selects luma, otherwise chroma matrices. */
3999 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4000 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4001 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4002 bias= 1<<(QMAT_SHIFT-1);
4004 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4005 length     = s->intra_chroma_ac_vlc_length;
4006 last_length= s->intra_chroma_ac_vlc_last_length;
4008 length     = s->intra_ac_vlc_length;
4009 last_length= s->intra_ac_vlc_last_length;
/* Inter path. */
4012 scantable= s->inter_scantable.scantable;
4013 perm_scantable= s->inter_scantable.permutated;
4016 qmat = s->q_inter_matrix[qscale];
4017 matrix = s->inter_matrix;
4018 length     = s->inter_ac_vlc_length;
4019 last_length= s->inter_ac_vlc_last_length;
4023 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4024 threshold2= (threshold1<<1);
/* Find the last coefficient that survives quantization. */
4026 for(i=63; i>=start_i; i--) {
4027 const int j = scantable[i];
4028 int level = block[j] * qmat[j];
4030 if(((unsigned)(level+threshold1))>threshold2){
/* Build up to two candidate levels (level, level-1) per coefficient. */
4036 for(i=start_i; i<=last_non_zero; i++) {
4037 const int j = scantable[i];
4038 int level = block[j] * qmat[j];
4040 //        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
4041 //           || bias-level >= (1<<(QMAT_SHIFT - 3))){
4042 if(((unsigned)(level+threshold1))>threshold2){
4044 level= (bias + level)>>QMAT_SHIFT;
4046 coeff[1][i]= level-1;
4047 //                coeff[2][k]= level-2;
4049 level= (bias - level)>>QMAT_SHIFT;
4050 coeff[0][i]= -level;
4051 coeff[1][i]= -level+1;
4052 //                coeff[2][k]= -level+2;
4054 coeff_count[i]= FFMIN(level, 2);
4055 av_assert2(coeff_count[i]);
4058 coeff[0][i]= (level>>31)|1;
4063 *overflow= s->max_qcoeff < max; //overflow might have happened
/* All-zero block: clear and bail out early. */
4065 if(last_non_zero < start_i){
4066 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4067 return last_non_zero;
4070 score_tab[start_i]= 0;
4071 survivor[0]= start_i;
/* Main trellis: for each position try every surviving predecessor path. */
4074 for(i=start_i; i<=last_non_zero; i++){
4075 int level_index, j, zero_distortion;
4076 int dct_coeff= FFABS(block[ scantable[i] ]);
4077 int best_score=256*256*256*120;
/* ifast FDCT uses AAN scaling; undo it so distortion is comparable. */
4079 if (s->fdsp.fdct == ff_fdct_ifast)
4080 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4081 zero_distortion= dct_coeff*dct_coeff;
4083 for(level_index=0; level_index < coeff_count[i]; level_index++){
4085 int level= coeff[level_index][i];
4086 const int alevel= FFABS(level);
/* Reconstruct the unquantized value per output format to get distortion. */
4091 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4092 unquant_coeff= alevel*qmul + qadd;
4093 } else if(s->out_format == FMT_MJPEG) {
4094 j = s->idsp.idct_permutation[scantable[i]];
4095 unquant_coeff = alevel * matrix[j] * 8;
4097 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4099 unquant_coeff = (int)(  alevel  * mpeg2_qscale * matrix[j]) >> 4;
4100 unquant_coeff =   (unquant_coeff - 1) | 1;
4102 unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4103 unquant_coeff =   (unquant_coeff - 1) | 1;
4108 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* Levels that fit in the VLC table: score via length[]; else escape cost. */
4110 if((level&(~127)) == 0){
4111 for(j=survivor_count-1; j>=0; j--){
4112 int run= i - survivor[j];
4113 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4114 score += score_tab[i-run];
4116 if(score < best_score){
4119 level_tab[i+1]= level-64;
/* H.263/H.261 use a distinct "last" VLC; track the best ending here too. */
4123 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4124 for(j=survivor_count-1; j>=0; j--){
4125 int run= i - survivor[j];
4126 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4127 score += score_tab[i-run];
4128 if(score < last_score){
4131 last_level= level-64;
4137 distortion += esc_length*lambda;
4138 for(j=survivor_count-1; j>=0; j--){
4139 int run= i - survivor[j];
4140 int score= distortion + score_tab[i-run];
4142 if(score < best_score){
4145 level_tab[i+1]= level-64;
4149 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4150 for(j=survivor_count-1; j>=0; j--){
4151 int run= i - survivor[j];
4152 int score= distortion + score_tab[i-run];
4153 if(score < last_score){
4156 last_level= level-64;
4164 score_tab[i+1]= best_score;
4166 // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
/* Prune the survivor list; the <=27 bound relates to the MPEG-4 VLC quirk
 * noted above, so pruning is slightly conservative there. */
4167 if(last_non_zero <= 27){
4168 for(; survivor_count; survivor_count--){
4169 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4173 for(; survivor_count; survivor_count--){
4174 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4179 survivor[ survivor_count++ ]= i+1;
/* Non-H.263 formats pick the best termination point from score_tab. */
4182 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4183 last_score= 256*256*256*120;
4184 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4185 int score= score_tab[i];
4187 score += lambda * 2; // FIXME more exact?
4189 if(score < last_score){
4192 last_level= level_tab[i];
4193 last_run= run_tab[i];
4198 s->coded_score[n] = last_score;
4200 dc= FFABS(block[0]);
4201 last_non_zero= last_i - 1;
4202 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4204 if(last_non_zero < start_i)
4205 return last_non_zero;
/* Special case: only the first coefficient survived — decide whether
 * coding it at all beats coding nothing, by comparing against dc*dc. */
4207 if(last_non_zero == 0 && start_i == 0){
4209 int best_score= dc * dc;
4211 for(i=0; i<coeff_count[0]; i++){
4212 int level= coeff[i][0];
4213 int alevel= FFABS(level);
4214 int unquant_coeff, score, distortion;
4216 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4217 unquant_coeff= (alevel*qmul + qadd)>>3;
4219 unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4220 unquant_coeff =   (unquant_coeff - 1) | 1;
4222 unquant_coeff = (unquant_coeff + 4) >> 3;
4223 unquant_coeff<<= 3 + 3;
4225 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4227 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4228 else                    score= distortion + esc_length*lambda;
4230 if(score < best_score){
4232 best_level= level - 64;
4235 block[0]= best_level;
4236 s->coded_score[n] = best_score - dc*dc;
4237 if(best_level == 0) return -1;
4238 else                return last_non_zero;
/* Back-track the chosen path and write the levels into the block in
 * permuted (IDCT) order. */
4242 av_assert2(last_level);
4244 block[ perm_scantable[last_non_zero] ]= last_level;
4247 for(; i>start_i; i -= run_tab[i] + 1){
4248 block[ perm_scantable[i-1] ]= level_tab[i];
4251 return last_non_zero;
/* 8x8 DCT basis functions in BASIS_SHIFT fixed point, indexed by permuted
 * coefficient index; lazily filled by build_basis() and used by
 * dct_quantize_refine() to evaluate spatial-domain changes. */
4254 static int16_t basis[64][64];
4256 static void build_basis(uint8_t *perm){
/* Standard 2-D DCT-III basis with the 1/sqrt(2) normalization of the first
 * row/column; 'perm' maps natural order to the IDCT's permuted order. */
4263 double s= 0.25*(1<<BASIS_SHIFT);
4265 int perm_index= perm[index];
4266 if(i==0) s*= sqrt(0.5);
4267 if(j==0) s*= sqrt(0.5);
4268 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4275 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4276 int16_t *block, int16_t *weight, int16_t *orig,
/* Iterative quantizer-noise-shaping refinement: starting from an already
 * quantized block, repeatedly try +/-1 changes to individual coefficients,
 * scoring the weighted spatial-domain reconstruction error (via the basis[]
 * tables) plus lambda * VLC-bit delta, and keep changes that lower the
 * score. Returns the updated last-non-zero index. */
4279 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4280 const uint8_t *scantable;
4281 const uint8_t *perm_scantable;
4282 //    unsigned int threshold1, threshold2;
4287 int qmul, qadd, start_i, last_non_zero, i, dc;
4289 uint8_t * last_length;
4291 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* Build the DCT basis tables on first use. */
4293 if(basis[0][0] == 0)
4294 build_basis(s->idsp.idct_permutation);
/* Intra path: scantables and VLC length tables. */
4299 scantable= s->intra_scantable.scantable;
4300 perm_scantable= s->intra_scantable.permutated;
4307 /* For AIC we skip quant/dequant of INTRADC */
4311 q <<= RECON_SHIFT-3;
4312 /* note: block[0] is assumed to be positive */
4314 //        block[0] = (block[0] + (q >> 1)) / q;
4316 //        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4317 //            bias= 1<<(QMAT_SHIFT-1);
4318 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4319 length     = s->intra_chroma_ac_vlc_length;
4320 last_length= s->intra_chroma_ac_vlc_last_length;
4322 length     = s->intra_ac_vlc_length;
4323 last_length= s->intra_ac_vlc_last_length;
/* Inter path. */
4326 scantable= s->inter_scantable.scantable;
4327 perm_scantable= s->inter_scantable.permutated;
4330 length     = s->inter_ac_vlc_length;
4331 last_length= s->inter_ac_vlc_last_length;
4333 last_non_zero = s->block_last_index[n];
/* rem[] = residual between current reconstruction and the original block,
 * in RECON_SHIFT fixed point; refined incrementally below. */
4335 dc += (1<<(RECON_SHIFT-1));
4336 for(i=0; i<64; i++){
4337 rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME  use orig directly instead of copying to rem[]
/* Derive the perceptual weights (range 16..63) from weight[]. */
4341 for(i=0; i<64; i++){
4346 w= FFABS(weight[i]) + qns*one;
4347 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4350 //            w=weight[i] = (63*qns + (w/2)) / w;
4353 av_assert2(w<(1<<6));
4356 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* Subtract the current reconstruction of each nonzero coefficient from
 * rem[] and record the run-length structure in run_tab[]. */
4360 for(i=start_i; i<=last_non_zero; i++){
4361 int j= perm_scantable[i];
4362 const int level= block[j];
4366 if(level<0) coeff= qmul*level - qadd;
4367 else        coeff= qmul*level + qadd;
4368 run_tab[rle_index++]=run;
4371 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
/* Refinement loop: best_score is the current weighted residual energy. */
4378 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4381 int run2, best_unquant_change=0, analyze_gradient;
4382 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
4384 if(analyze_gradient){
/* d1[] = weighted residual gradient, used to pre-screen sign changes. */
4385 for(i=0; i<64; i++){
4388 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
/* Try +/-1 on the intra DC coefficient. */
4394 const int level= block[0];
4395 int change, old_coeff;
4397 av_assert2(s->mb_intra);
4401 for(change=-1; change<=1; change+=2){
4402 int new_level= level + change;
4403 int score, new_coeff;
4405 new_coeff= q*new_level;
4406 if(new_coeff >= 2048 || new_coeff < 0)
4409 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4410 new_coeff - old_coeff);
4411 if(score<best_score){
4414 best_change= change;
4415 best_unquant_change= new_coeff - old_coeff;
4422 run2= run_tab[rle_index++];
/* Try +/-1 on every AC coefficient (and appending one past the last). */
4426 for(i=start_i; i<64; i++){
4427 int j= perm_scantable[i];
4428 const int level= block[j];
4429 int change, old_coeff;
4431 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4435 if(level<0) old_coeff= qmul*level - qadd;
4436 else        old_coeff= qmul*level + qadd;
4437 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4441 av_assert2(run2>=0 || i >= last_non_zero );
4444 for(change=-1; change<=1; change+=2){
4445 int new_level= level + change;
4446 int score, new_coeff, unquant_change;
/* With mild noise shaping only allow shrinking magnitudes. */
4449 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4453 if(new_level<0) new_coeff= qmul*new_level - qadd;
4454 else            new_coeff= qmul*new_level + qadd;
4455 if(new_coeff >= 2048 || new_coeff <= -2048)
4457 //FIXME check for overflow
/* Bit-cost delta when the coefficient stays nonzero. */
4460 if(level < 63 && level > -63){
4461 if(i < last_non_zero)
4462 score +=  length[UNI_AC_ENC_INDEX(run, new_level+64)]
4463 - length[UNI_AC_ENC_INDEX(run, level+64)];
4465 score +=  last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4466 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* 0 -> +/-1 transition: a new coefficient appears, splitting a run. */
4469 av_assert2(FFABS(new_level)==1);
4471 if(analyze_gradient){
/* Skip candidates whose sign agrees with the residual gradient
 * (they cannot reduce the weighted error). */
4472 int g= d1[ scantable[i] ];
4473 if(g && (g^new_level) >= 0)
4477 if(i < last_non_zero){
4478 int next_i= i + run2 + 1;
4479 int next_level= block[ perm_scantable[next_i] ] + 64;
4481 if(next_level&(~127))
4484 if(next_i < last_non_zero)
4485 score +=   length[UNI_AC_ENC_INDEX(run, 65)]
4486 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4487 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4489 score +=   length[UNI_AC_ENC_INDEX(run, 65)]
4490 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4491 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4493 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4495 score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4496 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* +/-1 -> 0 transition: a coefficient disappears, merging two runs. */
4502 av_assert2(FFABS(level)==1);
4504 if(i < last_non_zero){
4505 int next_i= i + run2 + 1;
4506 int next_level= block[ perm_scantable[next_i] ] + 64;
4508 if(next_level&(~127))
4511 if(next_i < last_non_zero)
4512 score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4513 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4514 - length[UNI_AC_ENC_INDEX(run, 65)];
4516 score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4517 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4518 - length[UNI_AC_ENC_INDEX(run, 65)];
4520 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4522 score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4523 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4530 unquant_change= new_coeff - old_coeff;
4531 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
/* Add the weighted spatial-domain distortion delta for this change. */
4533 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4535 if(score<best_score){
4538 best_change= change;
4539 best_unquant_change= unquant_change;
4543 prev_level= level + 64;
4544 if(prev_level&(~127))
/* Apply the best change found this round; update last_non_zero and the
 * run-length table, then fold the change into rem[] for the next round. */
4554 int j= perm_scantable[ best_coeff ];
4556 block[j] += best_change;
4558 if(best_coeff > last_non_zero){
4559 last_non_zero= best_coeff;
4560 av_assert2(block[j]);
4562 for(; last_non_zero>=start_i; last_non_zero--){
4563 if(block[perm_scantable[last_non_zero]])
4570 for(i=start_i; i<=last_non_zero; i++){
4571 int j= perm_scantable[i];
4572 const int level= block[j];
4575 run_tab[rle_index++]=run;
4582 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4588 return last_non_zero;
4592  * Permute an 8x8 block according to permutation.
4593  * @param block the block which will be permuted according to
4594  * the given permutation vector
4595  * @param permutation the permutation vector
4596  * @param last the last non zero coefficient in scantable order, used to
4597  * speed the permutation up
4598  * @param scantable the used scantable, this is only used to speed the
4599  * permutation up, the block is not (inverse) permutated
4600  * to scantable order!
4602 void ff_block_permute(int16_t *block, uint8_t *permutation,
4603 const uint8_t *scantable, int last)
4610 //FIXME it is ok but not clean and might fail for some permutations
4611 //    if (permutation[1] == 1)
/* Copy the (up to last+1) nonzero coefficients into a temporary, then
 * scatter them back through the permutation vector. Zeroing of the source
 * positions happens between these loops (elided in this view). */
4614 for (i = 0; i <= last; i++) {
4615 const int j = scantable[i];
4620 for (i = 0; i <= last; i++) {
4621 const int j = scantable[i];
4622 const int perm_j = permutation[j];
4623 block[perm_j] = temp[j];
/* Plain (non-trellis) quantization of one 8x8 block: forward DCT, optional
 * denoising, per-coefficient multiply by the precomputed qmat with rounding
 * bias, then a final permutation of the nonzero coefficients into the IDCT's
 * coefficient order. Returns the last-non-zero index; sets *overflow when a
 * quantized level may exceed max_qcoeff. */
4627 int ff_dct_quantize_c(MpegEncContext *s,
4628 int16_t *block, int n,
4629 int qscale, int *overflow)
4631 int i, j, level, last_non_zero, q, start_i;
4633 const uint8_t *scantable;
4636 unsigned int threshold1, threshold2;
4638 s->fdsp.fdct(block);
4640 if(s->dct_error_sum)
4641 s->denoise_dct(s, block);
/* Intra: quantize DC separately, pick luma/chroma intra matrices. */
4644 scantable= s->intra_scantable.scantable;
4652 /* For AIC we skip quant/dequant of INTRADC */
4655 /* note: block[0] is assumed to be positive */
4656 block[0] = (block[0] + (q >> 1)) / q;
4659 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4660 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
/* Inter: single matrix and bias. */
4662 scantable= s->inter_scantable.scantable;
4665 qmat = s->q_inter_matrix[qscale];
4666 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4668 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4669 threshold2= (threshold1<<1);
/* Scan backwards for the last coefficient surviving quantization. */
4670 for(i=63;i>=start_i;i--) {
4672 level = block[j] * qmat[j];
4674 if(((unsigned)(level+threshold1))>threshold2){
/* Quantize the surviving coefficients with the rounding bias. */
4681 for(i=start_i; i<=last_non_zero; i++) {
4683 level = block[j] * qmat[j];
4685 //        if(   bias+level >= (1<<QMAT_SHIFT)
4686 //           || bias-level >= (1<<QMAT_SHIFT)){
4687 if(((unsigned)(level+threshold1))>threshold2){
4689 level= (bias + level)>>QMAT_SHIFT;
4692 level= (bias - level)>>QMAT_SHIFT;
4700 *overflow= s->max_qcoeff < max; //overflow might have happened
4702 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4703 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4704 ff_block_permute(block, s->idsp.idct_permutation,
4705 scantable, last_non_zero);
4707 return last_non_zero;
/* Option tables and codec registration for the H.263 encoder. */
4710 #define OFFSET(x) offsetof(MpegEncContext, x)
4711 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
4712 static const AVOption h263_options[] = {
4713 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4714 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4716 #if FF_API_MPEGVIDEO_OPTS
/* Deprecated mpegvideo options kept for ABI compatibility. */
4717 FF_MPV_DEPRECATED_MPEG_QUANT_OPT
4718 FF_MPV_DEPRECATED_A53_CC_OPT
4719 FF_MPV_DEPRECATED_MATRIX_OPT
4720 FF_MPV_DEPRECATED_BFRAME_OPTS
4725 static const AVClass h263_class = {
4726 .class_name = "H.263 encoder",
4727 .item_name  = av_default_item_name,
4728 .option     = h263_options,
4729 .version    = LIBAVUTIL_VERSION_INT,
4732 AVCodec ff_h263_encoder = {
4734 .long_name      = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4735 .type           = AVMEDIA_TYPE_VIDEO,
4736 .id             = AV_CODEC_ID_H263,
4737 .priv_data_size = sizeof(MpegEncContext),
4738 .init           = ff_mpv_encode_init,
4739 .encode2        = ff_mpv_encode_picture,
4740 .close          = ff_mpv_encode_end,
4741 .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
4742 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4743 .priv_class     = &h263_class,
/* Option tables and codec registration for the H.263+ (H.263v2) encoder. */
4746 static const AVOption h263p_options[] = {
4747 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4748 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4749 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4750 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
4752 #if FF_API_MPEGVIDEO_OPTS
/* Deprecated mpegvideo options kept for ABI compatibility. */
4753 FF_MPV_DEPRECATED_MPEG_QUANT_OPT
4754 FF_MPV_DEPRECATED_A53_CC_OPT
4755 FF_MPV_DEPRECATED_MATRIX_OPT
4756 FF_MPV_DEPRECATED_BFRAME_OPTS
4760 static const AVClass h263p_class = {
4761 .class_name = "H.263p encoder",
4762 .item_name  = av_default_item_name,
4763 .option     = h263p_options,
4764 .version    = LIBAVUTIL_VERSION_INT,
4767 AVCodec ff_h263p_encoder = {
4769 .long_name      = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4770 .type           = AVMEDIA_TYPE_VIDEO,
4771 .id             = AV_CODEC_ID_H263P,
4772 .priv_data_size = sizeof(MpegEncContext),
4773 .init           = ff_mpv_encode_init,
4774 .encode2        = ff_mpv_encode_picture,
4775 .close          = ff_mpv_encode_end,
4776 .capabilities   = AV_CODEC_CAP_SLICE_THREADS,
4777 .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
4778 .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4779 .priv_class     = &h263p_class,
4782 static const AVClass msmpeg4v2_class = {
4783 .class_name = "msmpeg4v2 encoder",
4784 .item_name = av_default_item_name,
4785 .option = ff_mpv_generic_options,
4786 .version = LIBAVUTIL_VERSION_INT,
4789 AVCodec ff_msmpeg4v2_encoder = {
4790 .name = "msmpeg4v2",
4791 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4792 .type = AVMEDIA_TYPE_VIDEO,
4793 .id = AV_CODEC_ID_MSMPEG4V2,
4794 .priv_data_size = sizeof(MpegEncContext),
4795 .init = ff_mpv_encode_init,
4796 .encode2 = ff_mpv_encode_picture,
4797 .close = ff_mpv_encode_end,
4798 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4799 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4800 .priv_class = &msmpeg4v2_class,
4803 static const AVClass msmpeg4v3_class = {
4804 .class_name = "msmpeg4v3 encoder",
4805 .item_name = av_default_item_name,
4806 .option = ff_mpv_generic_options,
4807 .version = LIBAVUTIL_VERSION_INT,
4810 AVCodec ff_msmpeg4v3_encoder = {
4812 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4813 .type = AVMEDIA_TYPE_VIDEO,
4814 .id = AV_CODEC_ID_MSMPEG4V3,
4815 .priv_data_size = sizeof(MpegEncContext),
4816 .init = ff_mpv_encode_init,
4817 .encode2 = ff_mpv_encode_picture,
4818 .close = ff_mpv_encode_end,
4819 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4820 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4821 .priv_class = &msmpeg4v3_class,
4824 static const AVClass wmv1_class = {
4825 .class_name = "wmv1 encoder",
4826 .item_name = av_default_item_name,
4827 .option = ff_mpv_generic_options,
4828 .version = LIBAVUTIL_VERSION_INT,
4831 AVCodec ff_wmv1_encoder = {
4833 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4834 .type = AVMEDIA_TYPE_VIDEO,
4835 .id = AV_CODEC_ID_WMV1,
4836 .priv_data_size = sizeof(MpegEncContext),
4837 .init = ff_mpv_encode_init,
4838 .encode2 = ff_mpv_encode_picture,
4839 .close = ff_mpv_encode_end,
4840 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4841 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4842 .priv_class = &wmv1_class,