2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/mem_internal.h"
40 #include "libavutil/pixdesc.h"
41 #include "libavutil/opt.h"
42 #include "libavutil/thread.h"
47 #include "mpegvideo.h"
48 #include "mpegvideodata.h"
52 #include "mjpegenc_common.h"
54 #include "mpegutils.h"
56 #include "speedhqenc.h"
58 #include "pixblockdsp.h"
62 #include "aandcttab.h"
64 #include "mpeg4video.h"
66 #include "bytestream.h"
69 #include "packet_internal.h"
/* Fixed-point precision (in bits) of the quantizer rounding bias values. */
73 #define QUANT_BIAS_SHIFT 8
/* Shift used for the 16-bit quantization tables (qmat16) consumed by the
 * SIMD quantizer path.
 * NOTE(review): a companion QMAT_SHIFT define is referenced by
 * ff_convert_matrix() below but is not visible in this excerpt -- confirm
 * it is defined nearby in the full file. */
75 #define QMAT_SHIFT_MMX 16
/* Forward declarations for encoder internals defined later in this file. */
78 static int encode_picture(MpegEncContext *s, int picture_number);
79 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
80 static int sse_mb(MpegEncContext *s);
81 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
82 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
/* Default motion-vector penalty / fcode tables shared by all encoder
 * instances; written once by mpv_encode_init_static() (guarded by an
 * AVOnce in mpv_encode_defaults()) and only read afterwards. */
84 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
85 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
/* Generic AVOption table shared by the mpegvideo-family encoders.
 * NOTE(review): only the deprecated option entries are visible in this
 * excerpt; the remaining entries and the array terminator are outside it. */
87 const AVOption ff_mpv_generic_options[] = {
89 #if FF_API_MPEGVIDEO_OPTS
/* Deprecated options kept for API compatibility while FF_API_MPEGVIDEO_OPTS
 * is enabled. */
90 FF_MPV_DEPRECATED_MPEG_QUANT_OPT
91 FF_MPV_DEPRECATED_A53_CC_OPT
/**
 * Precompute per-qscale quantization multiplier tables from a quant matrix.
 *
 * For every qscale in [qmin, qmax] this fills:
 *   - qmat[qscale][i]      : 32-bit reciprocal multipliers used by the C
 *                            quantizer (scaling depends on which forward
 *                            DCT implementation is selected in s->fdsp),
 *   - qmat16[qscale][0][i] : 16-bit multipliers for the SIMD quantizer,
 *   - qmat16[qscale][1][i] : the matching rounding-bias terms derived
 *                            from 'bias'.
 * 'intra' selects whether element 0 (the DC coefficient) is skipped in the
 * overflow check loop at the end.
 *
 * NOTE(review): this excerpt is missing interleaved source lines
 * (declarations of qscale/qscale2/i, closing braces, the header of the
 * default/else branch, and the tail of the overflow-shift loop), so the
 * comments below describe only what is visible.
 */
96 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
97 uint16_t (*qmat16)[2][64],
98 const uint16_t *quant_matrix,
99 int bias, int qmin, int qmax, int intra)
101 FDCTDSPContext *fdsp = &s->fdsp;
105 for (qscale = qmin; qscale <= qmax; qscale++) {
/* MPEG-2 non-linear qscale maps through a lookup table; the linear case
 * simply doubles the scale code. */
109 if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
110 else qscale2 = qscale << 1;
/* Branch 1: "islow"-style (and FAAN) DCTs -- coefficients carry no extra
 * AAN scaling, so the reciprocal uses the plain denominator. */
112 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
114 fdsp->fdct == ff_faandct ||
115 #endif /* CONFIG_FAANDCT */
116 fdsp->fdct == ff_jpeg_fdct_islow_10) {
117 for (i = 0; i < 64; i++) {
/* idct_permutation maps natural order to the IDCT's coefficient layout. */
118 const int j = s->idsp.idct_permutation[i];
119 int64_t den = (int64_t) qscale2 * quant_matrix[j];
120 /* 16 <= qscale * quant_matrix[i] <= 7905
121 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
122 * 19952 <= x <= 249205026
123 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
124 * 3444240 >= (1 << 36) / (x) >= 275 */
126 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
/* Branch 2: "ifast" DCT -- output is pre-scaled by ff_aanscales, so the
 * denominator folds that scale in and the shift is 14 bits larger. */
128 } else if (fdsp->fdct == ff_fdct_ifast) {
129 for (i = 0; i < 64; i++) {
130 const int j = s->idsp.idct_permutation[i];
131 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
132 /* 16 <= qscale * quant_matrix[i] <= 7905
133 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
134 * 19952 <= x <= 249205026
135 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
136 * 3444240 >= (1 << 36) / (x) >= 275 */
138 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
/* Branch 3 (default -- its 'else' header is not visible in this excerpt):
 * fills both the 32-bit table and the 16-bit SIMD tables. */
141 for (i = 0; i < 64; i++) {
142 const int j = s->idsp.idct_permutation[i];
143 int64_t den = (int64_t) qscale2 * quant_matrix[j];
144 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
145 * Assume x = qscale * quant_matrix[i]
147 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
148 * so 32768 >= (1 << 19) / (x) >= 67 */
149 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
150 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
151 // (qscale * quant_matrix[i]);
152 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
/* Clamp degenerate multipliers: 0 would zero all coefficients and
 * 128*256 (== 1<<15) would overflow the signed 16-bit SIMD math. */
154 if (qmat16[qscale][0][i] == 0 ||
155 qmat16[qscale][0][i] == 128 * 256)
156 qmat16[qscale][0][i] = 128 * 256 - 1;
/* Rounding bias scaled to match the 16-bit multiplier. */
157 qmat16[qscale][1][i] =
158 ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
159 qmat16[qscale][0][i]);
/* Overflow guard: starting at the DC (inter) or first AC (intra)
 * coefficient, shrink the shift while max * qmat would overflow int.
 * NOTE(review): the lines adjusting 'shift' and 'max' for the non-ifast
 * case are missing from this excerpt. */
163 for (i = intra; i < 64; i++) {
165 if (fdsp->fdct == ff_fdct_ifast) {
166 max = (8191LL * ff_aanscales[i]) >> 14;
168 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
174 av_log(s->avctx, AV_LOG_INFO,
175 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/* Derive s->qscale (and s->lambda2) from the current rate-control lambda.
 * NOTE(review): interior lines (loop body tail, else header, closing
 * braces) are missing from this excerpt. */
180 static inline void update_qscale(MpegEncContext *s)
/* The '&& 0' makes this branch dead code: the non-linear-qscale search is
 * deliberately disabled, so the linear mapping below is always used. */
182 if (s->q_scale_type == 1 && 0) {
184 int bestdiff=INT_MAX;
/* Search the MPEG-2 non-linear qscale table for the entry whose scaled
 * value is closest to lambda * 139, honoring qmin/qmax (qmax may be
 * ignored when VBV forces it, per s->vbv_ignore_qmax). */
187 for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
188 int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
189 if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
190 (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
192 if (diff < bestdiff) {
/* Linear mapping: qscale ~= lambda * 139 / 2^(FF_LAMBDA_SHIFT+7),
 * rounded, then clamped to [qmin, qmax] (qmax relaxed to 31 when VBV
 * overrides it). */
199 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
200 (FF_LAMBDA_SHIFT + 7);
201 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
/* Keep lambda2 = lambda^2 (rounded) in sync for RD decisions. */
204 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/* Write the 64 entries of a quantization matrix to the bitstream as 8-bit
 * values in zigzag scan order.
 * NOTE(review): the function's opening brace, the 'i' declaration, and a
 * presumable NULL-matrix branch are missing from this excerpt. */
208 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
214 for (i = 0; i < 64; i++) {
215 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
222 * init s->current_picture.qscale_table from s->lambda_table
224 void ff_init_qscale_tab(MpegEncContext *s)
226 int8_t * const qscale_table = s->current_picture.qscale_table;
/* Convert each macroblock's lambda to a qscale using the same
 * lambda*139 >> (FF_LAMBDA_SHIFT+7) mapping as update_qscale(), clamped
 * to the configured range (the qmax argument of av_clip is on a line
 * missing from this excerpt). */
229 for (i = 0; i < s->mb_num; i++) {
230 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
231 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
232 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/* Copy the per-picture fields that motion estimation may have changed from
 * one slice context into another, so duplicated contexts stay consistent.
 * NOTE(review): the 'src' parameter line, several COPY() entries, and the
 * #undef are missing from this excerpt. */
237 static void update_duplicate_context_after_me(MpegEncContext *dst,
240 #define COPY(a) dst->a= src->a
242 COPY(current_picture);
248 COPY(picture_in_gop_number);
249 COPY(gop_picture_number);
250 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
251 COPY(progressive_frame); // FIXME don't set in encode_header
252 COPY(partitioned_frame); // FIXME don't set in encode_header
/* One-time initialization of the shared default tables (run under
 * ff_thread_once from mpv_encode_defaults).  Marks the small-MV range
 * [-16, 16) around MAX_MV with fcode 1; the default_mv_penalty setup
 * presumably precedes this loop but is missing from this excerpt. */
256 static void mpv_encode_init_static(void)
258 for (int i = -16; i < 16; i++)
259 default_fcode_tab[i + MAX_MV] = 1;
263 * Set the given MpegEncContext to defaults for encoding.
264 * the changed fields will not depend upon the prior state of the MpegEncContext.
266 static void mpv_encode_defaults(MpegEncContext *s)
268 static AVOnce init_static_once = AV_ONCE_INIT;
/* Shared decoder/encoder defaults first... */
270 ff_mpv_common_defaults(s);
/* ...then the process-wide static tables, initialized exactly once even
 * when several encoder instances are opened concurrently. */
272 ff_thread_once(&init_static_once, mpv_encode_init_static);
274 s->me.mv_penalty = default_mv_penalty;
275 s->fcode_tab = default_fcode_tab;
/* Reset per-instance picture counters. */
277 s->input_picture_number = 0;
278 s->picture_in_gop_number = 0;
/* Select the DCT/quantizer implementations for this context: install the
 * C fallbacks, let the x86 init override them, and route dct_quantize
 * through the trellis quantizer when requested.
 * NOTE(review): the opening brace and the ARCH_X86 guard around the x86
 * init call are missing from this excerpt. */
281 av_cold int ff_dct_encode_init(MpegEncContext *s)
284 ff_dct_encode_init_x86(s);
286 if (CONFIG_H263_ENCODER)
287 ff_h263dsp_init(&s->h263dsp);
288 if (!s->dct_quantize)
289 s->dct_quantize = ff_dct_quantize_c;
291 s->denoise_dct = denoise_dct_c;
/* Remember the non-trellis quantizer before possibly replacing
 * dct_quantize with the (slower) trellis version. */
292 s->fast_dct_quantize = s->dct_quantize;
293 if (s->avctx->trellis)
294 s->dct_quantize = dct_quantize_trellis_c;
299 /* init video encoder */
/* Validates all user-supplied options, configures per-codec state, and
 * allocates the encoder's tables and rate-control context.  Returns 0 on
 * success or a negative AVERROR code.
 * NOTE(review): this excerpt is missing many interleaved source lines
 * (break statements, closing braces, several else branches, and some
 * condition lines), so the section comments below describe only the
 * visible logic. */
300 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
302 MpegEncContext *s = avctx->priv_data;
303 AVCPBProperties *cpb_props;
306 mpv_encode_defaults(s);
/* --- Pixel format -> internal chroma layout ------------------------- */
308 switch (avctx->pix_fmt) {
309 case AV_PIX_FMT_YUVJ444P:
310 case AV_PIX_FMT_YUV444P:
311 s->chroma_format = CHROMA_444;
313 case AV_PIX_FMT_YUVJ422P:
314 case AV_PIX_FMT_YUV422P:
315 s->chroma_format = CHROMA_422;
317 case AV_PIX_FMT_YUVJ420P:
318 case AV_PIX_FMT_YUV420P:
320 s->chroma_format = CHROMA_420;
324 avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
/* --- Import deprecated AVCodecContext options ----------------------- */
326 #if FF_API_PRIVATE_OPT
327 FF_DISABLE_DEPRECATION_WARNINGS
328 if (avctx->rtp_payload_size)
329 s->rtp_payload_size = avctx->rtp_payload_size;
330 if (avctx->me_penalty_compensation)
331 s->me_penalty_compensation = avctx->me_penalty_compensation;
333 s->me_pre = avctx->pre_me;
334 FF_ENABLE_DEPRECATION_WARNINGS
/* --- Basic parameters and sanity clamps ----------------------------- */
337 s->bit_rate = avctx->bit_rate;
338 s->width = avctx->width;
339 s->height = avctx->height;
340 if (avctx->gop_size > 600 &&
341 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
342 av_log(avctx, AV_LOG_WARNING,
343 "keyframe interval too large!, reducing it from %d to %d\n",
344 avctx->gop_size, 600);
345 avctx->gop_size = 600;
347 s->gop_size = avctx->gop_size;
349 if (avctx->max_b_frames > MAX_B_FRAMES) {
350 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
351 "is %d.\n", MAX_B_FRAMES);
352 avctx->max_b_frames = MAX_B_FRAMES;
354 s->max_b_frames = avctx->max_b_frames;
355 s->codec_id = avctx->codec->id;
356 s->strict_std_compliance = avctx->strict_std_compliance;
357 s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
358 s->rtp_mode = !!s->rtp_payload_size;
359 s->intra_dc_precision = avctx->intra_dc_precision;
361 // workaround some differences between how applications specify dc precision
362 if (s->intra_dc_precision < 0) {
363 s->intra_dc_precision += 8;
364 } else if (s->intra_dc_precision >= 8)
365 s->intra_dc_precision -= 8;
367 if (s->intra_dc_precision < 0) {
368 av_log(avctx, AV_LOG_ERROR,
369 "intra dc precision must be positive, note some applications use"
370 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
371 return AVERROR(EINVAL);
/* (The statement controlled by this condition is missing from this
 * excerpt.) */
374 if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))
/* Only MPEG-2 supports non-zero intra DC precision (up to 3). */
377 if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
378 av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
379 return AVERROR(EINVAL);
381 s->user_specified_pts = AV_NOPTS_VALUE;
383 if (s->gop_size <= 1) {
391 s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
/* Adaptive quantization is enabled when any masking option or QP-RD is
 * in use (additional condition lines are missing from this excerpt). */
393 s->adaptive_quant = (avctx->lumi_masking ||
394 avctx->dark_masking ||
395 avctx->temporal_cplx_masking ||
396 avctx->spatial_cplx_masking ||
399 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
402 s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
/* --- Rate control / VBV buffer validation --------------------------- */
/* Auto-derive a VBV buffer size when only a max rate was given. */
404 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
405 switch(avctx->codec_id) {
406 case AV_CODEC_ID_MPEG1VIDEO:
407 case AV_CODEC_ID_MPEG2VIDEO:
408 avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
410 case AV_CODEC_ID_MPEG4:
411 case AV_CODEC_ID_MSMPEG4V1:
412 case AV_CODEC_ID_MSMPEG4V2:
413 case AV_CODEC_ID_MSMPEG4V3:
414 if (avctx->rc_max_rate >= 15000000) {
415 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
416 } else if(avctx->rc_max_rate >= 2000000) {
417 avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
418 } else if(avctx->rc_max_rate >= 384000) {
419 avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
421 avctx->rc_buffer_size = 40;
422 avctx->rc_buffer_size *= 16384;
425 if (avctx->rc_buffer_size) {
426 av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
430 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
431 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
432 return AVERROR(EINVAL);
435 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
436 av_log(avctx, AV_LOG_INFO,
437 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
440 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
441 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
442 return AVERROR(EINVAL);
445 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
446 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
447 return AVERROR(EINVAL);
450 if (avctx->rc_max_rate &&
451 avctx->rc_max_rate == avctx->bit_rate &&
452 avctx->rc_max_rate != avctx->rc_min_rate) {
453 av_log(avctx, AV_LOG_INFO,
454 "impossible bitrate constraints, this will fail\n");
457 if (avctx->rc_buffer_size &&
458 avctx->bit_rate * (int64_t)avctx->time_base.num >
459 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
460 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
461 return AVERROR(EINVAL);
/* Widen an unusably small bitrate tolerance instead of failing. */
464 if (!s->fixed_qscale &&
465 avctx->bit_rate * av_q2d(avctx->time_base) >
466 avctx->bit_rate_tolerance) {
467 av_log(avctx, AV_LOG_WARNING,
468 "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
469 avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
/* MPEG-1/2 CBR: warn when the VBV buffer exceeds what the 16-bit
 * vbv_delay field (90 kHz units) can express. */
472 if (avctx->rc_max_rate &&
473 avctx->rc_min_rate == avctx->rc_max_rate &&
474 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
475 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
476 90000LL * (avctx->rc_buffer_size - 1) >
477 avctx->rc_max_rate * 0xFFFFLL) {
478 av_log(avctx, AV_LOG_INFO,
479 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
480 "specified vbv buffer is too large for the given bitrate!\n");
/* --- Feature/codec compatibility checks ----------------------------- */
483 if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
484 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
485 s->codec_id != AV_CODEC_ID_FLV1) {
486 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
487 return AVERROR(EINVAL);
490 if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
491 av_log(avctx, AV_LOG_ERROR,
492 "OBMC is only supported with simple mb decision\n");
493 return AVERROR(EINVAL);
496 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
497 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
498 return AVERROR(EINVAL);
501 if (s->max_b_frames &&
502 s->codec_id != AV_CODEC_ID_MPEG4 &&
503 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
504 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
505 av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
506 return AVERROR(EINVAL);
508 if (s->max_b_frames < 0) {
509 av_log(avctx, AV_LOG_ERROR,
510 "max b frames must be 0 or positive for mpegvideo based encoders\n");
511 return AVERROR(EINVAL);
/* Codecs with 8-bit aspect-ratio fields: reduce oversized SARs. */
514 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
515 s->codec_id == AV_CODEC_ID_H263 ||
516 s->codec_id == AV_CODEC_ID_H263P) &&
517 (avctx->sample_aspect_ratio.num > 255 ||
518 avctx->sample_aspect_ratio.den > 255)) {
519 av_log(avctx, AV_LOG_WARNING,
520 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
521 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
522 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
523 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
/* --- Resolution constraints per codec -------------------------------- */
526 if ((s->codec_id == AV_CODEC_ID_H263 ||
527 s->codec_id == AV_CODEC_ID_H263P) &&
528 (avctx->width > 2048 ||
529 avctx->height > 1152 )) {
530 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
531 return AVERROR(EINVAL);
533 if ((s->codec_id == AV_CODEC_ID_H263 ||
534 s->codec_id == AV_CODEC_ID_H263P ||
535 s->codec_id == AV_CODEC_ID_RV20) &&
536 ((avctx->width &3) ||
537 (avctx->height&3) )) {
538 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
539 return AVERROR(EINVAL);
/* (The width&15 half of this condition is missing from this excerpt.) */
542 if (s->codec_id == AV_CODEC_ID_RV10 &&
544 avctx->height&15 )) {
545 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
546 return AVERROR(EINVAL);
549 if ((s->codec_id == AV_CODEC_ID_WMV1 ||
550 s->codec_id == AV_CODEC_ID_WMV2) &&
552 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
553 return AVERROR(EINVAL);
556 if ((avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
557 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
558 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
559 return AVERROR(EINVAL);
/* --- Quantizer/mode flag validation ---------------------------------- */
562 #if FF_API_PRIVATE_OPT
563 FF_DISABLE_DEPRECATION_WARNINGS
564 if (avctx->mpeg_quant)
566 FF_ENABLE_DEPRECATION_WARNINGS
568 if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
569 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
570 av_log(avctx, AV_LOG_ERROR,
571 "mpeg2 style quantization not supported by codec\n");
572 return AVERROR(EINVAL);
576 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
577 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
578 return AVERROR(EINVAL);
581 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
582 avctx->mb_decision != FF_MB_DECISION_RD) {
583 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
584 return AVERROR(EINVAL);
587 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
588 (s->codec_id == AV_CODEC_ID_AMV ||
589 s->codec_id == AV_CODEC_ID_MJPEG)) {
590 // Used to produce garbage with MJPEG.
591 av_log(avctx, AV_LOG_ERROR,
592 "QP RD is no longer compatible with MJPEG or AMV\n");
593 return AVERROR(EINVAL);
596 #if FF_API_PRIVATE_OPT
597 FF_DISABLE_DEPRECATION_WARNINGS
598 if (avctx->scenechange_threshold)
599 s->scenechange_threshold = avctx->scenechange_threshold;
600 FF_ENABLE_DEPRECATION_WARNINGS
603 if (s->scenechange_threshold < 1000000000 &&
604 (avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
605 av_log(avctx, AV_LOG_ERROR,
606 "closed gop with scene change detection are not supported yet, "
607 "set threshold to 1000000000\n");
608 return AVERROR_PATCHWELCOME;
611 if (avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
612 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
613 s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
614 av_log(avctx, AV_LOG_ERROR,
615 "low delay forcing is only available for mpeg2, "
616 "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
617 return AVERROR(EINVAL);
619 if (s->max_b_frames != 0) {
620 av_log(avctx, AV_LOG_ERROR,
621 "B-frames cannot be used with low delay\n");
622 return AVERROR(EINVAL);
626 if (s->q_scale_type == 1) {
627 if (avctx->qmax > 28) {
628 av_log(avctx, AV_LOG_ERROR,
629 "non linear quant only supports qmax <= 28 currently\n");
630 return AVERROR_PATCHWELCOME;
/* --- Slice/thread validation ------------------------------------------ */
634 if (avctx->slices > 1 &&
635 (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
636 av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
637 return AVERROR(EINVAL);
640 if (avctx->thread_count > 1 &&
641 s->codec_id != AV_CODEC_ID_MPEG4 &&
642 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
643 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
644 s->codec_id != AV_CODEC_ID_MJPEG &&
645 (s->codec_id != AV_CODEC_ID_H263P)) {
646 av_log(avctx, AV_LOG_ERROR,
647 "multi threaded encoding not supported by codec\n");
648 return AVERROR_PATCHWELCOME;
651 if (avctx->thread_count < 1) {
652 av_log(avctx, AV_LOG_ERROR,
653 "automatic thread number detection not supported by codec, "
655 return AVERROR_PATCHWELCOME;
/* --- Deprecated B-frame strategy options ------------------------------ */
658 #if FF_API_PRIVATE_OPT
659 FF_DISABLE_DEPRECATION_WARNINGS
660 if (avctx->b_frame_strategy)
661 s->b_frame_strategy = avctx->b_frame_strategy;
662 if (avctx->b_sensitivity != 40)
663 s->b_sensitivity = avctx->b_sensitivity;
664 FF_ENABLE_DEPRECATION_WARNINGS
667 if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
668 av_log(avctx, AV_LOG_INFO,
669 "notice: b_frame_strategy only affects the first pass\n");
670 s->b_frame_strategy = 0;
/* Reduce the timebase to lowest terms. */
673 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
675 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
676 avctx->time_base.den /= i;
677 avctx->time_base.num /= i;
/* --- Quantizer bias defaults per codec family ------------------------ */
681 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
682 // (a + x * 3 / 8) / x
683 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
684 s->inter_quant_bias = 0;
686 s->intra_quant_bias = 0;
688 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
691 if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
692 av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
693 return AVERROR(EINVAL);
696 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
/* MPEG-4's vop_time_increment is a 16-bit field. */
698 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
699 avctx->time_base.den > (1 << 16) - 1) {
700 av_log(avctx, AV_LOG_ERROR,
701 "timebase %d/%d not supported by MPEG 4 standard, "
702 "the maximum admitted value for the timebase denominator "
703 "is %d\n", avctx->time_base.num, avctx->time_base.den,
705 return AVERROR(EINVAL);
707 s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
/* --- Per-codec output format setup ------------------------------------ */
709 switch (avctx->codec->id) {
710 case AV_CODEC_ID_MPEG1VIDEO:
711 s->out_format = FMT_MPEG1;
712 s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
713 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
715 case AV_CODEC_ID_MPEG2VIDEO:
716 s->out_format = FMT_MPEG1;
717 s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
718 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
721 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
722 case AV_CODEC_ID_MJPEG:
723 case AV_CODEC_ID_AMV:
724 s->out_format = FMT_MJPEG;
725 s->intra_only = 1; /* force intra only for jpeg */
726 if ((ret = ff_mjpeg_encode_init(s)) < 0)
732 case AV_CODEC_ID_SPEEDHQ:
733 s->out_format = FMT_SPEEDHQ;
734 s->intra_only = 1; /* force intra only for SHQ */
735 if (!CONFIG_SPEEDHQ_ENCODER)
736 return AVERROR_ENCODER_NOT_FOUND;
737 if ((ret = ff_speedhq_encode_init(s)) < 0)
742 case AV_CODEC_ID_H261:
743 if (!CONFIG_H261_ENCODER)
744 return AVERROR_ENCODER_NOT_FOUND;
745 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
746 av_log(avctx, AV_LOG_ERROR,
747 "The specified picture size of %dx%d is not valid for the "
748 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
749 s->width, s->height);
750 return AVERROR(EINVAL);
752 s->out_format = FMT_H261;
755 s->rtp_mode = 0; /* Sliced encoding not supported */
757 case AV_CODEC_ID_H263:
758 if (!CONFIG_H263_ENCODER)
759 return AVERROR_ENCODER_NOT_FOUND;
760 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
761 s->width, s->height) == 8) {
762 av_log(avctx, AV_LOG_ERROR,
763 "The specified picture size of %dx%d is not valid for "
764 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
765 "352x288, 704x576, and 1408x1152. "
766 "Try H.263+.\n", s->width, s->height);
767 return AVERROR(EINVAL);
769 s->out_format = FMT_H263;
773 case AV_CODEC_ID_H263P:
774 s->out_format = FMT_H263;
777 s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
778 s->modified_quant = s->h263_aic;
779 s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
780 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
783 /* These are just to be sure */
787 case AV_CODEC_ID_FLV1:
788 s->out_format = FMT_H263;
789 s->h263_flv = 2; /* format = 1; 11-bit codes */
790 s->unrestricted_mv = 1;
791 s->rtp_mode = 0; /* don't allow GOB */
795 case AV_CODEC_ID_RV10:
796 s->out_format = FMT_H263;
800 case AV_CODEC_ID_RV20:
801 s->out_format = FMT_H263;
804 s->modified_quant = 1;
808 s->unrestricted_mv = 0;
810 case AV_CODEC_ID_MPEG4:
811 s->out_format = FMT_H263;
813 s->unrestricted_mv = 1;
814 s->low_delay = s->max_b_frames ? 0 : 1;
815 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
817 case AV_CODEC_ID_MSMPEG4V2:
818 s->out_format = FMT_H263;
820 s->unrestricted_mv = 1;
821 s->msmpeg4_version = 2;
825 case AV_CODEC_ID_MSMPEG4V3:
826 s->out_format = FMT_H263;
828 s->unrestricted_mv = 1;
829 s->msmpeg4_version = 3;
830 s->flipflop_rounding = 1;
834 case AV_CODEC_ID_WMV1:
835 s->out_format = FMT_H263;
837 s->unrestricted_mv = 1;
838 s->msmpeg4_version = 4;
839 s->flipflop_rounding = 1;
843 case AV_CODEC_ID_WMV2:
844 s->out_format = FMT_H263;
846 s->unrestricted_mv = 1;
847 s->msmpeg4_version = 5;
848 s->flipflop_rounding = 1;
/* Unrecognized codec id (default case header missing from excerpt). */
853 return AVERROR(EINVAL);
856 #if FF_API_PRIVATE_OPT
857 FF_DISABLE_DEPRECATION_WARNINGS
858 if (avctx->noise_reduction)
859 s->noise_reduction = avctx->noise_reduction;
860 FF_ENABLE_DEPRECATION_WARNINGS
863 avctx->has_b_frames = !s->low_delay;
867 s->progressive_frame =
868 s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
869 AV_CODEC_FLAG_INTERLACED_ME) ||
/* --- Common init, DSP contexts, allocations --------------------------- */
874 if ((ret = ff_mpv_common_init(s)) < 0)
877 ff_fdctdsp_init(&s->fdsp, avctx);
878 ff_me_cmp_init(&s->mecc, avctx);
879 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
880 ff_pixblockdsp_init(&s->pdsp, avctx);
881 ff_qpeldsp_init(&s->qdsp);
883 if (s->msmpeg4_version) {
884 int ac_stats_size = 2 * 2 * (MAX_LEVEL + 1) * (MAX_RUN + 1) * 2 * sizeof(int);
885 if (!(s->ac_stats = av_mallocz(ac_stats_size)))
886 return AVERROR(ENOMEM);
/* All-or-nothing allocation of the quant tables and picture arrays;
 * any failure reports ENOMEM (cleanup presumably happens in the caller
 * or close function -- not visible here). */
889 if (!(avctx->stats_out = av_mallocz(256)) ||
890 !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix, 32) ||
891 !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix, 32) ||
892 !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix, 32) ||
893 !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16, 32) ||
894 !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
895 !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16, 32) ||
896 !FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_PICTURE_COUNT) ||
897 !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_PICTURE_COUNT))
898 return AVERROR(ENOMEM);
900 if (s->noise_reduction) {
901 if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
902 return AVERROR(ENOMEM);
905 ff_dct_encode_init(s);
907 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
908 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
910 if (s->slice_context_count > 1) {
913 if (avctx->codec_id == AV_CODEC_ID_H263P)
914 s->h263_slice_structured = 1;
917 s->quant_precision = 5;
/* --- Deprecated frame-skip options ------------------------------------ */
919 #if FF_API_PRIVATE_OPT
920 FF_DISABLE_DEPRECATION_WARNINGS
921 if (avctx->frame_skip_threshold)
922 s->frame_skip_threshold = avctx->frame_skip_threshold;
923 if (avctx->frame_skip_factor)
924 s->frame_skip_factor = avctx->frame_skip_factor;
925 if (avctx->frame_skip_exp)
926 s->frame_skip_exp = avctx->frame_skip_exp;
927 if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
928 s->frame_skip_cmp = avctx->frame_skip_cmp;
929 FF_ENABLE_DEPRECATION_WARNINGS
932 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, avctx->ildct_cmp);
933 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
/* --- Codec-specific encoder init -------------------------------------- */
935 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
936 ff_h261_encode_init(s);
937 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
938 ff_h263_encode_init(s);
939 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
940 ff_msmpeg4_encode_init(s);
941 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
942 && s->out_format == FMT_MPEG1)
943 ff_mpeg1_encode_init(s);
/* --- Select default quant matrices (user matrices override) ----------- */
946 for (i = 0; i < 64; i++) {
947 int j = s->idsp.idct_permutation[i];
948 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
950 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
951 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
952 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
954 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
955 } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
957 s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
960 s->chroma_intra_matrix[j] =
961 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
962 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
964 if (avctx->intra_matrix)
965 s->intra_matrix[j] = avctx->intra_matrix[i];
966 if (avctx->inter_matrix)
967 s->inter_matrix[j] = avctx->inter_matrix[i];
970 /* precompute matrix */
971 /* for mjpeg, we do include qscale in the matrix */
972 if (s->out_format != FMT_MJPEG) {
973 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
974 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
976 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
977 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
981 if ((ret = ff_rate_control_init(s)) < 0)
984 #if FF_API_PRIVATE_OPT
985 FF_DISABLE_DEPRECATION_WARNINGS
986 if (avctx->brd_scale)
987 s->brd_scale = avctx->brd_scale;
989 if (avctx->prediction_method)
990 s->pred = avctx->prediction_method + 1;
991 FF_ENABLE_DEPRECATION_WARNINGS
/* b_frame_strategy == 2 pre-allocates downscaled frames used to estimate
 * the best B-frame count. */
994 if (s->b_frame_strategy == 2) {
995 for (i = 0; i < s->max_b_frames + 2; i++) {
996 s->tmp_frames[i] = av_frame_alloc();
997 if (!s->tmp_frames[i])
998 return AVERROR(ENOMEM);
1000 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1001 s->tmp_frames[i]->width = s->width >> s->brd_scale;
1002 s->tmp_frames[i]->height = s->height >> s->brd_scale;
1004 ret = av_frame_get_buffer(s->tmp_frames[i], 0);
/* --- Export coded-picture-buffer properties --------------------------- */
1010 cpb_props = ff_add_cpb_side_data(avctx);
1012 return AVERROR(ENOMEM);
1013 cpb_props->max_bitrate = avctx->rc_max_rate;
1014 cpb_props->min_bitrate = avctx->rc_min_rate;
1015 cpb_props->avg_bitrate = avctx->bit_rate;
1016 cpb_props->buffer_size = avctx->rc_buffer_size;
/* Free everything ff_mpv_encode_init() allocated: rate control, common
 * mpegvideo state, the MJPEG sub-encoder, scratch frames, quant tables,
 * and picture arrays.  Counterpart to ff_mpv_encode_init(). */
1021 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1023 MpegEncContext *s = avctx->priv_data;
1026 ff_rate_control_uninit(s);
1028 ff_mpv_common_end(s);
1029 if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
1030 s->out_format == FMT_MJPEG)
1031 ff_mjpeg_encode_close(s);
1033 av_freep(&avctx->extradata);
1035 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1036 av_frame_free(&s->tmp_frames[i]);
1038 ff_free_picture_tables(&s->new_picture);
1039 ff_mpeg_unref_picture(avctx, &s->new_picture);
1041 av_freep(&avctx->stats_out);
1042 av_freep(&s->ac_stats);
/* The chroma tables may alias the luma tables; only free them when they
 * are distinct allocations, then NULL both to avoid a double free. */
1044 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1045 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1046 s->q_chroma_intra_matrix= NULL;
1047 s->q_chroma_intra_matrix16= NULL;
1048 av_freep(&s->q_intra_matrix);
1049 av_freep(&s->q_inter_matrix);
1050 av_freep(&s->q_intra_matrix16);
1051 av_freep(&s->q_inter_matrix16);
1052 av_freep(&s->input_picture);
1053 av_freep(&s->reordered_input_picture);
1054 av_freep(&s->dct_offset);
/* Sum of absolute errors of a 16x16 block against a constant reference
 * value 'ref' (used as a flatness measure by get_intra_count()).
 * NOTE(review): the accumulator declaration/initialization and the return
 * statement are missing from this excerpt. */
1059 static int get_sae(uint8_t *src, int ref, int stride)
1064 for (y = 0; y < 16; y++) {
1065 for (x = 0; x < 16; x++) {
1066 acc += FFABS(src[x + y * stride] - ref);
/* Count how many 16x16 macroblocks look "intra-ish": blocks whose SAE
 * against their own mean (plus a 500 margin) is below the SAD against the
 * reference frame, i.e. blocks that predict better from a flat value than
 * from the previous frame.
 * NOTE(review): the declarations of acc/x/y/w, the width computation, and
 * the return statement are missing from this excerpt. */
1073 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1074 uint8_t *ref, int stride)
/* Round the scan area down to whole macroblocks. */
1080 h = s->height & ~15;
1082 for (y = 0; y < h; y += 16) {
1083 for (x = 0; x < w; x += 16) {
1084 int offset = x + y * stride;
1085 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
/* Block mean: pix_sum over 256 pixels, rounded (>> 8). */
1087 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1088 int sae = get_sae(src + offset, mean, stride);
1090 acc += sae + 500 < sad;
/* Thin wrapper around ff_alloc_picture() that supplies this encoder's
 * geometry and stride parameters; 'shared' selects shared-buffer mode. */
1096 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1098 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1099 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1100 s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1101 &s->linesize, &s->uvlinesize);
/* Take ownership of one user-supplied input frame: validate / derive its
 * pts, either reference it directly ("direct" path, when geometry and
 * alignment match) or copy it into an internal Picture, then insert it at
 * the tail of s->input_picture[] honoring the B-frame encoding delay. */
1104 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1106 Picture *pic = NULL;
1108 int i, display_picture_number = 0, ret;
/* frames to buffer before output: max_b_frames, else 1 unless low_delay */
1109 int encoding_delay = s->max_b_frames ? s->max_b_frames
1110 : (s->low_delay ? 0 : 1);
1111 int flush_offset = 1;
1116 display_picture_number = s->input_picture_number++;
/* pts handling: reject non-monotonic user pts, derive dts_delta, or
 * guess a pts when the user did not provide one */
1118 if (pts != AV_NOPTS_VALUE) {
1119 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1120 int64_t last = s->user_specified_pts;
1123 av_log(s->avctx, AV_LOG_ERROR,
1124 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1126 return AVERROR(EINVAL);
1129 if (!s->low_delay && display_picture_number == 1)
1130 s->dts_delta = pts - last;
1132 s->user_specified_pts = pts;
1134 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1135 s->user_specified_pts =
1136 pts = s->user_specified_pts + 1;
1137 av_log(s->avctx, AV_LOG_INFO,
1138 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1141 pts = display_picture_number;
/* "direct" use of the user buffer requires matching strides, 16-aligned
 * dimensions and STRIDE_ALIGN-aligned data/linesize */
1145 if (!pic_arg->buf[0] ||
1146 pic_arg->linesize[0] != s->linesize ||
1147 pic_arg->linesize[1] != s->uvlinesize ||
1148 pic_arg->linesize[2] != s->uvlinesize)
1150 if ((s->width & 15) || (s->height & 15))
1152 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1154 if (s->linesize & (STRIDE_ALIGN-1))
1157 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1158 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1160 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1164 pic = &s->picture[i];
1168 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1171 ret = alloc_picture(s, pic, direct);
/* if the user buffer already is our buffer + INPLACE_OFFSET, no copy is
 * needed; otherwise copy plane by plane below */
1176 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1177 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1178 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1181 int h_chroma_shift, v_chroma_shift;
1182 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1186 for (i = 0; i < 3; i++) {
1187 int src_stride = pic_arg->linesize[i];
1188 int dst_stride = i ? s->uvlinesize : s->linesize;
1189 int h_shift = i ? h_chroma_shift : 0;
1190 int v_shift = i ? v_chroma_shift : 0;
1191 int w = s->width >> h_shift;
1192 int h = s->height >> v_shift;
1193 uint8_t *src = pic_arg->data[i];
1194 uint8_t *dst = pic->f->data[i];
/* interlaced MPEG-2 with 32-aligned height may need extra bottom padding */
1197 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1198 && !s->progressive_sequence
1199 && FFALIGN(s->height, 32) - s->height > 16)
1202 if (!s->avctx->rc_buffer_size)
1203 dst += INPLACE_OFFSET;
/* fast path: identical strides allow one bulk memcpy per plane */
1205 if (src_stride == dst_stride)
1206 memcpy(dst, src, src_stride * h);
1209 uint8_t *dst2 = dst;
1211 memcpy(dst2, src, w);
/* pad edges when dimensions are not multiples of the block/vpad size */
1216 if ((s->width & 15) || (s->height & (vpad-1))) {
1217 s->mpvencdsp.draw_edges(dst, dst_stride,
1227 ret = av_frame_copy_props(pic->f, pic_arg);
1231 pic->f->display_picture_number = display_picture_number;
1232 pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1234 /* Flushing: When we have not received enough input frames,
1235 * ensure s->input_picture[0] contains the first picture */
1236 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1237 if (s->input_picture[flush_offset])
1240 if (flush_offset <= 1)
1243 encoding_delay = encoding_delay - flush_offset + 1;
1246 /* shift buffer entries */
1247 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1248 s->input_picture[i - flush_offset] = s->input_picture[i];
1250 s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether frame p may be skipped relative to ref: compare every
 * 8x8 block of all three planes with frame_skip_cmp[1] and fold the
 * per-block scores with the metric selected by |frame_skip_exp|
 * (0: max, 1: sum of abs, 2..4: higher powers). The aggregate is then
 * tested against frame_skip_threshold and frame_skip_factor*lambda. */
1255 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1259 int64_t score64 = 0;
1261 for (plane = 0; plane < 3; plane++) {
1262 const int stride = p->f->linesize[plane];
/* luma plane has twice as many 8x8 blocks per mb row/col as chroma */
1263 const int bw = plane ? 1 : 2;
1264 for (y = 0; y < s->mb_height * bw; y++) {
1265 for (x = 0; x < s->mb_width * bw; x++) {
/* non-shared frames carry an offset — presumably INPLACE_OFFSET; confirm */
1266 int off = p->shared ? 0 : 16;
1267 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1268 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1269 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1271 switch (FFABS(s->frame_skip_exp)) {
1272 case 0: score = FFMAX(score, v); break;
1273 case 1: score += FFABS(v); break;
1274 case 2: score64 += v * (int64_t)v; break;
1275 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1276 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
/* negative exponent: normalize per macroblock and take the inverse power */
1285 if (s->frame_skip_exp < 0)
1286 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1287 -1.0/s->frame_skip_exp);
1289 if (score64 < s->frame_skip_threshold)
1291 if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
/* Encode a single frame with an auxiliary encoder context (used by the
 * B-frame strategy below) via the send_frame/receive_packet API; the
 * received packet is unreffed immediately — only its size matters. */
1296 static int encode_frame(AVCodecContext *c, AVFrame *frame, AVPacket *pkt)
1301 ret = avcodec_send_frame(c, frame);
1306 ret = avcodec_receive_packet(c, pkt);
1309 av_packet_unref(pkt);
/* EAGAIN/EOF from receive are expected and not treated as errors */
1310 } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
/* Brute-force estimate of the best number of consecutive B-frames:
 * downscale the buffered input pictures by 2^brd_scale into tmp_frames,
 * then for each candidate count j actually encode the mini-GOP with a
 * throw-away encoder context and pick the j with the lowest rate-distortion
 * cost (bits weighted by lambda2 plus the encoder-reported SSE). */
1317 static int estimate_best_b_count(MpegEncContext *s)
1319 const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1321 const int scale = s->brd_scale;
1322 int width = s->width >> scale;
1323 int height = s->height >> scale;
1324 int i, j, out_size, p_lambda, b_lambda, lambda2;
1325 int64_t best_rd = INT64_MAX;
1326 int best_b_count = -1;
1329 av_assert0(scale >= 0 && scale <= 3);
1331 pkt = av_packet_alloc();
1333 return AVERROR(ENOMEM);
/* reuse the lambdas of the last P/B frames as quality targets */
1336 //s->next_picture_ptr->quality;
1337 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1338 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1339 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1340 if (!b_lambda) // FIXME we should do this somewhere else
1341 b_lambda = p_lambda;
1342 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* downscale reference + buffered input pictures into tmp_frames[] */
1345 for (i = 0; i < s->max_b_frames + 2; i++) {
1346 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1347 s->next_picture_ptr;
1350 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1351 pre_input = *pre_input_ptr;
1352 memcpy(data, pre_input_ptr->f->data, sizeof(data));
/* non-shared input pictures are stored with an INPLACE_OFFSET */
1354 if (!pre_input.shared && i) {
1355 data[0] += INPLACE_OFFSET;
1356 data[1] += INPLACE_OFFSET;
1357 data[2] += INPLACE_OFFSET;
1360 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1361 s->tmp_frames[i]->linesize[0],
1363 pre_input.f->linesize[0],
1365 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1366 s->tmp_frames[i]->linesize[1],
1368 pre_input.f->linesize[1],
1369 width >> 1, height >> 1);
1370 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1371 s->tmp_frames[i]->linesize[2],
1373 pre_input.f->linesize[2],
1374 width >> 1, height >> 1);
/* try each candidate B-frame count j with a fresh encoder context */
1378 for (j = 0; j < s->max_b_frames + 1; j++) {
1382 if (!s->input_picture[j])
1385 c = avcodec_alloc_context3(NULL);
1387 ret = AVERROR(ENOMEM);
/* mirror the relevant settings of the real encoder context */
1393 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1394 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1395 c->mb_decision = s->avctx->mb_decision;
1396 c->me_cmp = s->avctx->me_cmp;
1397 c->mb_cmp = s->avctx->mb_cmp;
1398 c->me_sub_cmp = s->avctx->me_sub_cmp;
1399 c->pix_fmt = AV_PIX_FMT_YUV420P;
1400 c->time_base = s->avctx->time_base;
1401 c->max_b_frames = s->max_b_frames;
1403 ret = avcodec_open2(c, codec, NULL);
/* the leading I-frame is encoded but excluded from the RD comparison */
1408 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1409 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1411 out_size = encode_frame(c, s->tmp_frames[0], pkt);
1417 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1419 for (i = 0; i < s->max_b_frames + 1; i++) {
/* every (j+1)-th frame (and the last) is a P, the rest are Bs */
1420 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1422 s->tmp_frames[i + 1]->pict_type = is_p ?
1423 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1424 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1426 out_size = encode_frame(c, s->tmp_frames[i + 1], pkt);
1432 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1435 /* get the delayed frames */
1436 out_size = encode_frame(c, NULL, pkt);
1441 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* add the encoder-reported distortion (PSNR errors) to the cost */
1443 rd += c->error[0] + c->error[1] + c->error[2];
1451 avcodec_free_context(&c);
1452 av_packet_unref(pkt);
1459 av_packet_free(&pkt);
1461 return best_b_count;
/* Pick the next picture to code: apply frame-skip, decide the picture type
 * (forced I when there is no reference or intra-only; otherwise choose the
 * B-frame run length by the configured strategy), reorder input pictures
 * into coding order, and set up new_picture / current_picture for coding. */
1464 static int select_input_picture(MpegEncContext *s)
/* advance the reorder queue by one slot */
1468 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1469 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1470 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1472 /* set next picture type & ordering */
1473 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1474 if (s->frame_skip_threshold || s->frame_skip_factor) {
1475 if (s->picture_in_gop_number < s->gop_size &&
1476 s->next_picture_ptr &&
1477 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1478 // FIXME check that the gop check above is +-1 correct
1479 av_frame_unref(s->input_picture[0]->f);
/* tell rate control a zero-size frame was emitted */
1481 ff_vbv_update(s, 0);
1487 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1488 !s->next_picture_ptr || s->intra_only) {
/* no reference available (or intra-only) -> code as I frame */
1489 s->reordered_input_picture[0] = s->input_picture[0];
1490 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1491 s->reordered_input_picture[0]->f->coded_picture_number =
1492 s->coded_picture_number++;
/* two-pass mode: take picture types from the first-pass log */
1496 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1497 for (i = 0; i < s->max_b_frames + 1; i++) {
1498 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1500 if (pict_num >= s->rc_context.num_entries)
1502 if (!s->input_picture[i]) {
1503 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1507 s->input_picture[i]->f->pict_type =
1508 s->rc_context.entry[pict_num].new_pict_type;
/* B-frame run length: 0 = fixed max, 1 = intra-count heuristic,
 * 2 = brute-force RD search (estimate_best_b_count) */
1512 if (s->b_frame_strategy == 0) {
1513 b_frames = s->max_b_frames;
1514 while (b_frames && !s->input_picture[b_frames])
1516 } else if (s->b_frame_strategy == 1) {
1517 for (i = 1; i < s->max_b_frames + 1; i++) {
1518 if (s->input_picture[i] &&
1519 s->input_picture[i]->b_frame_score == 0) {
1520 s->input_picture[i]->b_frame_score =
1522 s->input_picture[i ]->f->data[0],
1523 s->input_picture[i - 1]->f->data[0],
1527 for (i = 0; i < s->max_b_frames + 1; i++) {
1528 if (!s->input_picture[i] ||
1529 s->input_picture[i]->b_frame_score - 1 >
1530 s->mb_num / s->b_sensitivity)
1534 b_frames = FFMAX(0, i - 1);
/* reset scores so the next run is re-evaluated */
1537 for (i = 0; i < b_frames + 1; i++) {
1538 s->input_picture[i]->b_frame_score = 0;
1540 } else if (s->b_frame_strategy == 2) {
1541 b_frames = estimate_best_b_count(s);
/* a user-forced non-B type inside the run cuts the run short */
1548 for (i = b_frames - 1; i >= 0; i--) {
1549 int type = s->input_picture[i]->f->pict_type;
1550 if (type && type != AV_PICTURE_TYPE_B)
1553 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1554 b_frames == s->max_b_frames) {
1555 av_log(s->avctx, AV_LOG_ERROR,
1556 "warning, too many B-frames in a row\n");
/* GOP boundary handling: start a new GOP (I frame), optionally strict */
1559 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1560 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1561 s->gop_size > s->picture_in_gop_number) {
1562 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1564 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1566 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1570 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1571 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
/* coding order: the anchor (P/I) first, then the B frames */
1574 s->reordered_input_picture[0] = s->input_picture[b_frames];
1575 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1576 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1577 s->reordered_input_picture[0]->f->coded_picture_number =
1578 s->coded_picture_number++;
1579 for (i = 0; i < b_frames; i++) {
1580 s->reordered_input_picture[i + 1] = s->input_picture[i];
1581 s->reordered_input_picture[i + 1]->f->pict_type =
1583 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1584 s->coded_picture_number++;
1589 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1591 if (s->reordered_input_picture[0]) {
/* reference=3 marks forward+backward reference; B frames are not refs */
1592 s->reordered_input_picture[0]->reference =
1593 s->reordered_input_picture[0]->f->pict_type !=
1594 AV_PICTURE_TYPE_B ? 3 : 0;
1596 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1599 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1600 // input is a shared pix, so we can't modify it -> allocate a new
1601 // one & ensure that the shared one is reuseable
1604 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1607 pic = &s->picture[i];
1609 pic->reference = s->reordered_input_picture[0]->reference;
1610 if (alloc_picture(s, pic, 0) < 0) {
1614 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1618 /* mark us unused / free shared pic */
1619 av_frame_unref(s->reordered_input_picture[0]->f);
1620 s->reordered_input_picture[0]->shared = 0;
1622 s->current_picture_ptr = pic;
1624 // input is not a shared pix -> reuse buffer for current_pix
1625 s->current_picture_ptr = s->reordered_input_picture[0];
1626 for (i = 0; i < 4; i++) {
1627 if (s->new_picture.f->data[i])
1628 s->new_picture.f->data[i] += INPLACE_OFFSET;
1631 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1632 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1633 s->current_picture_ptr)) < 0)
1636 s->picture_number = s->new_picture.f->display_picture_number;
/* Post-encode bookkeeping: pad the reconstructed reference frame's edges
 * for unrestricted motion vectors, remember per-type lambdas for the next
 * frame, and mirror stats into the deprecated coded_frame/error fields. */
1641 static void frame_end(MpegEncContext *s)
1643 if (s->unrestricted_mv &&
1644 s->current_picture.reference &&
1646 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1647 int hshift = desc->log2_chroma_w;
1648 int vshift = desc->log2_chroma_h;
/* replicate border pixels on all planes (chroma scaled by its shifts) */
1649 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1650 s->current_picture.f->linesize[0],
1651 s->h_edge_pos, s->v_edge_pos,
1652 EDGE_WIDTH, EDGE_WIDTH,
1653 EDGE_TOP | EDGE_BOTTOM);
1654 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1655 s->current_picture.f->linesize[1],
1656 s->h_edge_pos >> hshift,
1657 s->v_edge_pos >> vshift,
1658 EDGE_WIDTH >> hshift,
1659 EDGE_WIDTH >> vshift,
1660 EDGE_TOP | EDGE_BOTTOM);
1661 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1662 s->current_picture.f->linesize[2],
1663 s->h_edge_pos >> hshift,
1664 s->v_edge_pos >> vshift,
1665 EDGE_WIDTH >> hshift,
1666 EDGE_WIDTH >> vshift,
1667 EDGE_TOP | EDGE_BOTTOM);
/* remember this frame's type/lambda for future rate decisions */
1672 s->last_pict_type = s->pict_type;
1673 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1674 if (s->pict_type!= AV_PICTURE_TYPE_B)
1675 s->last_non_b_pict_type = s->pict_type;
/* legacy API mirrors, compiled only while the deprecated fields exist */
1677 #if FF_API_CODED_FRAME
1678 FF_DISABLE_DEPRECATION_WARNINGS
1679 av_frame_unref(s->avctx->coded_frame);
1680 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1681 FF_ENABLE_DEPRECATION_WARNINGS
1683 #if FF_API_ERROR_FRAME
1684 FF_DISABLE_DEPRECATION_WARNINGS
1685 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1686 sizeof(s->current_picture.encoding_error));
1687 FF_ENABLE_DEPRECATION_WARNINGS
/* Refresh the per-coefficient DCT offsets used for noise reduction from the
 * accumulated quantization-error statistics; halve the accumulators once
 * the sample count exceeds 2^16 to keep a decaying average. */
1691 static void update_noise_reduction(MpegEncContext *s)
1695 for (intra = 0; intra < 2; intra++) {
1696 if (s->dct_count[intra] > (1 << 16)) {
1697 for (i = 0; i < 64; i++) {
1698 s->dct_error_sum[intra][i] >>= 1;
1700 s->dct_count[intra] >>= 1;
/* offset ~= noise_reduction * count / error_sum, rounded */
1703 for (i = 0; i < 64; i++) {
1704 s->dct_offset[intra][i] = (s->noise_reduction *
1705 s->dct_count[intra] +
1706 s->dct_error_sum[intra][i] / 2) /
1707 (s->dct_error_sum[intra][i] + 1);
/* Per-frame setup before coding: rotate last/next/current reference
 * pictures, adjust plane pointers for field pictures, select the matching
 * dct_unquantize functions and refresh noise-reduction tables. */
1712 static int frame_start(MpegEncContext *s)
1716 /* mark & release old frames */
1717 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1718 s->last_picture_ptr != s->next_picture_ptr &&
1719 s->last_picture_ptr->f->buf[0]) {
1720 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1723 s->current_picture_ptr->f->pict_type = s->pict_type;
1724 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1726 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1727 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1728 s->current_picture_ptr)) < 0)
/* non-B frames advance the reference chain: next becomes last */
1731 if (s->pict_type != AV_PICTURE_TYPE_B) {
1732 s->last_picture_ptr = s->next_picture_ptr;
1734 s->next_picture_ptr = s->current_picture_ptr;
1737 if (s->last_picture_ptr) {
1738 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1739 if (s->last_picture_ptr->f->buf[0] &&
1740 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1741 s->last_picture_ptr)) < 0)
1744 if (s->next_picture_ptr) {
1745 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1746 if (s->next_picture_ptr->f->buf[0] &&
1747 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1748 s->next_picture_ptr)) < 0)
/* field pictures: double the line strides and, for the bottom field,
 * start one line down */
1752 if (s->picture_structure!= PICT_FRAME) {
1754 for (i = 0; i < 4; i++) {
1755 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1756 s->current_picture.f->data[i] +=
1757 s->current_picture.f->linesize[i];
1759 s->current_picture.f->linesize[i] *= 2;
1760 s->last_picture.f->linesize[i] *= 2;
1761 s->next_picture.f->linesize[i] *= 2;
/* pick the unquantizer matching the output bitstream format */
1765 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1766 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1767 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1768 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1769 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1770 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1772 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1773 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1776 if (s->dct_error_sum) {
1777 av_assert2(s->noise_reduction && s->encoding);
1778 update_noise_reduction(s);
/* Public encode entry point: buffer/reorder the input picture, encode the
 * selected picture into pkt (re-encoding with a higher lambda if the VBV
 * buffer would overflow), append stuffing bits, patch the MPEG-1/2
 * vbv_delay for CBR, and fill packet pts/dts/flags and side data. */
1784 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1785 const AVFrame *pic_arg, int *got_packet)
1787 MpegEncContext *s = avctx->priv_data;
1788 int i, stuffing_count, ret;
1789 int context_count = s->slice_context_count;
1791 s->vbv_ignore_qmax = 0;
1793 s->picture_in_gop_number++;
1795 if (load_input_picture(s, pic_arg) < 0)
1798 if (select_input_picture(s) < 0) {
/* output? — only if select_input_picture produced a picture to code */
1803 if (s->new_picture.f->data[0]) {
1804 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1805 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1807 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1808 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
/* H.263 macroblock-info side data, 12 bytes per MB */
1811 s->mb_info_ptr = av_packet_new_side_data(pkt,
1812 AV_PKT_DATA_H263_MB_INFO,
1813 s->mb_width*s->mb_height*12);
1814 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* give each slice thread a proportional slice of the output buffer */
1817 for (i = 0; i < context_count; i++) {
1818 int start_y = s->thread_context[i]->start_mb_y;
1819 int end_y = s->thread_context[i]-> end_mb_y;
1820 int h = s->mb_height;
1821 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1822 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1824 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1827 s->pict_type = s->new_picture.f->pict_type;
1829 ret = frame_start(s);
1833 ret = encode_picture(s, s->picture_number);
1834 if (growing_buffer) {
1835 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1836 pkt->data = s->pb.buf;
1837 pkt->size = avctx->internal->byte_buffer_size;
/* legacy per-frame bit statistics on the AVCodecContext */
1842 #if FF_API_STAT_BITS
1843 FF_DISABLE_DEPRECATION_WARNINGS
1844 avctx->header_bits = s->header_bits;
1845 avctx->mv_bits = s->mv_bits;
1846 avctx->misc_bits = s->misc_bits;
1847 avctx->i_tex_bits = s->i_tex_bits;
1848 avctx->p_tex_bits = s->p_tex_bits;
1849 avctx->i_count = s->i_count;
1850 // FIXME f/b_count in avctx
1851 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1852 avctx->skip_count = s->skip_count;
1853 FF_ENABLE_DEPRECATION_WARNINGS
1858 if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->out_format == FMT_MJPEG)
1859 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
/* VBV check: if the frame is too big, raise lambda and re-encode */
1861 if (avctx->rc_buffer_size) {
1862 RateControlContext *rcc = &s->rc_context;
1863 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1864 int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
1865 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1867 if (put_bits_count(&s->pb) > max_size &&
1868 s->lambda < s->lmax) {
1869 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1870 (s->qscale + 1) / s->qscale);
1871 if (s->adaptive_quant) {
1873 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1874 s->lambda_table[i] =
1875 FFMAX(s->lambda_table[i] + min_step,
1876 s->lambda_table[i] * (s->qscale + 1) /
1879 s->mb_skipped = 0; // done in frame_start()
1880 // done in encode_picture() so we must undo it
1881 if (s->pict_type == AV_PICTURE_TYPE_P) {
1882 if (s->flipflop_rounding ||
1883 s->codec_id == AV_CODEC_ID_H263P ||
1884 s->codec_id == AV_CODEC_ID_MPEG4)
1885 s->no_rounding ^= 1;
1887 if (s->pict_type != AV_PICTURE_TYPE_B) {
1888 s->time_base = s->last_time_base;
1889 s->last_non_b_time = s->time - s->pp_time;
1891 for (i = 0; i < context_count; i++) {
1892 PutBitContext *pb = &s->thread_context[i]->pb;
1893 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1895 s->vbv_ignore_qmax = 1;
1896 av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1900 av_assert0(avctx->rc_max_rate);
1903 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1904 ff_write_pass1_stats(s);
1906 for (i = 0; i < 4; i++) {
1907 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1908 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1910 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1911 s->current_picture_ptr->encoding_error,
1912 (avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1915 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1916 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1917 s->misc_bits + s->i_tex_bits +
1919 flush_put_bits(&s->pb);
1920 s->frame_bits = put_bits_count(&s->pb);
1922 stuffing_count = ff_vbv_update(s, s->frame_bits);
1923 s->stuffing_bits = 8*stuffing_count;
/* append codec-specific stuffing so the VBV model stays consistent */
1924 if (stuffing_count) {
1925 if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
1926 av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
1930 switch (s->codec_id) {
1931 case AV_CODEC_ID_MPEG1VIDEO:
1932 case AV_CODEC_ID_MPEG2VIDEO:
1933 while (stuffing_count--) {
1934 put_bits(&s->pb, 8, 0);
1937 case AV_CODEC_ID_MPEG4:
/* MPEG-4 stuffing: start code 0x000001C3 then 0xFF bytes */
1938 put_bits(&s->pb, 16, 0);
1939 put_bits(&s->pb, 16, 0x1C3);
1940 stuffing_count -= 4;
1941 while (stuffing_count--) {
1942 put_bits(&s->pb, 8, 0xFF);
1946 av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1948 flush_put_bits(&s->pb);
1949 s->frame_bits = put_bits_count(&s->pb);
1952 /* update MPEG-1/2 vbv_delay for CBR */
1953 if (avctx->rc_max_rate &&
1954 avctx->rc_min_rate == avctx->rc_max_rate &&
1955 s->out_format == FMT_MPEG1 &&
1956 90000LL * (avctx->rc_buffer_size - 1) <=
1957 avctx->rc_max_rate * 0xFFFFLL) {
1958 AVCPBProperties *props;
1961 int vbv_delay, min_delay;
1962 double inbits = avctx->rc_max_rate *
1963 av_q2d(avctx->time_base);
1964 int minbits = s->frame_bits - 8 *
1965 (s->vbv_delay_ptr - s->pb.buf - 1);
1966 double bits = s->rc_context.buffer_index + minbits - inbits;
1969 av_log(avctx, AV_LOG_ERROR,
1970 "Internal error, negative bits\n");
1972 av_assert1(s->repeat_first_field == 0);
/* vbv_delay is expressed in 90 kHz clock ticks */
1974 vbv_delay = bits * 90000 / avctx->rc_max_rate;
1975 min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
1978 vbv_delay = FFMAX(vbv_delay, min_delay);
1980 av_assert0(vbv_delay < 0xFFFF);
/* patch the 16-bit vbv_delay field in-place in the written header */
1982 s->vbv_delay_ptr[0] &= 0xF8;
1983 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1984 s->vbv_delay_ptr[1] = vbv_delay >> 5;
1985 s->vbv_delay_ptr[2] &= 0x07;
1986 s->vbv_delay_ptr[2] |= vbv_delay << 3;
1988 props = av_cpb_properties_alloc(&props_size);
1990 return AVERROR(ENOMEM);
/* CPB side data uses the 27 MHz clock: 90 kHz ticks * 300 */
1991 props->vbv_delay = vbv_delay * 300;
1993 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
1994 (uint8_t*)props, props_size);
2000 #if FF_API_VBV_DELAY
2001 FF_DISABLE_DEPRECATION_WARNINGS
2002 avctx->vbv_delay = vbv_delay * 300;
2003 FF_ENABLE_DEPRECATION_WARNINGS
2006 s->total_bits += s->frame_bits;
2007 #if FF_API_STAT_BITS
2008 FF_DISABLE_DEPRECATION_WARNINGS
2009 avctx->frame_bits = s->frame_bits;
2010 FF_ENABLE_DEPRECATION_WARNINGS
/* timestamps: with B-frames, dts of this packet is the previous pts */
2014 pkt->pts = s->current_picture.f->pts;
2015 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2016 if (!s->current_picture.f->coded_picture_number)
2017 pkt->dts = pkt->pts - s->dts_delta;
2019 pkt->dts = s->reordered_pts;
2020 s->reordered_pts = pkt->pts;
2022 pkt->dts = pkt->pts;
2023 if (s->current_picture.f->key_frame)
2024 pkt->flags |= AV_PKT_FLAG_KEY;
2026 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2031 /* release non-reference frames */
2032 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2033 if (!s->picture[i].reference)
2034 ff_mpeg_unref_picture(avctx, &s->picture[i]);
2037 av_assert1((s->frame_bits & 7) == 0);
2039 pkt->size = s->frame_bits / 8;
2040 *got_packet = !!pkt->size;
/* Eliminate blocks whose coefficients are barely significant: score each
 * nonzero coefficient with a position-dependent weight (tab, in scan
 * order; level>1 is always kept) and, if the total stays below
 * `threshold`, zero the block (keeping the DC term when skip_dc is set). */
2044 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2045 int n, int threshold)
/* weight per scan position: early (low-frequency) coeffs count more */
2047 static const char tab[64] = {
2048 3, 2, 2, 1, 1, 1, 1, 1,
2049 1, 1, 1, 1, 1, 1, 1, 1,
2050 1, 1, 1, 1, 1, 1, 1, 1,
2051 0, 0, 0, 0, 0, 0, 0, 0,
2052 0, 0, 0, 0, 0, 0, 0, 0,
2053 0, 0, 0, 0, 0, 0, 0, 0,
2054 0, 0, 0, 0, 0, 0, 0, 0,
2055 0, 0, 0, 0, 0, 0, 0, 0
2060 int16_t *block = s->block[n];
2061 const int last_index = s->block_last_index[n];
/* negative threshold means: preserve the DC coefficient */
2064 if (threshold < 0) {
2066 threshold = -threshold;
2070 /* Are all we could set to zero already zero? */
2071 if (last_index <= skip_dc - 1)
2074 for (i = 0; i <= last_index; i++) {
2075 const int j = s->intra_scantable.permutated[i];
2076 const int level = FFABS(block[j]);
2078 if (skip_dc && i == 0)
2082 } else if (level > 1) {
/* any coefficient with |level| > 1 makes the block significant */
2088 if (score >= threshold)
/* below threshold: clear all (non-DC) coefficients */
2090 for (i = skip_dc; i <= last_index; i++) {
2091 const int j = s->intra_scantable.permutated[i];
2095 s->block_last_index[n] = 0;
2097 s->block_last_index[n] = -1;
/* Clamp quantized coefficients of one block into [min_qcoeff, max_qcoeff];
 * the intra DC coefficient is never clipped. Warns once per call when
 * clipping occurred and simple MB decision is in use. */
2100 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2104 const int maxlevel = s->max_qcoeff;
2105 const int minlevel = s->min_qcoeff;
2109 i = 1; // skip clipping of intra dc
2113 for (; i <= last_index; i++) {
2114 const int j = s->intra_scantable.permutated[i];
2115 int level = block[j];
2117 if (level > maxlevel) {
2120 } else if (level < minlevel) {
2128 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2129 av_log(s->avctx, AV_LOG_INFO,
2130 "warning, clipping %d dct coefficients to %d..%d\n",
2131 overflow, minlevel, maxlevel);
/* Compute a per-pixel visual weight for an 8x8 block: for each pixel,
 * measure the standard deviation over its 3x3 neighborhood (clamped at the
 * block border) and scale it — flat areas get low weight, busy areas high.
 * Used by the noise-shaping quantizer (dct_quantize_refine). */
2134 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2138 for (y = 0; y < 8; y++) {
2139 for (x = 0; x < 8; x++) {
/* 3x3 neighborhood, clipped to the 8x8 block bounds */
2145 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2146 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2147 int v = ptr[x2 + y2 * stride];
/* 36*sqrt(count*sum_sq - sum^2)/count ~ scaled local std deviation */
2153 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2158 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2159 int motion_x, int motion_y,
2160 int mb_block_height,
2164 int16_t weight[12][64];
2165 int16_t orig[12][64];
2166 const int mb_x = s->mb_x;
2167 const int mb_y = s->mb_y;
2170 int dct_offset = s->linesize * 8; // default for progressive frames
2171 int uv_dct_offset = s->uvlinesize * 8;
2172 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2173 ptrdiff_t wrap_y, wrap_c;
2175 for (i = 0; i < mb_block_count; i++)
2176 skip_dct[i] = s->skipdct;
2178 if (s->adaptive_quant) {
2179 const int last_qp = s->qscale;
2180 const int mb_xy = mb_x + mb_y * s->mb_stride;
2182 s->lambda = s->lambda_table[mb_xy];
2185 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2186 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2187 s->dquant = s->qscale - last_qp;
2189 if (s->out_format == FMT_H263) {
2190 s->dquant = av_clip(s->dquant, -2, 2);
2192 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2194 if (s->pict_type == AV_PICTURE_TYPE_B) {
2195 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2198 if (s->mv_type == MV_TYPE_8X8)
2204 ff_set_qscale(s, last_qp + s->dquant);
2205 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2206 ff_set_qscale(s, s->qscale + s->dquant);
2208 wrap_y = s->linesize;
2209 wrap_c = s->uvlinesize;
2210 ptr_y = s->new_picture.f->data[0] +
2211 (mb_y * 16 * wrap_y) + mb_x * 16;
2212 ptr_cb = s->new_picture.f->data[1] +
2213 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2214 ptr_cr = s->new_picture.f->data[2] +
2215 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2217 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2218 uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2219 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2220 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2221 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2223 16, 16, mb_x * 16, mb_y * 16,
2224 s->width, s->height);
2226 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2228 mb_block_width, mb_block_height,
2229 mb_x * mb_block_width, mb_y * mb_block_height,
2231 ptr_cb = ebuf + 16 * wrap_y;
2232 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2234 mb_block_width, mb_block_height,
2235 mb_x * mb_block_width, mb_y * mb_block_height,
2237 ptr_cr = ebuf + 16 * wrap_y + 16;
2241 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2242 int progressive_score, interlaced_score;
2244 s->interlaced_dct = 0;
2245 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2246 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2247 NULL, wrap_y, 8) - 400;
2249 if (progressive_score > 0) {
2250 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2251 NULL, wrap_y * 2, 8) +
2252 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2253 NULL, wrap_y * 2, 8);
2254 if (progressive_score > interlaced_score) {
2255 s->interlaced_dct = 1;
2257 dct_offset = wrap_y;
2258 uv_dct_offset = wrap_c;
2260 if (s->chroma_format == CHROMA_422 ||
2261 s->chroma_format == CHROMA_444)
2267 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2268 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2269 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2270 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2272 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2276 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2277 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2278 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2279 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2280 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2281 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2282 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2283 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2284 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2285 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2286 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2287 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2291 op_pixels_func (*op_pix)[4];
2292 qpel_mc_func (*op_qpix)[16];
2293 uint8_t *dest_y, *dest_cb, *dest_cr;
2295 dest_y = s->dest[0];
2296 dest_cb = s->dest[1];
2297 dest_cr = s->dest[2];
2299 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2300 op_pix = s->hdsp.put_pixels_tab;
2301 op_qpix = s->qdsp.put_qpel_pixels_tab;
2303 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2304 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2307 if (s->mv_dir & MV_DIR_FORWARD) {
2308 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2309 s->last_picture.f->data,
2311 op_pix = s->hdsp.avg_pixels_tab;
2312 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2314 if (s->mv_dir & MV_DIR_BACKWARD) {
2315 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2316 s->next_picture.f->data,
2320 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2321 int progressive_score, interlaced_score;
2323 s->interlaced_dct = 0;
2324 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2325 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2329 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2330 progressive_score -= 400;
2332 if (progressive_score > 0) {
2333 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2335 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2339 if (progressive_score > interlaced_score) {
2340 s->interlaced_dct = 1;
2342 dct_offset = wrap_y;
2343 uv_dct_offset = wrap_c;
2345 if (s->chroma_format == CHROMA_422)
2351 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2352 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2353 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2354 dest_y + dct_offset, wrap_y);
2355 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2356 dest_y + dct_offset + 8, wrap_y);
2358 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2362 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2363 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2364 if (!s->chroma_y_shift) { /* 422 */
2365 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2366 dest_cb + uv_dct_offset, wrap_c);
2367 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2368 dest_cr + uv_dct_offset, wrap_c);
2371 /* pre quantization */
2372 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2373 2 * s->qscale * s->qscale) {
2375 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2377 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2379 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2380 wrap_y, 8) < 20 * s->qscale)
2382 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2383 wrap_y, 8) < 20 * s->qscale)
2385 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2387 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2389 if (!s->chroma_y_shift) { /* 422 */
2390 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2391 dest_cb + uv_dct_offset,
2392 wrap_c, 8) < 20 * s->qscale)
2394 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2395 dest_cr + uv_dct_offset,
2396 wrap_c, 8) < 20 * s->qscale)
2402 if (s->quantizer_noise_shaping) {
2404 get_visual_weight(weight[0], ptr_y , wrap_y);
2406 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2408 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2410 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2412 get_visual_weight(weight[4], ptr_cb , wrap_c);
2414 get_visual_weight(weight[5], ptr_cr , wrap_c);
2415 if (!s->chroma_y_shift) { /* 422 */
2417 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2420 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2423 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2426 /* DCT & quantize */
2427 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2429 for (i = 0; i < mb_block_count; i++) {
2432 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2433 // FIXME we could decide to change to quantizer instead of
2435 // JS: I don't think that would be a good idea it could lower
2436 // quality instead of improve it. Just INTRADC clipping
2437 // deserves changes in quantizer
2439 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2441 s->block_last_index[i] = -1;
2443 if (s->quantizer_noise_shaping) {
2444 for (i = 0; i < mb_block_count; i++) {
2446 s->block_last_index[i] =
2447 dct_quantize_refine(s, s->block[i], weight[i],
2448 orig[i], i, s->qscale);
2453 if (s->luma_elim_threshold && !s->mb_intra)
2454 for (i = 0; i < 4; i++)
2455 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2456 if (s->chroma_elim_threshold && !s->mb_intra)
2457 for (i = 4; i < mb_block_count; i++)
2458 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2460 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2461 for (i = 0; i < mb_block_count; i++) {
2462 if (s->block_last_index[i] == -1)
2463 s->coded_score[i] = INT_MAX / 256;
2468 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2469 s->block_last_index[4] =
2470 s->block_last_index[5] = 0;
2472 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2473 if (!s->chroma_y_shift) { /* 422 / 444 */
2474 for (i=6; i<12; i++) {
2475 s->block_last_index[i] = 0;
2476 s->block[i][0] = s->block[4][0];
2481 // non c quantize code returns incorrect block_last_index FIXME
2482 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2483 for (i = 0; i < mb_block_count; i++) {
2485 if (s->block_last_index[i] > 0) {
2486 for (j = 63; j > 0; j--) {
2487 if (s->block[i][s->intra_scantable.permutated[j]])
2490 s->block_last_index[i] = j;
2495 /* huffman encode */
2496 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2497 case AV_CODEC_ID_MPEG1VIDEO:
2498 case AV_CODEC_ID_MPEG2VIDEO:
2499 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2500 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2502 case AV_CODEC_ID_MPEG4:
2503 if (CONFIG_MPEG4_ENCODER)
2504 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2506 case AV_CODEC_ID_MSMPEG4V2:
2507 case AV_CODEC_ID_MSMPEG4V3:
2508 case AV_CODEC_ID_WMV1:
2509 if (CONFIG_MSMPEG4_ENCODER)
2510 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2512 case AV_CODEC_ID_WMV2:
2513 if (CONFIG_WMV2_ENCODER)
2514 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2516 case AV_CODEC_ID_H261:
2517 if (CONFIG_H261_ENCODER)
2518 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2520 case AV_CODEC_ID_H263:
2521 case AV_CODEC_ID_H263P:
2522 case AV_CODEC_ID_FLV1:
2523 case AV_CODEC_ID_RV10:
2524 case AV_CODEC_ID_RV20:
2525 if (CONFIG_H263_ENCODER)
2526 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2528 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
2529 case AV_CODEC_ID_MJPEG:
2530 case AV_CODEC_ID_AMV:
2531 ff_mjpeg_encode_mb(s, s->block);
2534 case AV_CODEC_ID_SPEEDHQ:
2535 if (CONFIG_SPEEDHQ_ENCODER)
2536 ff_speedhq_encode_mb(s, s->block);
2543 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2545 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2546 else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2547 else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/* Snapshot the parts of the encoder context that encode_mb() mutates
 * (prediction state, bit-usage statistics, quantizer state) so that a
 * candidate macroblock coding can later be rolled back or compared.
 * Mirrored by copy_context_after_encode(). */
2550 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
    /* motion-vector prediction state */
2553 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2556 d->mb_skip_run= s->mb_skip_run;
2558 d->last_dc[i] = s->last_dc[i];
    /* per-category bit counters (used for rate statistics / 2-pass) */
2561 d->mv_bits= s->mv_bits;
2562 d->i_tex_bits= s->i_tex_bits;
2563 d->p_tex_bits= s->p_tex_bits;
2564 d->i_count= s->i_count;
2565 d->f_count= s->f_count;
2566 d->b_count= s->b_count;
2567 d->skip_count= s->skip_count;
2568 d->misc_bits= s->misc_bits;
    /* quantizer state */
2572 d->qscale= s->qscale;
2573 d->dquant= s->dquant;
2575 d->esc3_level_length= s->esc3_level_length;
/* Counterpart of copy_context_before_encode(): copy back the encoder
 * state after a candidate macroblock coding, including the decisions
 * made while encoding (intra flag, mv type/dir, block last indexes). */
2578 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2581 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2582 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2585 d->mb_skip_run= s->mb_skip_run;
2587 d->last_dc[i] = s->last_dc[i];
    /* per-category bit counters */
2590 d->mv_bits= s->mv_bits;
2591 d->i_tex_bits= s->i_tex_bits;
2592 d->p_tex_bits= s->p_tex_bits;
2593 d->i_count= s->i_count;
2594 d->f_count= s->f_count;
2595 d->b_count= s->b_count;
2596 d->skip_count= s->skip_count;
2597 d->misc_bits= s->misc_bits;
    /* macroblock-level decisions taken during encoding */
2599 d->mb_intra= s->mb_intra;
2600 d->mb_skipped= s->mb_skipped;
2601 d->mv_type= s->mv_type;
2602 d->mv_dir= s->mv_dir;
2604 if(s->data_partitioning){
2606 d->tex_pb= s->tex_pb;
2610 d->block_last_index[i]= s->block_last_index[i];
2611 d->interlaced_dct= s->interlaced_dct;
2612 d->qscale= s->qscale;
2614 d->esc3_level_length= s->esc3_level_length;
/* Try coding the current macroblock with one candidate mode ('type')
 * into a scratch bit buffer, score it (bits only, or full RD when
 * mb_decision == FF_MB_DECISION_RD), and keep the result in 'best'
 * when it beats the current minimum '*dmin'. Double-buffered via
 * *next_block so the winning trial's bits/blocks survive. */
2617 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2618 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2619 int *dmin, int *next_block, int motion_x, int motion_y)
2622 uint8_t *dest_backup[3];
    /* start each trial from the same pre-encode state */
2624 copy_context_before_encode(s, backup, type);
2626 s->block= s->blocks[*next_block];
2627 s->pb= pb[*next_block];
2628 if(s->data_partitioning){
2629 s->pb2 = pb2 [*next_block];
2630 s->tex_pb= tex_pb[*next_block];
    /* redirect reconstruction into the RD scratchpad so a losing trial
     * does not clobber the real frame */
2634 memcpy(dest_backup, s->dest, sizeof(s->dest));
2635 s->dest[0] = s->sc.rd_scratchpad;
2636 s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2637 s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2638 av_assert0(s->linesize >= 32); //FIXME
2641 encode_mb(s, motion_x, motion_y);
    /* rate term: bits produced by this trial */
2643 score= put_bits_count(&s->pb);
2644 if(s->data_partitioning){
2645 score+= put_bits_count(&s->pb2);
2646 score+= put_bits_count(&s->tex_pb);
2649 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2650 ff_mpv_reconstruct_mb(s, s->block);
    /* full rate-distortion cost: lambda2 * bits + SSE */
2652 score *= s->lambda2;
2653 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2657 memcpy(s->dest, dest_backup, sizeof(s->dest));
2664 copy_context_after_encode(best, s, type);
/* Sum of squared errors between two w x h pixel areas. The fast
 * mecc.sse kernels handle the common 16x16 and 8x8 sizes; other sizes
 * fall back to a per-pixel loop using the squared-difference table
 * (ff_square_tab is biased by 256 so negative differences index it). */
2668 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2669 const uint32_t *sq = ff_square_tab + 256;
2674 return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2675 else if(w==8 && h==8)
2676 return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2680 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/* Distortion of the current macroblock: SSE (or NSSE when mb_cmp
 * selects it) between the source frame (new_picture) and the
 * reconstruction in s->dest, over luma + both chroma planes.
 * Edge macroblocks are clipped to the frame dimensions (w/h). */
2689 static int sse_mb(MpegEncContext *s){
2693 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2694 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
    /* full-size MB: use the fast fixed-size kernels */
2697 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2698 return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2699 s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2700 s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2702 return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2703 s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2704 s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
    /* clipped edge MB: generic sse() fallback, chroma at half size */
2707 return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2708 +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2709 +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/* Slice-thread worker: run the pre-pass P-frame motion estimation over
 * this context's macroblock rows. Note the reverse scan order
 * (bottom-right to top-left), and that pre_dia_size selects the search
 * diamond for the pre-pass. */
2712 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2713 MpegEncContext *s= *(void**)arg;
2717 s->me.dia_size= s->avctx->pre_dia_size;
2718 s->first_slice_line=1;
2719 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2720 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2721 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2723 s->first_slice_line=0;
/* Slice-thread worker: full motion estimation for this context's rows.
 * Computes motion vectors and candidate mb_type masks and stores them
 * in the context for the later encoding pass. */
2731 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2732 MpegEncContext *s= *(void**)arg;
2734 s->me.dia_size= s->avctx->dia_size;
2735 s->first_slice_line=1;
2736 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2737 s->mb_x=0; //for block init below
2738 ff_init_block_index(s);
2739 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
    /* advance the 4 luma block indexes by one MB (2 blocks wide) */
2740 s->block_index[0]+=2;
2741 s->block_index[1]+=2;
2742 s->block_index[2]+=2;
2743 s->block_index[3]+=2;
2745 /* compute motion vector & mb_type and store in context */
2746 if(s->pict_type==AV_PICTURE_TYPE_B)
2747 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2749 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2751 s->first_slice_line=0;
/* Slice-thread worker: compute the spatial variance and mean of each
 * 16x16 luma macroblock of the source frame (used by rate control and
 * scene-change detection). */
2756 static int mb_var_thread(AVCodecContext *c, void *arg){
2757 MpegEncContext *s= *(void**)arg;
2760 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2761 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2764 uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2766 int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
    /* variance = E[x^2] - E[x]^2, with rounding; 256 pixels per MB */
2768 varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2769 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2771 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2772 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2773 s->me.mb_var_sum_temp += varc;
/* Finish the current slice: merge MPEG-4 data partitions and write the
 * codec-specific stuffing/padding, then byte-align the bitstream.
 * In pass-1 mode the stuffing bits are accounted as misc_bits. */
2779 static void write_slice_end(MpegEncContext *s){
2780 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2781 if(s->partitioned_frame){
2782 ff_mpeg4_merge_partitions(s);
2785 ff_mpeg4_stuffing(&s->pb);
2786 } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
2787 s->out_format == FMT_MJPEG) {
2788 ff_mjpeg_encode_stuffing(s);
2789 } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
2790 ff_speedhq_end_slice(s);
    /* byte-align the output */
2793 flush_put_bits(&s->pb);
2795 if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2796 s->misc_bits+= get_bits_diff(s);
/* Append one 12-byte macroblock-info record (H.263 RFC 4629 style side
 * data) describing the current MB: bit offset, qscale, GOB number, MB
 * address, and the motion-vector predictor. 4MV entries are zeroed. */
2799 static void write_mb_info(MpegEncContext *s)
2801 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2802 int offset = put_bits_count(&s->pb);
2803 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2804 int gobn = s->mb_y / s->gob_index;
2806 if (CONFIG_H263_ENCODER)
2807 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2808 bytestream_put_le32(&ptr, offset);
2809 bytestream_put_byte(&ptr, s->qscale);
2810 bytestream_put_byte(&ptr, gobn);
2811 bytestream_put_le16(&ptr, mba);
2812 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2813 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2814 /* 4MV not implemented */
2815 bytestream_put_byte(&ptr, 0); /* hmv2 */
2816 bytestream_put_byte(&ptr, 0); /* vmv2 */
/* Track mb_info record boundaries: called before each MB (startcode=0)
 * and after each resync marker (startcode=1). Opens a new 12-byte info
 * slot whenever at least s->mb_info bytes were written since the last
 * record; the slot is filled in later by write_mb_info(). */
2819 static void update_mb_info(MpegEncContext *s, int startcode)
2823 if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
2824 s->mb_info_size += 12;
2825 s->prev_mb_info = s->last_mb_info;
2828 s->prev_mb_info = put_bytes_count(&s->pb, 0);
2829 /* This might have incremented mb_info_size above, and we return without
2830 * actually writing any info into that slot yet. But in that case,
2831 * this will be called again at the start of the after writing the
2832 * start code, actually writing the mb info. */
2836 s->last_mb_info = put_bytes_count(&s->pb, 0);
2837 if (!s->mb_info_size)
2838 s->mb_info_size += 12;
/* Grow the shared output bitstream buffer when fewer than 'threshold'
 * bytes remain. Only possible for a single slice context writing into
 * avctx->internal->byte_buffer; pointers into the old buffer
 * (ptr_lastgob, vbv_delay_ptr) are rebased onto the new one.
 * Returns 0 on success, AVERROR(ENOMEM)/AVERROR(EINVAL) on failure. */
2842 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2844 if (put_bytes_left(&s->pb, 0) < threshold
2845 && s->slice_context_count == 1
2846 && s->pb.buf == s->avctx->internal->byte_buffer) {
    /* remember offsets of the raw pointers so they can be rebased */
2847 int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2848 int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2850 uint8_t *new_buffer = NULL;
2851 int new_buffer_size = 0;
    /* guard against int overflow of the enlarged buffer size */
2853 if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2854 av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2855 return AVERROR(ENOMEM);
2860 av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2861 s->avctx->internal->byte_buffer_size + size_increase);
2863 return AVERROR(ENOMEM);
2865 memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2866 av_free(s->avctx->internal->byte_buffer);
2867 s->avctx->internal->byte_buffer = new_buffer;
2868 s->avctx->internal->byte_buffer_size = new_buffer_size;
2869 rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2870 s->ptr_lastgob = s->pb.buf + lastgob_pos;
2871 s->vbv_delay_ptr = s->pb.buf + vbv_pos;
    /* still not enough room (e.g. multiple slice contexts): give up */
2873 if (put_bytes_left(&s->pb, 0) < threshold)
2874 return AVERROR(EINVAL);
/* Slice-thread worker: encode the macroblock rows [start_mb_y, end_mb_y)
 * of this context into s->pb. Handles resync/GOB headers, candidate
 * mode decision (rate or rate-distortion), optional QP trellis
 * (FF_MPV_FLAG_QP_RD), reconstruction, and PSNR accumulation. */
2878 static int encode_thread(AVCodecContext *c, void *arg){
2879 MpegEncContext *s= *(void**)arg;
2880 int mb_x, mb_y, mb_y_order;
2881 int chr_h= 16>>s->chroma_y_shift;
2883 MpegEncContext best_s = { 0 }, backup_s;
    /* double-buffered scratch bitstreams used to try candidate MB
     * codings before committing one (see encode_mb_hq) */
2884 uint8_t bit_buf[2][MAX_MB_BYTES];
2885 uint8_t bit_buf2[2][MAX_MB_BYTES];
2886 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2887 PutBitContext pb[2], pb2[2], tex_pb[2];
2890 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2891 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2892 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2895 s->last_bits= put_bits_count(&s->pb);
2906 /* init last dc values */
2907 /* note: quant matrix value (8) is implied here */
2908 s->last_dc[i] = 128 << s->intra_dc_precision;
2910 s->current_picture.encoding_error[i] = 0;
    /* AMV uses nonstandard DC predictor resets */
2912 if(s->codec_id==AV_CODEC_ID_AMV){
2913 s->last_dc[0] = 128*8/13;
2914 s->last_dc[1] = 128*8/14;
2915 s->last_dc[2] = 128*8/14;
2918 memset(s->last_mv, 0, sizeof(s->last_mv));
2922 switch(s->codec_id){
2923 case AV_CODEC_ID_H263:
2924 case AV_CODEC_ID_H263P:
2925 case AV_CODEC_ID_FLV1:
2926 if (CONFIG_H263_ENCODER)
2927 s->gob_index = H263_GOB_HEIGHT(s->height);
2929 case AV_CODEC_ID_MPEG4:
2930 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2931 ff_mpeg4_init_partitions(s);
2937 s->first_slice_line = 1;
2938 s->ptr_lastgob = s->pb.buf;
    /* SpeedHQ encodes rows in a codec-specific order, hence mb_y_order */
2939 for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
2940 if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
2942 mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
2943 if (first_in_slice && mb_y_order != s->start_mb_y)
2944 ff_speedhq_end_slice(s);
2945 s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
2952 ff_set_qscale(s, s->qscale);
2953 ff_init_block_index(s);
2955 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2956 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2957 int mb_type= s->mb_type[xy];
    /* make sure the output buffer can always hold one more MB */
2961 int size_increase = s->avctx->internal->byte_buffer_size/4
2962 + s->mb_width*MAX_MB_BYTES;
2964 ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
2965 if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
2966 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2969 if(s->data_partitioning){
2970 if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
2971 put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
2972 av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2978 s->mb_y = mb_y; // moved into loop, can get changed by H.261
2979 ff_update_block_index(s);
2981 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2982 ff_h261_reorder_mb_index(s);
2983 xy= s->mb_y*s->mb_stride + s->mb_x;
2984 mb_type= s->mb_type[xy];
2987 /* write gob / video packet header */
2989 int current_packet_size, is_gob_start;
2991 current_packet_size = put_bytes_count(&s->pb, 1)
2992 - (s->ptr_lastgob - s->pb.buf);
2994 is_gob_start = s->rtp_payload_size &&
2995 current_packet_size >= s->rtp_payload_size &&
2998 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
    /* codec-specific constraints on where a GOB/slice may start */
3000 switch(s->codec_id){
3001 case AV_CODEC_ID_H263:
3002 case AV_CODEC_ID_H263P:
3003 if(!s->h263_slice_structured)
3004 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3006 case AV_CODEC_ID_MPEG2VIDEO:
3007 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3008 case AV_CODEC_ID_MPEG1VIDEO:
3009 if(s->mb_skip_run) is_gob_start=0;
3011 case AV_CODEC_ID_MJPEG:
3012 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3017 if(s->start_mb_y != mb_y || mb_x!=0){
3020 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3021 ff_mpeg4_init_partitions(s);
3025 av_assert2((put_bits_count(&s->pb)&7) == 0);
3026 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
    /* error_rate: deliberately corrupt/drop some packets for testing */
3028 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3029 int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->mb_x + s->mb_y;
3030 int d = 100 / s->error_rate;
3032 current_packet_size=0;
3033 s->pb.buf_ptr= s->ptr_lastgob;
3034 av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3038 #if FF_API_RTP_CALLBACK
3039 FF_DISABLE_DEPRECATION_WARNINGS
3040 if (s->avctx->rtp_callback){
3041 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3042 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3044 FF_ENABLE_DEPRECATION_WARNINGS
3046 update_mb_info(s, 1);
    /* emit the codec-specific resync/slice/GOB header */
3048 switch(s->codec_id){
3049 case AV_CODEC_ID_MPEG4:
3050 if (CONFIG_MPEG4_ENCODER) {
3051 ff_mpeg4_encode_video_packet_header(s);
3052 ff_mpeg4_clean_buffers(s);
3055 case AV_CODEC_ID_MPEG1VIDEO:
3056 case AV_CODEC_ID_MPEG2VIDEO:
3057 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3058 ff_mpeg1_encode_slice_header(s);
3059 ff_mpeg1_clean_buffers(s);
3062 case AV_CODEC_ID_H263:
3063 case AV_CODEC_ID_H263P:
3064 if (CONFIG_H263_ENCODER)
3065 ff_h263_encode_gob_header(s, mb_y);
3069 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3070 int bits= put_bits_count(&s->pb);
3071 s->misc_bits+= bits - s->last_bits;
3075 s->ptr_lastgob += current_packet_size;
3076 s->first_slice_line=1;
3077 s->resync_mb_x=mb_x;
3078 s->resync_mb_y=mb_y;
3082 if( (s->resync_mb_x == s->mb_x)
3083 && s->resync_mb_y+1 == s->mb_y){
3084 s->first_slice_line=0;
3088 s->dquant=0; //only for QP_RD
3090 update_mb_info(s, 0);
    /* mode decision: more than one candidate type (or QP search) ->
     * trial-encode each mode via encode_mb_hq and keep the cheapest */
3092 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3094 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3096 copy_context_before_encode(&backup_s, s, -1);
3098 best_s.data_partitioning= s->data_partitioning;
3099 best_s.partitioned_frame= s->partitioned_frame;
3100 if(s->data_partitioning){
3101 backup_s.pb2= s->pb2;
3102 backup_s.tex_pb= s->tex_pb;
3105 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3106 s->mv_dir = MV_DIR_FORWARD;
3107 s->mv_type = MV_TYPE_16X16;
3109 s->mv[0][0][0] = s->p_mv_table[xy][0];
3110 s->mv[0][0][1] = s->p_mv_table[xy][1];
3111 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3112 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3114 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3115 s->mv_dir = MV_DIR_FORWARD;
3116 s->mv_type = MV_TYPE_FIELD;
3119 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3120 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3121 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3123 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3124 &dmin, &next_block, 0, 0);
3126 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3127 s->mv_dir = MV_DIR_FORWARD;
3128 s->mv_type = MV_TYPE_16X16;
3132 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3133 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3135 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3136 s->mv_dir = MV_DIR_FORWARD;
3137 s->mv_type = MV_TYPE_8X8;
3140 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3141 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3143 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3144 &dmin, &next_block, 0, 0);
3146 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3147 s->mv_dir = MV_DIR_FORWARD;
3148 s->mv_type = MV_TYPE_16X16;
3150 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3151 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3152 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3153 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3155 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3156 s->mv_dir = MV_DIR_BACKWARD;
3157 s->mv_type = MV_TYPE_16X16;
3159 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3160 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3161 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3162 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3164 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3165 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3166 s->mv_type = MV_TYPE_16X16;
3168 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3169 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3170 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3171 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3172 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3173 &dmin, &next_block, 0, 0);
3175 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3176 s->mv_dir = MV_DIR_FORWARD;
3177 s->mv_type = MV_TYPE_FIELD;
3180 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3181 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3182 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3184 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3185 &dmin, &next_block, 0, 0);
3187 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3188 s->mv_dir = MV_DIR_BACKWARD;
3189 s->mv_type = MV_TYPE_FIELD;
3192 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3193 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3194 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3196 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3197 &dmin, &next_block, 0, 0);
3199 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3200 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3201 s->mv_type = MV_TYPE_FIELD;
3203 for(dir=0; dir<2; dir++){
3205 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3206 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3207 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3210 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3211 &dmin, &next_block, 0, 0);
3213 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3215 s->mv_type = MV_TYPE_16X16;
3219 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3220 &dmin, &next_block, 0, 0);
3221 if(s->h263_pred || s->h263_aic){
3223 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3225 ff_clean_intra_table_entries(s); //old mode?
    /* FF_MPV_FLAG_QP_RD: additionally search qscale deltas around the
     * winning 16x16 mode */
3229 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3230 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3231 const int last_qp= backup_s.qscale;
3234 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3235 static const int dquant_tab[4]={-1,1,-2,2};
3236 int storecoefs = s->mb_intra && s->dc_val[0];
3238 av_assert2(backup_s.dquant == 0);
3241 s->mv_dir= best_s.mv_dir;
3242 s->mv_type = MV_TYPE_16X16;
3243 s->mb_intra= best_s.mb_intra;
3244 s->mv[0][0][0] = best_s.mv[0][0][0];
3245 s->mv[0][0][1] = best_s.mv[0][0][1];
3246 s->mv[1][0][0] = best_s.mv[1][0][0];
3247 s->mv[1][0][1] = best_s.mv[1][0][1];
3249 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3250 for(; qpi<4; qpi++){
3251 int dquant= dquant_tab[qpi];
3252 qp= last_qp + dquant;
3253 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3255 backup_s.dquant= dquant;
    /* save DC/AC prediction state so a losing trial can be undone */
3258 dc[i]= s->dc_val[0][ s->block_index[i] ];
3259 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3263 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3264 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3265 if(best_s.qscale != qp){
3268 s->dc_val[0][ s->block_index[i] ]= dc[i];
3269 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3276 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3277 int mx= s->b_direct_mv_table[xy][0];
3278 int my= s->b_direct_mv_table[xy][1];
3280 backup_s.dquant = 0;
3281 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3283 ff_mpeg4_set_direct_mv(s, mx, my);
3284 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3285 &dmin, &next_block, mx, my);
3287 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3288 backup_s.dquant = 0;
3289 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3291 ff_mpeg4_set_direct_mv(s, 0, 0);
3292 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3293 &dmin, &next_block, 0, 0);
    /* FF_MPV_FLAG_SKIP_RD: also try coding the winner with no residual */
3295 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3298 coded |= s->block_last_index[i];
3301 memcpy(s->mv, best_s.mv, sizeof(s->mv));
3302 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3303 mx=my=0; //FIXME find the one we actually used
3304 ff_mpeg4_set_direct_mv(s, mx, my);
3305 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3313 s->mv_dir= best_s.mv_dir;
3314 s->mv_type = best_s.mv_type;
3316 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3317 s->mv[0][0][1] = best_s.mv[0][0][1];
3318 s->mv[1][0][0] = best_s.mv[1][0][0];
3319 s->mv[1][0][1] = best_s.mv[1][0][1];*/
3322 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3323 &dmin, &next_block, mx, my);
3328 s->current_picture.qscale_table[xy] = best_s.qscale;
    /* adopt the winner and copy its bits into the real bitstream */
3330 copy_context_after_encode(s, &best_s, -1);
3332 pb_bits_count= put_bits_count(&s->pb);
3333 flush_put_bits(&s->pb);
3334 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3337 if(s->data_partitioning){
3338 pb2_bits_count= put_bits_count(&s->pb2);
3339 flush_put_bits(&s->pb2);
3340 ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3341 s->pb2= backup_s.pb2;
3343 tex_pb_bits_count= put_bits_count(&s->tex_pb);
3344 flush_put_bits(&s->tex_pb);
3345 ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3346 s->tex_pb= backup_s.tex_pb;
3348 s->last_bits= put_bits_count(&s->pb);
3350 if (CONFIG_H263_ENCODER &&
3351 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3352 ff_h263_update_motion_val(s);
    /* winning reconstruction still sits in the RD scratchpad: copy it
     * into the visible frame */
3354 if(next_block==0){ //FIXME 16 vs linesize16
3355 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3356 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3357 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3360 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3361 ff_mpv_reconstruct_mb(s, s->block);
    /* single candidate type: set up motion data directly and encode */
3363 int motion_x = 0, motion_y = 0;
3364 s->mv_type=MV_TYPE_16X16;
3365 // only one MB-Type possible
3368 case CANDIDATE_MB_TYPE_INTRA:
3371 motion_x= s->mv[0][0][0] = 0;
3372 motion_y= s->mv[0][0][1] = 0;
3374 case CANDIDATE_MB_TYPE_INTER:
3375 s->mv_dir = MV_DIR_FORWARD;
3377 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3378 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3380 case CANDIDATE_MB_TYPE_INTER_I:
3381 s->mv_dir = MV_DIR_FORWARD;
3382 s->mv_type = MV_TYPE_FIELD;
3385 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3386 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3387 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3390 case CANDIDATE_MB_TYPE_INTER4V:
3391 s->mv_dir = MV_DIR_FORWARD;
3392 s->mv_type = MV_TYPE_8X8;
3395 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3396 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3399 case CANDIDATE_MB_TYPE_DIRECT:
3400 if (CONFIG_MPEG4_ENCODER) {
3401 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3403 motion_x=s->b_direct_mv_table[xy][0];
3404 motion_y=s->b_direct_mv_table[xy][1];
3405 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3408 case CANDIDATE_MB_TYPE_DIRECT0:
3409 if (CONFIG_MPEG4_ENCODER) {
3410 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3412 ff_mpeg4_set_direct_mv(s, 0, 0);
3415 case CANDIDATE_MB_TYPE_BIDIR:
3416 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3418 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3419 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3420 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3421 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3423 case CANDIDATE_MB_TYPE_BACKWARD:
3424 s->mv_dir = MV_DIR_BACKWARD;
3426 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3427 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3429 case CANDIDATE_MB_TYPE_FORWARD:
3430 s->mv_dir = MV_DIR_FORWARD;
3432 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3433 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3435 case CANDIDATE_MB_TYPE_FORWARD_I:
3436 s->mv_dir = MV_DIR_FORWARD;
3437 s->mv_type = MV_TYPE_FIELD;
3440 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3441 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3442 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3445 case CANDIDATE_MB_TYPE_BACKWARD_I:
3446 s->mv_dir = MV_DIR_BACKWARD;
3447 s->mv_type = MV_TYPE_FIELD;
3450 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3451 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3452 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3455 case CANDIDATE_MB_TYPE_BIDIR_I:
3456 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3457 s->mv_type = MV_TYPE_FIELD;
3459 for(dir=0; dir<2; dir++){
3461 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3462 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3463 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3468 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3471 encode_mb(s, motion_x, motion_y);
3473 // RAL: Update last macroblock type
3474 s->last_mv_dir = s->mv_dir;
3476 if (CONFIG_H263_ENCODER &&
3477 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3478 ff_h263_update_motion_val(s);
3480 ff_mpv_reconstruct_mb(s, s->block);
3483 /* clean the MV table in IPS frames for direct mode in B-frames */
3484 if(s->mb_intra /* && I,P,S_TYPE */){
3485 s->p_mv_table[xy][0]=0;
3486 s->p_mv_table[xy][1]=0;
    /* accumulate per-plane SSE for PSNR reporting, clipped at edges */
3489 if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3493 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3494 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3496 s->current_picture.encoding_error[0] += sse(
3497 s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3498 s->dest[0], w, h, s->linesize);
3499 s->current_picture.encoding_error[1] += sse(
3500 s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3501 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3502 s->current_picture.encoding_error[2] += sse(
3503 s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3504 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3507 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3508 ff_h263_loop_filter(s);
3510 ff_dlog(s->avctx, "MB %d %d bits\n",
3511 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3515 //not beautiful here but we must write it before flushing so it has to be here
3516 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3517 ff_msmpeg4_encode_ext_header(s);
3521 #if FF_API_RTP_CALLBACK
3522 FF_DISABLE_DEPRECATION_WARNINGS
3523 /* Send the last GOB if RTP */
3524 if (s->avctx->rtp_callback) {
3525 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3526 int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3527 /* Call the RTP callback to send the last GOB */
3529 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3531 FF_ENABLE_DEPRECATION_WARNINGS
/* MERGE(field): add src->field into dst->field and zero the source, so a
 * slice-thread context can be folded into the main context exactly once. */
3537 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold the motion-estimation statistics gathered by a slice thread (src)
 * into the main encoder context (dst). Called once per extra thread context
 * after the ME pass. */
3538 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3539 MERGE(me.scene_change_score);
3540 MERGE(me.mc_mb_var_sum_temp);
3541 MERGE(me.mb_var_sum_temp);
/* Fold the per-slice-thread encoding results (bit counts, error/PSNR sums,
 * noise-reduction accumulators) and the thread's bitstream into the main
 * context. The bitstream append requires both writers to be byte-aligned. */
3544 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3547 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3548 MERGE(dct_count[1]);
3557 MERGE(er.error_count);
3558 MERGE(padding_bug_score);
3559 MERGE(current_picture.encoding_error[0]);
3560 MERGE(current_picture.encoding_error[1]);
3561 MERGE(current_picture.encoding_error[2]);
/* Noise-reduction DCT error sums exist only when noise_reduction is enabled. */
3563 if (dst->noise_reduction){
3564 for(i=0; i<64; i++){
3565 MERGE(dct_error_sum[0][i]);
3566 MERGE(dct_error_sum[1][i]);
/* Both slice bitstreams must be byte-aligned before concatenation. */
3570 av_assert1(put_bits_count(&src->pb) % 8 ==0);
3571 av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3572 ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3573 flush_put_bits(&dst->pb);
/* Pick the quantizer (lambda / qscale) for the current picture.
 * Priority: an explicit next_lambda (e.g. forced by rate control), then the
 * rate-control estimate when qscale is not fixed. With adaptive quant the
 * per-MB qscale tables are cleaned up codec-specifically afterwards.
 * dry_run: when nonzero, do not consume state (next_lambda is kept).
 * Returns: presumably <0 on rate-control failure — the failing return path
 * is elided in this view; confirm against the full source. */
3576 static int estimate_qp(MpegEncContext *s, int dry_run){
3577 if (s->next_lambda){
3578 s->current_picture_ptr->f->quality =
3579 s->current_picture.f->quality = s->next_lambda;
3580 if(!dry_run) s->next_lambda= 0;
3581 } else if (!s->fixed_qscale) {
3582 int quality = ff_rate_estimate_qscale(s, dry_run);
3583 s->current_picture_ptr->f->quality =
3584 s->current_picture.f->quality = quality;
3585 if (s->current_picture.f->quality < 0)
/* Adaptive quantization: smooth/clean the per-MB qscale table with the
 * codec-specific constraints before building the qscale tab. */
3589 if(s->adaptive_quant){
3590 switch(s->codec_id){
3591 case AV_CODEC_ID_MPEG4:
3592 if (CONFIG_MPEG4_ENCODER)
3593 ff_clean_mpeg4_qscales(s);
3595 case AV_CODEC_ID_H263:
3596 case AV_CODEC_ID_H263P:
3597 case AV_CODEC_ID_FLV1:
3598 if (CONFIG_H263_ENCODER)
3599 ff_clean_h263_qscales(s);
3602 ff_init_qscale_tab(s);
3605 s->lambda= s->lambda_table[0];
/* Non-adaptive path: lambda comes straight from the picture quality. */
3608 s->lambda = s->current_picture.f->quality;
3613 /* must be called before writing the header */
/* Derive the temporal distances used for B-frame prediction scaling:
 * pp_time = distance between the two surrounding non-B pictures,
 * pb_time = distance from the previous non-B picture to this B picture. */
3614 static void set_frame_distances(MpegEncContext * s){
3615 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3616 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3618 if(s->pict_type==AV_PICTURE_TYPE_B){
3619 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3620 av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
/* Non-B picture: update the reference distance and remember its time. */
3622 s->pp_time= s->time - s->last_non_b_time;
3623 s->last_non_b_time= s->time;
3624 av_assert1(s->picture_number==0 || s->pp_time > 0);
/* Encode one full picture: motion estimation, scene-change handling, fcode
 * selection, qscale estimation, quant-matrix setup, header writing, and the
 * (possibly slice-threaded) macroblock encode pass, finally merging all
 * slice-thread contexts back into the main one. */
3628 static int encode_picture(MpegEncContext *s, int picture_number)
3632 int context_count = s->slice_context_count;
3634 s->picture_number = picture_number;
3636 /* Reset the average MB variance */
3637 s->me.mb_var_sum_temp =
3638 s->me.mc_mb_var_sum_temp = 0;
3640 /* we need to initialize some time vars before we can encode B-frames */
3641 // RAL: Condition added for MPEG1VIDEO
3642 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3643 set_frame_distances(s);
3644 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3645 ff_set_mpeg4_time(s);
3647 s->me.scene_change_score=0;
3649 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* Rounding control: I frames reset it; P/S frames toggle it for codecs with
 * flip-flop rounding so forward prediction errors do not accumulate. */
3651 if(s->pict_type==AV_PICTURE_TYPE_I){
3652 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3653 else s->no_rounding=0;
3654 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3655 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3656 s->no_rounding ^= 1;
/* Two-pass: take qp from the stats file; otherwise reuse the last lambda of
 * the same picture class as the starting point for motion estimation. */
3659 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3660 if (estimate_qp(s,1) < 0)
3662 ff_get_2pass_fcode(s);
3663 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3664 if(s->pict_type==AV_PICTURE_TYPE_B)
3665 s->lambda= s->last_lambda_for[s->pict_type];
3667 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* Outside MJPEG/AMV the chroma intra matrix aliases the luma one. */
3671 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3672 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3673 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3674 s->q_chroma_intra_matrix = s->q_intra_matrix;
3675 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3678 s->mb_intra=0; //for the rate distortion & bit compare functions
3679 for(i=1; i<context_count; i++){
3680 ret = ff_update_duplicate_context(s->thread_context[i], s);
3688 /* Estimate motion for every MB */
3689 if(s->pict_type != AV_PICTURE_TYPE_I){
/* me_penalty_compensation scales lambda for the ME cost model (Q8 fixed point). */
3690 s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3691 s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3692 if (s->pict_type != AV_PICTURE_TYPE_B) {
3693 if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3695 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3699 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3700 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3702 for(i=0; i<s->mb_stride*s->mb_height; i++)
3703 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3705 if(!s->fixed_qscale){
3706 /* finding spatial complexity for I-frame rate control */
3707 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3710 for(i=1; i<context_count; i++){
3711 merge_context_after_me(s, s->thread_context[i]);
3713 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3714 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* Scene change: promote the P frame to I and mark every MB intra. */
3717 if (s->me.scene_change_score > s->scenechange_threshold &&
3718 s->pict_type == AV_PICTURE_TYPE_P) {
3719 s->pict_type= AV_PICTURE_TYPE_I;
3720 for(i=0; i<s->mb_stride*s->mb_height; i++)
3721 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3722 if(s->msmpeg4_version >= 3)
3724 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3725 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* Choose f_code/b_code from the MV tables and clip overly long vectors. */
3729 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3730 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3732 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3734 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3735 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3736 s->f_code= FFMAX3(s->f_code, a, b);
3739 ff_fix_long_p_mvs(s, s->intra_penalty ? CANDIDATE_MB_TYPE_INTER : CANDIDATE_MB_TYPE_INTRA);
3740 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3741 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3745 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3746 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
3751 if(s->pict_type==AV_PICTURE_TYPE_B){
3754 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3755 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3756 s->f_code = FFMAX(a, b);
3758 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3759 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3760 s->b_code = FFMAX(a, b);
3762 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3763 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3764 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3765 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3766 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3768 for(dir=0; dir<2; dir++){
3771 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3772 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3773 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3774 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* Final qp estimation for the actual encode (not a dry run). */
3782 if (estimate_qp(s, 0) < 0)
3785 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3786 s->pict_type == AV_PICTURE_TYPE_I &&
3787 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3788 s->qscale= 3; //reduce clipping problems
/* MJPEG bakes qscale directly into the quant matrices. */
3790 if (s->out_format == FMT_MJPEG) {
3791 const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3792 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3794 if (s->avctx->intra_matrix) {
3796 luma_matrix = s->avctx->intra_matrix;
3798 if (s->avctx->chroma_intra_matrix)
3799 chroma_matrix = s->avctx->chroma_intra_matrix;
3801 /* for mjpeg, we do include qscale in the matrix */
3803 int j = s->idsp.idct_permutation[i];
3805 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3806 s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3808 s->y_dc_scale_table=
3809 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3810 s->chroma_intra_matrix[0] =
3811 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3812 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3813 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3814 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3815 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV uses fixed sp5x matrices and constant DC scale tables (13/14). */
3818 if(s->codec_id == AV_CODEC_ID_AMV){
3819 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3820 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3822 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3824 s->intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3825 s->chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3827 s->y_dc_scale_table= y;
3828 s->c_dc_scale_table= c;
3829 s->intra_matrix[0] = 13;
3830 s->chroma_intra_matrix[0] = 14;
3831 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3832 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3833 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3834 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3838 if (s->out_format == FMT_SPEEDHQ) {
3839 s->y_dc_scale_table=
3840 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[3];
3843 //FIXME var duplication
3844 s->current_picture_ptr->f->key_frame =
3845 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3846 s->current_picture_ptr->f->pict_type =
3847 s->current_picture.f->pict_type = s->pict_type;
3849 if (s->current_picture.f->key_frame)
3850 s->picture_in_gop_number=0;
3852 s->mb_x = s->mb_y = 0;
3853 s->last_bits= put_bits_count(&s->pb);
/* Write the codec-specific picture header. */
3854 switch(s->out_format) {
3855 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
3857 /* s->huffman == HUFFMAN_TABLE_OPTIMAL can only be true for MJPEG. */
3858 if (!CONFIG_MJPEG_ENCODER || s->huffman != HUFFMAN_TABLE_OPTIMAL)
3859 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3860 s->pred, s->intra_matrix, s->chroma_intra_matrix);
3864 if (CONFIG_SPEEDHQ_ENCODER)
3865 ff_speedhq_encode_picture_header(s);
3868 if (CONFIG_H261_ENCODER)
3869 ff_h261_encode_picture_header(s, picture_number);
3872 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3873 ff_wmv2_encode_picture_header(s, picture_number);
3874 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3875 ff_msmpeg4_encode_picture_header(s, picture_number);
3876 else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3877 ret = ff_mpeg4_encode_picture_header(s, picture_number);
3880 } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3881 ret = ff_rv10_encode_picture_header(s, picture_number);
3885 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3886 ff_rv20_encode_picture_header(s, picture_number);
3887 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3888 ff_flv_encode_picture_header(s, picture_number);
3889 else if (CONFIG_H263_ENCODER)
3890 ff_h263_encode_picture_header(s, picture_number);
3893 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3894 ff_mpeg1_encode_picture_header(s, picture_number);
3899 bits= put_bits_count(&s->pb);
3900 s->header_bits= bits - s->last_bits;
/* Run the MB encode pass on all slice threads, then merge their output
 * bitstreams and statistics back into the main context. */
3902 for(i=1; i<context_count; i++){
3903 update_duplicate_context_after_me(s->thread_context[i], s);
3905 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3906 for(i=1; i<context_count; i++){
3907 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3908 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3909 merge_context_after_encode(s, s->thread_context[i]);
/* DCT-domain noise reduction: accumulate per-coefficient error statistics
 * (separately for intra/inter blocks) and shrink each coefficient toward
 * zero by the learned offset, clamping at zero so the sign never flips. */
3915 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3916 const int intra= s->mb_intra;
3919 s->dct_count[intra]++;
3921 for(i=0; i<64; i++){
3922 int level= block[i];
/* Positive branch: learn the magnitude, then subtract the offset. */
3926 s->dct_error_sum[intra][i] += level;
3927 level -= s->dct_offset[intra][i];
3928 if(level<0) level=0;
/* Negative branch: mirror of the above with the offset added. */
3930 s->dct_error_sum[intra][i] -= level;
3931 level += s->dct_offset[intra][i];
3932 if(level>0) level=0;
/* Trellis (rate-distortion optimal) quantization of one 8x8 block.
 * Runs the forward DCT, builds up to two candidate levels per coefficient,
 * then does a Viterbi-style search over (run, level) codes minimizing
 * distortion + lambda * bits. Writes the chosen levels back into block[]
 * in permuted order and returns the index of the last nonzero coefficient
 * (or -1 for an all-zero block). *overflow is set when a level exceeded
 * the codec's max coefficient. */
3939 static int dct_quantize_trellis_c(MpegEncContext *s,
3940 int16_t *block, int n,
3941 int qscale, int *overflow){
3943 const uint16_t *matrix;
3944 const uint8_t *scantable;
3945 const uint8_t *perm_scantable;
3947 unsigned int threshold1, threshold2;
3959 int coeff_count[64];
3960 int qmul, qadd, start_i, last_non_zero, i, dc;
3961 const int esc_length= s->ac_esc_length;
3963 uint8_t * last_length;
3964 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3967 s->fdsp.fdct(block);
3969 if(s->dct_error_sum)
3970 s->denoise_dct(s, block);
3972 qadd= ((qscale-1)|1)*8;
/* MPEG-2 optionally uses the non-linear qscale mapping. */
3974 if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3975 else mpeg2_qscale = qscale << 1;
/* Intra setup: intra scantable/matrices, DC handled separately. */
3979 scantable= s->intra_scantable.scantable;
3980 perm_scantable= s->intra_scantable.permutated;
3988 /* For AIC we skip quant/dequant of INTRADC */
3993 /* note: block[0] is assumed to be positive */
3994 block[0] = (block[0] + (q >> 1)) / q;
3997 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
3998 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
3999 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4000 bias= 1<<(QMAT_SHIFT-1);
4002 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4003 length = s->intra_chroma_ac_vlc_length;
4004 last_length= s->intra_chroma_ac_vlc_last_length;
4006 length = s->intra_ac_vlc_length;
4007 last_length= s->intra_ac_vlc_last_length;
/* Inter setup: inter scantable/matrix and inter VLC length tables. */
4010 scantable= s->inter_scantable.scantable;
4011 perm_scantable= s->inter_scantable.permutated;
4014 qmat = s->q_inter_matrix[qscale];
4015 matrix = s->inter_matrix;
4016 length = s->inter_ac_vlc_length;
4017 last_length= s->inter_ac_vlc_last_length;
4021 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4022 threshold2= (threshold1<<1);
/* Find the last coefficient that survives quantization. */
4024 for(i=63; i>=start_i; i--) {
4025 const int j = scantable[i];
4026 int level = block[j] * qmat[j];
4028 if(((unsigned)(level+threshold1))>threshold2){
/* Build candidate quantized levels (level and level-1) per coefficient. */
4034 for(i=start_i; i<=last_non_zero; i++) {
4035 const int j = scantable[i];
4036 int level = block[j] * qmat[j];
4038 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4039 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4040 if(((unsigned)(level+threshold1))>threshold2){
4042 level= (bias + level)>>QMAT_SHIFT;
4044 coeff[1][i]= level-1;
4045 // coeff[2][k]= level-2;
4047 level= (bias - level)>>QMAT_SHIFT;
4048 coeff[0][i]= -level;
4049 coeff[1][i]= -level+1;
4050 // coeff[2][k]= -level+2;
4052 coeff_count[i]= FFMIN(level, 2);
4053 av_assert2(coeff_count[i]);
4056 coeff[0][i]= (level>>31)|1;
4061 *overflow= s->max_qcoeff < max; //overflow might have happened
4063 if(last_non_zero < start_i){
4064 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4065 return last_non_zero;
/* Viterbi search: for each position keep surviving predecessor states and
 * extend them with every candidate level, scoring distortion + bits. */
4068 score_tab[start_i]= 0;
4069 survivor[0]= start_i;
4072 for(i=start_i; i<=last_non_zero; i++){
4073 int level_index, j, zero_distortion;
4074 int dct_coeff= FFABS(block[ scantable[i] ]);
4075 int best_score=256*256*256*120;
/* ifast FDCT has AAN scaling baked in; undo it for distortion math. */
4077 if (s->fdsp.fdct == ff_fdct_ifast)
4078 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4079 zero_distortion= dct_coeff*dct_coeff;
4081 for(level_index=0; level_index < coeff_count[i]; level_index++){
4083 int level= coeff[level_index][i];
4084 const int alevel= FFABS(level);
/* Reconstruct the dequantized value the decoder would see, per format. */
4089 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4090 unquant_coeff= alevel*qmul + qadd;
4091 } else if(s->out_format == FMT_MJPEG) {
4092 j = s->idsp.idct_permutation[scantable[i]];
4093 unquant_coeff = alevel * matrix[j] * 8;
4095 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4097 unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4098 unquant_coeff = (unquant_coeff - 1) | 1;
4100 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4101 unquant_coeff = (unquant_coeff - 1) | 1;
4106 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* Levels that fit in the VLC tables use table lengths; others use escapes. */
4108 if((level&(~127)) == 0){
4109 for(j=survivor_count-1; j>=0; j--){
4110 int run= i - survivor[j];
4111 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4112 score += score_tab[i-run];
4114 if(score < best_score){
4117 level_tab[i+1]= level-64;
4121 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4122 for(j=survivor_count-1; j>=0; j--){
4123 int run= i - survivor[j];
4124 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4125 score += score_tab[i-run];
4126 if(score < last_score){
4129 last_level= level-64;
/* Escape-coded path: fixed escape length instead of VLC table length. */
4135 distortion += esc_length*lambda;
4136 for(j=survivor_count-1; j>=0; j--){
4137 int run= i - survivor[j];
4138 int score= distortion + score_tab[i-run];
4140 if(score < best_score){
4143 level_tab[i+1]= level-64;
4147 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4148 for(j=survivor_count-1; j>=0; j--){
4149 int run= i - survivor[j];
4150 int score= distortion + score_tab[i-run];
4151 if(score < last_score){
4154 last_level= level-64;
4162 score_tab[i+1]= best_score;
4164 // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
/* Prune survivor states that can no longer win. */
4165 if(last_non_zero <= 27){
4166 for(; survivor_count; survivor_count--){
4167 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4171 for(; survivor_count; survivor_count--){
4172 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4177 survivor[ survivor_count++ ]= i+1;
/* For formats without a per-run "last" VLC, pick the best terminating state. */
4180 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4181 last_score= 256*256*256*120;
4182 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4183 int score= score_tab[i];
4185 score += lambda * 2; // FIXME more exact?
4187 if(score < last_score){
4190 last_level= level_tab[i];
4191 last_run= run_tab[i];
4196 s->coded_score[n] = last_score;
4198 dc= FFABS(block[0]);
4199 last_non_zero= last_i - 1;
4200 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4202 if(last_non_zero < start_i)
4203 return last_non_zero;
/* Special case: only the DC-adjacent coefficient remains — decide whether
 * coding it beats dropping it entirely, by explicit RD comparison. */
4205 if(last_non_zero == 0 && start_i == 0){
4207 int best_score= dc * dc;
4209 for(i=0; i<coeff_count[0]; i++){
4210 int level= coeff[i][0];
4211 int alevel= FFABS(level);
4212 int unquant_coeff, score, distortion;
4214 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4215 unquant_coeff= (alevel*qmul + qadd)>>3;
4217 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4218 unquant_coeff = (unquant_coeff - 1) | 1;
4220 unquant_coeff = (unquant_coeff + 4) >> 3;
4221 unquant_coeff<<= 3 + 3;
4223 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4225 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4226 else score= distortion + esc_length*lambda;
4228 if(score < best_score){
4230 best_level= level - 64;
4233 block[0]= best_level;
4234 s->coded_score[n] = best_score - dc*dc;
4235 if(best_level == 0) return -1;
4236 else return last_non_zero;
/* Back-track through run_tab/level_tab to write the winning path into block[]. */
4240 av_assert2(last_level);
4242 block[ perm_scantable[last_non_zero] ]= last_level;
4245 for(; i>start_i; i -= run_tab[i] + 1){
4246 block[ perm_scantable[i-1] ]= level_tab[i];
4249 return last_non_zero;
/* Fixed-point 8x8 IDCT basis functions, indexed by permuted coefficient
 * index; filled lazily by build_basis() on first use. */
4252 static int16_t basis[64][64];
/* Precompute the 2-D DCT basis (scaled by BASIS_SHIFT) in the layout given
 * by the IDCT permutation, for use by the refinement quantizer below. */
4254 static void build_basis(uint8_t *perm){
4261 double s= 0.25*(1<<BASIS_SHIFT);
4263 int perm_index= perm[index];
/* DC rows/columns carry the 1/sqrt(2) orthonormalization factor. */
4264 if(i==0) s*= sqrt(0.5);
4265 if(j==0) s*= sqrt(0.5);
4266 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4273 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4274 int16_t *block, int16_t *weight, int16_t *orig,
/* Iteratively refine an already-quantized block: try +/-1 changes on each
 * coefficient, scoring reconstruction error against the original pixels
 * (via the precomputed basis functions) plus lambda * bit-cost delta, and
 * keep applying the best improvement until none remains. Returns the new
 * last-nonzero index. */
4277 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4278 const uint8_t *scantable;
4279 const uint8_t *perm_scantable;
4280 // unsigned int threshold1, threshold2;
4285 int qmul, qadd, start_i, last_non_zero, i, dc;
4287 uint8_t * last_length;
4289 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* Lazily build the DCT basis table on first call. */
4291 if(basis[0][0] == 0)
4292 build_basis(s->idsp.idct_permutation);
/* Intra: intra scantable and VLC length tables; DC quantized separately. */
4297 scantable= s->intra_scantable.scantable;
4298 perm_scantable= s->intra_scantable.permutated;
4305 /* For AIC we skip quant/dequant of INTRADC */
4309 q <<= RECON_SHIFT-3;
4310 /* note: block[0] is assumed to be positive */
4312 // block[0] = (block[0] + (q >> 1)) / q;
4314 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4315 // bias= 1<<(QMAT_SHIFT-1);
4316 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4317 length = s->intra_chroma_ac_vlc_length;
4318 last_length= s->intra_chroma_ac_vlc_last_length;
4320 length = s->intra_ac_vlc_length;
4321 last_length= s->intra_ac_vlc_last_length;
/* Inter: inter scantable and length tables. */
4324 scantable= s->inter_scantable.scantable;
4325 perm_scantable= s->inter_scantable.permutated;
4328 length = s->inter_ac_vlc_length;
4329 last_length= s->inter_ac_vlc_last_length;
4331 last_non_zero = s->block_last_index[n];
/* rem[] = residual between current reconstruction and the original pixels,
 * in RECON_SHIFT fixed point. */
4333 dc += (1<<(RECON_SHIFT-1));
4334 for(i=0; i<64; i++){
4335 rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
/* Build perceptual weights in the 16..63 range from the caller's weights. */
4339 for(i=0; i<64; i++){
4344 w= FFABS(weight[i]) + qns*one;
4345 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4348 // w=weight[i] = (63*qns + (w/2)) / w;
4351 av_assert2(w<(1<<6));
4354 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* Record the current run-length structure and add each coded coefficient's
 * dequantized contribution into the residual. */
4358 for(i=start_i; i<=last_non_zero; i++){
4359 int j= perm_scantable[i];
4360 const int level= block[j];
4364 if(level<0) coeff= qmul*level - qadd;
4365 else coeff= qmul*level + qadd;
4366 run_tab[rle_index++]=run;
4369 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
/* Main refinement loop: baseline score is "change nothing". */
4376 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4379 int run2, best_unquant_change=0, analyze_gradient;
4380 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
/* Gradient of the weighted error, used to pre-screen new coefficients. */
4382 if(analyze_gradient){
4383 for(i=0; i<64; i++){
4386 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
/* Try adjusting the intra DC coefficient by +/-1. */
4392 const int level= block[0];
4393 int change, old_coeff;
4395 av_assert2(s->mb_intra);
4399 for(change=-1; change<=1; change+=2){
4400 int new_level= level + change;
4401 int score, new_coeff;
4403 new_coeff= q*new_level;
4404 if(new_coeff >= 2048 || new_coeff < 0)
4407 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4408 new_coeff - old_coeff);
4409 if(score<best_score){
4412 best_change= change;
4413 best_unquant_change= new_coeff - old_coeff;
4420 run2= run_tab[rle_index++];
/* Try adjusting each AC coefficient by +/-1, accounting for how the change
 * alters the (run, level) VLC structure around it. */
4424 for(i=start_i; i<64; i++){
4425 int j= perm_scantable[i];
4426 const int level= block[j];
4427 int change, old_coeff;
4429 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4433 if(level<0) old_coeff= qmul*level - qadd;
4434 else old_coeff= qmul*level + qadd;
4435 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4439 av_assert2(run2>=0 || i >= last_non_zero );
4442 for(change=-1; change<=1; change+=2){
4443 int new_level= level + change;
4444 int score, new_coeff, unquant_change;
/* Noise shaping < 2 only allows shrinking magnitudes. */
4447 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4451 if(new_level<0) new_coeff= qmul*new_level - qadd;
4452 else new_coeff= qmul*new_level + qadd;
4453 if(new_coeff >= 2048 || new_coeff <= -2048)
4455 //FIXME check for overflow
4458 if(level < 63 && level > -63){
4459 if(i < last_non_zero)
4460 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4461 - length[UNI_AC_ENC_INDEX(run, level+64)];
4463 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4464 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* Turning a zero into a +/-1: splits the surrounding run in two. */
4467 av_assert2(FFABS(new_level)==1);
4469 if(analyze_gradient){
4470 int g= d1[ scantable[i] ];
4471 if(g && (g^new_level) >= 0)
4475 if(i < last_non_zero){
4476 int next_i= i + run2 + 1;
4477 int next_level= block[ perm_scantable[next_i] ] + 64;
4479 if(next_level&(~127))
4482 if(next_i < last_non_zero)
4483 score += length[UNI_AC_ENC_INDEX(run, 65)]
4484 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4485 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4487 score += length[UNI_AC_ENC_INDEX(run, 65)]
4488 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4489 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4491 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4493 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4494 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* Removing a +/-1: merges the two adjacent runs into one. */
4500 av_assert2(FFABS(level)==1);
4502 if(i < last_non_zero){
4503 int next_i= i + run2 + 1;
4504 int next_level= block[ perm_scantable[next_i] ] + 64;
4506 if(next_level&(~127))
4509 if(next_i < last_non_zero)
4510 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4511 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4512 - length[UNI_AC_ENC_INDEX(run, 65)];
4514 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4515 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4516 - length[UNI_AC_ENC_INDEX(run, 65)];
4518 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4520 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4521 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4528 unquant_change= new_coeff - old_coeff;
4529 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4531 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4533 if(score<best_score){
4536 best_change= change;
4537 best_unquant_change= unquant_change;
4541 prev_level= level + 64;
4542 if(prev_level&(~127))
/* Apply the winning change and update last_non_zero accordingly. */
4552 int j= perm_scantable[ best_coeff ];
4554 block[j] += best_change;
4556 if(best_coeff > last_non_zero){
4557 last_non_zero= best_coeff;
4558 av_assert2(block[j]);
4560 for(; last_non_zero>=start_i; last_non_zero--){
4561 if(block[perm_scantable[last_non_zero]])
/* Rebuild run_tab and fold the change into the residual for the next pass. */
4568 for(i=start_i; i<=last_non_zero; i++){
4569 int j= perm_scantable[i];
4570 const int level= block[j];
4573 run_tab[rle_index++]=run;
4580 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4586 return last_non_zero;
4590 * Permute an 8x8 block according to permutation.
4591 * @param block the block which will be permuted according to
4592 * the given permutation vector
4593 * @param permutation the permutation vector
4594 * @param last the last non zero coefficient in scantable order, used to
4595 * speed the permutation up
4596 * @param scantable the used scantable, this is only used to speed the
4597 * permutation up, the block is not (inverse) permutated
4598 * to scantable order!
4600 void ff_block_permute(int16_t *block, uint8_t *permutation,
4601 const uint8_t *scantable, int last)
4608 //FIXME it is ok but not clean and might fail for some permutations
4609 // if (permutation[1] == 1)
/* Copy the nonzero coefficients aside, then scatter them back through the
 * permutation; only positions up to 'last' (scantable order) are touched. */
4612 for (i = 0; i <= last; i++) {
4613 const int j = scantable[i];
4618 for (i = 0; i <= last; i++) {
4619 const int j = scantable[i];
4620 const int perm_j = permutation[j];
4621 block[perm_j] = temp[j];
/* Plain (non-trellis) quantization of one 8x8 block: forward DCT, optional
 * DCT-domain denoising, biased division by the quant matrix, then a final
 * permutation of the nonzero coefficients to IDCT order. Returns the index
 * of the last nonzero coefficient; *overflow is set when a level exceeded
 * the codec's max coefficient. */
4625 int ff_dct_quantize_c(MpegEncContext *s,
4626 int16_t *block, int n,
4627 int qscale, int *overflow)
4629 int i, j, level, last_non_zero, q, start_i;
4631 const uint8_t *scantable;
4634 unsigned int threshold1, threshold2;
4636 s->fdsp.fdct(block);
4638 if(s->dct_error_sum)
4639 s->denoise_dct(s, block);
/* Intra: DC coefficient quantized separately with simple rounding. */
4642 scantable= s->intra_scantable.scantable;
4650 /* For AIC we skip quant/dequant of INTRADC */
4653 /* note: block[0] is assumed to be positive */
4654 block[0] = (block[0] + (q >> 1)) / q;
4657 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4658 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4660 scantable= s->inter_scantable.scantable;
4663 qmat = s->q_inter_matrix[qscale];
4664 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4666 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4667 threshold2= (threshold1<<1);
/* Scan backwards for the last coefficient that survives quantization. */
4668 for(i=63;i>=start_i;i--) {
4670 level = block[j] * qmat[j];
4672 if(((unsigned)(level+threshold1))>threshold2){
/* Quantize the surviving range with the +/- bias rounding. */
4679 for(i=start_i; i<=last_non_zero; i++) {
4681 level = block[j] * qmat[j];
4683 // if( bias+level >= (1<<QMAT_SHIFT)
4684 // || bias-level >= (1<<QMAT_SHIFT)){
4685 if(((unsigned)(level+threshold1))>threshold2){
4687 level= (bias + level)>>QMAT_SHIFT;
4690 level= (bias - level)>>QMAT_SHIFT;
4698 *overflow= s->max_qcoeff < max; //overflow might have happened
4700 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4701 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4702 ff_block_permute(block, s->idsp.idct_permutation,
4703 scantable, last_non_zero);
4705 return last_non_zero;
/* AVOption plumbing: OFFSET maps an option to its MpegEncContext field,
 * VE marks options as video + encoding parameters. */
4708 #define OFFSET(x) offsetof(MpegEncContext, x)
4709 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options of the plain H.263 encoder. */
4710 static const AVOption h263_options[] = {
4711 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4712 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4714 #if FF_API_MPEGVIDEO_OPTS
4715 FF_MPV_DEPRECATED_MPEG_QUANT_OPT
4716 FF_MPV_DEPRECATED_A53_CC_OPT
/* AVClass exposing the options above through the AVOption API. */
4721 static const AVClass h263_class = {
4722 .class_name = "H.263 encoder",
4723 .item_name = av_default_item_name,
4724 .option = h263_options,
4725 .version = LIBAVUTIL_VERSION_INT,
/* Codec registration for the H.263 encoder (shared mpegvideo entry points). */
4728 AVCodec ff_h263_encoder = {
4730 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4731 .type = AVMEDIA_TYPE_VIDEO,
4732 .id = AV_CODEC_ID_H263,
4733 .priv_data_size = sizeof(MpegEncContext),
4734 .init = ff_mpv_encode_init,
4735 .encode2 = ff_mpv_encode_picture,
4736 .close = ff_mpv_encode_end,
4737 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4738 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4739 .priv_class = &h263_class,
/* Private options of the H.263+ (H.263 version 2) encoder. */
4742 static const AVOption h263p_options[] = {
4743 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4744 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4745 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4746 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
4748 #if FF_API_MPEGVIDEO_OPTS
4749 FF_MPV_DEPRECATED_MPEG_QUANT_OPT
4750 FF_MPV_DEPRECATED_A53_CC_OPT
/* AVClass for the options above. */
4754 static const AVClass h263p_class = {
4755 .class_name = "H.263p encoder",
4756 .item_name = av_default_item_name,
4757 .option = h263p_options,
4758 .version = LIBAVUTIL_VERSION_INT,
/* Codec registration; unlike plain H.263 this one supports slice threading. */
4761 AVCodec ff_h263p_encoder = {
4763 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4764 .type = AVMEDIA_TYPE_VIDEO,
4765 .id = AV_CODEC_ID_H263P,
4766 .priv_data_size = sizeof(MpegEncContext),
4767 .init = ff_mpv_encode_init,
4768 .encode2 = ff_mpv_encode_picture,
4769 .close = ff_mpv_encode_end,
4770 .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4771 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4772 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4773 .priv_class = &h263p_class,
4776 static const AVClass msmpeg4v2_class = {
4777 .class_name = "msmpeg4v2 encoder",
4778 .item_name = av_default_item_name,
4779 .option = ff_mpv_generic_options,
4780 .version = LIBAVUTIL_VERSION_INT,
4783 AVCodec ff_msmpeg4v2_encoder = {
4784 .name = "msmpeg4v2",
4785 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4786 .type = AVMEDIA_TYPE_VIDEO,
4787 .id = AV_CODEC_ID_MSMPEG4V2,
4788 .priv_data_size = sizeof(MpegEncContext),
4789 .init = ff_mpv_encode_init,
4790 .encode2 = ff_mpv_encode_picture,
4791 .close = ff_mpv_encode_end,
4792 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4793 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4794 .priv_class = &msmpeg4v2_class,
4797 static const AVClass msmpeg4v3_class = {
4798 .class_name = "msmpeg4v3 encoder",
4799 .item_name = av_default_item_name,
4800 .option = ff_mpv_generic_options,
4801 .version = LIBAVUTIL_VERSION_INT,
4804 AVCodec ff_msmpeg4v3_encoder = {
4806 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4807 .type = AVMEDIA_TYPE_VIDEO,
4808 .id = AV_CODEC_ID_MSMPEG4V3,
4809 .priv_data_size = sizeof(MpegEncContext),
4810 .init = ff_mpv_encode_init,
4811 .encode2 = ff_mpv_encode_picture,
4812 .close = ff_mpv_encode_end,
4813 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4814 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4815 .priv_class = &msmpeg4v3_class,
4818 static const AVClass wmv1_class = {
4819 .class_name = "wmv1 encoder",
4820 .item_name = av_default_item_name,
4821 .option = ff_mpv_generic_options,
4822 .version = LIBAVUTIL_VERSION_INT,
4825 AVCodec ff_wmv1_encoder = {
4827 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4828 .type = AVMEDIA_TYPE_VIDEO,
4829 .id = AV_CODEC_ID_WMV1,
4830 .priv_data_size = sizeof(MpegEncContext),
4831 .init = ff_mpv_encode_init,
4832 .encode2 = ff_mpv_encode_picture,
4833 .close = ff_mpv_encode_end,
4834 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4835 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4836 .priv_class = &wmv1_class,