2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/mem_internal.h"
40 #include "libavutil/pixdesc.h"
41 #include "libavutil/opt.h"
42 #include "libavutil/thread.h"
47 #include "mpegvideo.h"
48 #include "mpegvideodata.h"
52 #include "mjpegenc_common.h"
54 #include "mpegutils.h"
56 #include "speedhqenc.h"
58 #include "pixblockdsp.h"
62 #include "aandcttab.h"
64 #include "mpeg4video.h"
66 #include "bytestream.h"
69 #include "packet_internal.h"
73 #define QUANT_BIAS_SHIFT 8
75 #define QMAT_SHIFT_MMX 16
78 static int encode_picture(MpegEncContext *s, int picture_number);
79 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
80 static int sse_mb(MpegEncContext *s);
81 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
82 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
84 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
85 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
87 const AVOption ff_mpv_generic_options[] = {
89 #if FF_API_MPEGVIDEO_OPTS
90 FF_MPV_DEPRECATED_MPEG_QUANT_OPT
/**
 * Build the per-qscale quantization multiplier tables (qmat, plus the
 * 16-bit qmat16[qscale][0]/[1] pair used by the integer/MMX quantizer)
 * from a quantization matrix, for every qscale in [qmin, qmax].
 * The fixed-point scaling chosen depends on which FDCT implementation is
 * active: the islow/faandct DCTs use a plain reciprocal, while ifast
 * folds the AAN post-scale factors (ff_aanscales) into the table.
 *
 * NOTE(review): this excerpt has lines elided (the leading numbers are
 * original source line numbers); code below is kept verbatim.
 */
95 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
96 uint16_t (*qmat16)[2][64],
97 const uint16_t *quant_matrix,
98 int bias, int qmin, int qmax, int intra)
100 FDCTDSPContext *fdsp = &s->fdsp;
104 for (qscale = qmin; qscale <= qmax; qscale++) {
/* MPEG-2 non-linear quantizer scale mapping vs. plain linear doubling. */
108 if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
109 else qscale2 = qscale << 1;
111 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
113 fdsp->fdct == ff_faandct ||
114 #endif /* CONFIG_FAANDCT */
115 fdsp->fdct == ff_jpeg_fdct_islow_10) {
116 for (i = 0; i < 64; i++) {
/* idct_permutation maps natural order to the IDCT's scan order. */
117 const int j = s->idsp.idct_permutation[i];
118 int64_t den = (int64_t) qscale2 * quant_matrix[j];
119 /* 16 <= qscale * quant_matrix[i] <= 7905
120 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
121 * 19952 <= x <= 249205026
122 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
123 * 3444240 >= (1 << 36) / (x) >= 275 */
125 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
127 } else if (fdsp->fdct == ff_fdct_ifast) {
128 for (i = 0; i < 64; i++) {
129 const int j = s->idsp.idct_permutation[i];
/* ifast leaves the AAN scale in the coefficients, so divide it out here. */
130 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
131 /* 16 <= qscale * quant_matrix[i] <= 7905
132 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
133 * 19952 <= x <= 249205026
134 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
135 * 3444240 >= (1 << 36) / (x) >= 275 */
137 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
140 for (i = 0; i < 64; i++) {
141 const int j = s->idsp.idct_permutation[i];
142 int64_t den = (int64_t) qscale2 * quant_matrix[j];
143 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
144 * Assume x = qscale * quant_matrix[i]
146 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
147 * so 32768 >= (1 << 19) / (x) >= 67 */
148 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
149 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
150 // (qscale * quant_matrix[i]);
151 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
/* Avoid the degenerate multipliers 0 and 128*256 (would break the
 * 16-bit quantizer); clamp to the largest usable value instead. */
153 if (qmat16[qscale][0][i] == 0 ||
154 qmat16[qscale][0][i] == 128 * 256)
155 qmat16[qscale][0][i] = 128 * 256 - 1;
/* [1] holds the rounding bias rescaled against the multiplier. */
156 qmat16[qscale][1][i] =
157 ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
158 qmat16[qscale][0][i]);
/* Overflow guard: lower the working shift until max * qmat fits in int.
 * For intra tables the DC coefficient (i == 0) is skipped. */
162 for (i = intra; i < 64; i++) {
164 if (fdsp->fdct == ff_fdct_ifast) {
165 max = (8191LL * ff_aanscales[i]) >> 14;
167 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
173 av_log(s->avctx, AV_LOG_INFO,
174 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/**
 * Derive s->qscale (and s->lambda2) from the current rate-control
 * lambda, clamping to the configured qmin/qmax.
 * The conversion qscale ~= (lambda * 139) >> (FF_LAMBDA_SHIFT + 7) is an
 * empirical lambda-to-quantizer mapping used throughout this encoder.
 */
179 static inline void update_qscale(MpegEncContext *s)
/* NOTE(review): "&& 0" makes this non-linear-quantizer branch dead code
 * on purpose — it is kept disabled but compilable. */
181 if (s->q_scale_type == 1 && 0) {
183 int bestdiff=INT_MAX;
/* Pick the non-linear qscale entry whose effective value is closest to
 * the target implied by lambda, honoring qmin/qmax (unless VBV forced
 * us to ignore qmax). */
186 for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
187 int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
188 if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
189 (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
191 if (diff < bestdiff) {
/* Linear path: round lambda to a quantizer and clamp. When VBV overrode
 * qmax, allow up to the codec maximum of 31. */
198 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
199 (FF_LAMBDA_SHIFT + 7);
200 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
/* Keep lambda2 (squared lambda, rounded) in sync for RD decisions. */
203 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/**
 * Write a 64-entry quantization matrix to the bitstream, one byte per
 * coefficient, in zigzag scan order (ff_zigzag_direct).
 */
207 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
213 for (i = 0; i < 64; i++) {
214 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
/**
221 * init s->current_picture.qscale_table from s->lambda_table
 *
 * Converts each macroblock's lambda to a quantizer with the same
 * empirical (lam * 139) mapping as update_qscale(), clamped at qmin.
 */
223 void ff_init_qscale_tab(MpegEncContext *s)
225 int8_t * const qscale_table = s->current_picture.qscale_table;
228 for (i = 0; i < s->mb_num; i++) {
/* mb_index2xy maps the linear MB index to the table position. */
229 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
230 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
231 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/**
 * Copy the fields that motion estimation may have changed from the
 * per-slice duplicate context back into / between contexts, so all
 * slice contexts agree before encoding continues.
 */
236 static void update_duplicate_context_after_me(MpegEncContext *dst,
239 #define COPY(a) dst->a= src->a
241 COPY(current_picture);
247 COPY(picture_in_gop_number);
248 COPY(gop_picture_number);
249 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
250 COPY(progressive_frame); // FIXME don't set in encode_header
251 COPY(partitioned_frame); // FIXME don't set in encode_header
/**
 * One-time static-table setup (run under ff_thread_once): mark the
 * small motion-vector range [-16, 15] as encodable with fcode 1 in
 * default_fcode_tab. MAX_MV re-centers the negative indices.
 */
255 static void mpv_encode_init_static(void)
257 for (int i = -16; i < 16; i++)
258 default_fcode_tab[i + MAX_MV] = 1;
/**
262 * Set the given MpegEncContext to defaults for encoding.
263 * the changed fields will not depend upon the prior state of the MpegEncContext.
 */
265 static void mpv_encode_defaults(MpegEncContext *s)
267 static AVOnce init_static_once = AV_ONCE_INIT;
/* Shared encoder/decoder defaults first, then encoder-only state. */
269 ff_mpv_common_defaults(s);
/* Thread-safe one-time init of the static MV/fcode tables. */
271 ff_thread_once(&init_static_once, mpv_encode_init_static);
273 s->me.mv_penalty = default_mv_penalty;
274 s->fcode_tab = default_fcode_tab;
276 s->input_picture_number = 0;
277 s->picture_in_gop_number = 0;
/**
 * Install the DCT quantization function pointers: arch-specific (x86)
 * overrides first, then the C fallbacks for anything still unset.
 * When trellis quantization is requested, dct_quantize is swapped for
 * the trellis implementation while fast_dct_quantize keeps the plain
 * one (captured before the swap).
 */
280 av_cold int ff_dct_encode_init(MpegEncContext *s)
283 ff_dct_encode_init_x86(s);
285 if (CONFIG_H263_ENCODER)
286 ff_h263dsp_init(&s->h263dsp);
287 if (!s->dct_quantize)
288 s->dct_quantize = ff_dct_quantize_c;
290 s->denoise_dct = denoise_dct_c;
291 s->fast_dct_quantize = s->dct_quantize;
292 if (s->avctx->trellis)
293 s->dct_quantize = dct_quantize_trellis_c;
298 /* init video encoder */
/**
 * Initialize the MPEG-family video encoder: copy/validate AVCodecContext
 * options into the MpegEncContext, reject unsupported option/codec
 * combinations, select the output format per codec id, allocate the
 * quantization/reorder tables, and initialize DSP, rate control and CPB
 * properties.
 * Returns 0 on success or a negative AVERROR code.
 *
 * NOTE(review): this excerpt is gapped (the leading numbers are original
 * source line numbers; some statements, breaks and closing braces are
 * not shown). All visible code is kept verbatim.
 */
299 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
301 MpegEncContext *s = avctx->priv_data;
302 AVCPBProperties *cpb_props;
305 mpv_encode_defaults(s);
/* --- chroma format from the input pixel format --- */
307 switch (avctx->pix_fmt) {
308 case AV_PIX_FMT_YUVJ444P:
309 case AV_PIX_FMT_YUV444P:
310 s->chroma_format = CHROMA_444;
312 case AV_PIX_FMT_YUVJ422P:
313 case AV_PIX_FMT_YUV422P:
314 s->chroma_format = CHROMA_422;
316 case AV_PIX_FMT_YUVJ420P:
317 case AV_PIX_FMT_YUV420P:
319 s->chroma_format = CHROMA_420;
323 avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
/* --- import deprecated public options into private fields --- */
325 #if FF_API_PRIVATE_OPT
326 FF_DISABLE_DEPRECATION_WARNINGS
327 if (avctx->rtp_payload_size)
328 s->rtp_payload_size = avctx->rtp_payload_size;
329 if (avctx->me_penalty_compensation)
330 s->me_penalty_compensation = avctx->me_penalty_compensation;
332 s->me_pre = avctx->pre_me;
333 FF_ENABLE_DEPRECATION_WARNINGS
/* --- copy basic stream parameters --- */
336 s->bit_rate = avctx->bit_rate;
337 s->width = avctx->width;
338 s->height = avctx->height;
339 if (avctx->gop_size > 600 &&
340 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
341 av_log(avctx, AV_LOG_WARNING,
342 "keyframe interval too large!, reducing it from %d to %d\n",
343 avctx->gop_size, 600);
344 avctx->gop_size = 600;
346 s->gop_size = avctx->gop_size;
348 if (avctx->max_b_frames > MAX_B_FRAMES) {
349 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
350 "is %d.\n", MAX_B_FRAMES);
351 avctx->max_b_frames = MAX_B_FRAMES;
353 s->max_b_frames = avctx->max_b_frames;
354 s->codec_id = avctx->codec->id;
355 s->strict_std_compliance = avctx->strict_std_compliance;
356 s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
357 s->rtp_mode = !!s->rtp_payload_size;
358 s->intra_dc_precision = avctx->intra_dc_precision;
360 // workaround some differences between how applications specify dc precision
361 if (s->intra_dc_precision < 0) {
362 s->intra_dc_precision += 8;
363 } else if (s->intra_dc_precision >= 8)
364 s->intra_dc_precision -= 8;
366 if (s->intra_dc_precision < 0) {
367 av_log(avctx, AV_LOG_ERROR,
368 "intra dc precision must be positive, note some applications use"
369 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
370 return AVERROR(EINVAL);
373 if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))
/* Only MPEG-2 supports an intra DC precision above the 8-bit baseline. */
376 if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
377 av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
378 return AVERROR(EINVAL);
380 s->user_specified_pts = AV_NOPTS_VALUE;
382 if (s->gop_size <= 1) {
390 s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
/* Adaptive quantization is enabled if any masking option or QP-RD asks
 * for per-MB quantizer decisions. */
392 s->adaptive_quant = (avctx->lumi_masking ||
393 avctx->dark_masking ||
394 avctx->temporal_cplx_masking ||
395 avctx->spatial_cplx_masking ||
398 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
401 s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
/* --- rate control / VBV sanity: derive a buffer size when only a max
 * rate was given, using codec-specific formulas --- */
403 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
404 switch(avctx->codec_id) {
405 case AV_CODEC_ID_MPEG1VIDEO:
406 case AV_CODEC_ID_MPEG2VIDEO:
407 avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
409 case AV_CODEC_ID_MPEG4:
410 case AV_CODEC_ID_MSMPEG4V1:
411 case AV_CODEC_ID_MSMPEG4V2:
412 case AV_CODEC_ID_MSMPEG4V3:
/* Piecewise-linear VBV size interpolation between MPEG-4 profile
 * breakpoints (in units of 16384 bits). */
413 if (avctx->rc_max_rate >= 15000000) {
414 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
415 } else if(avctx->rc_max_rate >= 2000000) {
416 avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
417 } else if(avctx->rc_max_rate >= 384000) {
418 avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
420 avctx->rc_buffer_size = 40;
421 avctx->rc_buffer_size *= 16384;
424 if (avctx->rc_buffer_size) {
425 av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
429 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
430 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
431 return AVERROR(EINVAL);
434 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
435 av_log(avctx, AV_LOG_INFO,
436 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
439 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
440 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
441 return AVERROR(EINVAL);
444 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
445 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
446 return AVERROR(EINVAL);
449 if (avctx->rc_max_rate &&
450 avctx->rc_max_rate == avctx->bit_rate &&
451 avctx->rc_max_rate != avctx->rc_min_rate) {
452 av_log(avctx, AV_LOG_INFO,
453 "impossible bitrate constraints, this will fail\n");
456 if (avctx->rc_buffer_size &&
457 avctx->bit_rate * (int64_t)avctx->time_base.num >
458 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
459 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
460 return AVERROR(EINVAL);
463 if (!s->fixed_qscale &&
464 avctx->bit_rate * av_q2d(avctx->time_base) >
465 avctx->bit_rate_tolerance) {
466 av_log(avctx, AV_LOG_WARNING,
467 "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
468 avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
/* vbv_delay is a 16-bit field in 90 kHz units; an over-large buffer
 * cannot be represented and degrades to the VBR sentinel 0xFFFF. */
471 if (avctx->rc_max_rate &&
472 avctx->rc_min_rate == avctx->rc_max_rate &&
473 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
474 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
475 90000LL * (avctx->rc_buffer_size - 1) >
476 avctx->rc_max_rate * 0xFFFFLL) {
477 av_log(avctx, AV_LOG_INFO,
478 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
479 "specified vbv buffer is too large for the given bitrate!\n");
/* --- reject option/codec combinations the target codec cannot encode --- */
482 if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
483 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
484 s->codec_id != AV_CODEC_ID_FLV1) {
485 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
486 return AVERROR(EINVAL);
489 if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
490 av_log(avctx, AV_LOG_ERROR,
491 "OBMC is only supported with simple mb decision\n");
492 return AVERROR(EINVAL);
495 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
496 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
497 return AVERROR(EINVAL);
500 if (s->max_b_frames &&
501 s->codec_id != AV_CODEC_ID_MPEG4 &&
502 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
503 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
504 av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
505 return AVERROR(EINVAL);
507 if (s->max_b_frames < 0) {
508 av_log(avctx, AV_LOG_ERROR,
509 "max b frames must be 0 or positive for mpegvideo based encoders\n");
510 return AVERROR(EINVAL);
/* Pixel aspect ratio is stored in 8 bits per component in these codecs. */
513 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
514 s->codec_id == AV_CODEC_ID_H263 ||
515 s->codec_id == AV_CODEC_ID_H263P) &&
516 (avctx->sample_aspect_ratio.num > 255 ||
517 avctx->sample_aspect_ratio.den > 255)) {
518 av_log(avctx, AV_LOG_WARNING,
519 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
520 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
521 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
522 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
/* --- resolution constraints per codec --- */
525 if ((s->codec_id == AV_CODEC_ID_H263 ||
526 s->codec_id == AV_CODEC_ID_H263P) &&
527 (avctx->width > 2048 ||
528 avctx->height > 1152 )) {
529 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
530 return AVERROR(EINVAL);
532 if ((s->codec_id == AV_CODEC_ID_H263 ||
533 s->codec_id == AV_CODEC_ID_H263P ||
534 s->codec_id == AV_CODEC_ID_RV20) &&
535 ((avctx->width &3) ||
536 (avctx->height&3) )) {
537 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
538 return AVERROR(EINVAL);
541 if (s->codec_id == AV_CODEC_ID_RV10 &&
543 avctx->height&15 )) {
544 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
545 return AVERROR(EINVAL);
548 if ((s->codec_id == AV_CODEC_ID_WMV1 ||
549 s->codec_id == AV_CODEC_ID_WMV2) &&
551 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
552 return AVERROR(EINVAL);
555 if ((avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
556 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
557 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
558 return AVERROR(EINVAL);
561 #if FF_API_PRIVATE_OPT
562 FF_DISABLE_DEPRECATION_WARNINGS
563 if (avctx->mpeg_quant)
565 FF_ENABLE_DEPRECATION_WARNINGS
567 if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
568 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
569 av_log(avctx, AV_LOG_ERROR,
570 "mpeg2 style quantization not supported by codec\n");
571 return AVERROR(EINVAL);
575 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
576 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
577 return AVERROR(EINVAL);
580 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
581 avctx->mb_decision != FF_MB_DECISION_RD) {
582 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
583 return AVERROR(EINVAL);
586 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
587 (s->codec_id == AV_CODEC_ID_AMV ||
588 s->codec_id == AV_CODEC_ID_MJPEG)) {
589 // Used to produce garbage with MJPEG.
590 av_log(avctx, AV_LOG_ERROR,
591 "QP RD is no longer compatible with MJPEG or AMV\n");
592 return AVERROR(EINVAL);
595 #if FF_API_PRIVATE_OPT
596 FF_DISABLE_DEPRECATION_WARNINGS
597 if (avctx->scenechange_threshold)
598 s->scenechange_threshold = avctx->scenechange_threshold;
599 FF_ENABLE_DEPRECATION_WARNINGS
602 if (s->scenechange_threshold < 1000000000 &&
603 (avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
604 av_log(avctx, AV_LOG_ERROR,
605 "closed gop with scene change detection are not supported yet, "
606 "set threshold to 1000000000\n");
607 return AVERROR_PATCHWELCOME;
610 if (avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
611 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
612 s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
613 av_log(avctx, AV_LOG_ERROR,
614 "low delay forcing is only available for mpeg2, "
615 "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
616 return AVERROR(EINVAL);
618 if (s->max_b_frames != 0) {
619 av_log(avctx, AV_LOG_ERROR,
620 "B-frames cannot be used with low delay\n");
621 return AVERROR(EINVAL);
625 if (s->q_scale_type == 1) {
626 if (avctx->qmax > 28) {
627 av_log(avctx, AV_LOG_ERROR,
628 "non linear quant only supports qmax <= 28 currently\n");
629 return AVERROR_PATCHWELCOME;
633 if (avctx->slices > 1 &&
634 (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
635 av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
636 return AVERROR(EINVAL);
639 if (avctx->thread_count > 1 &&
640 s->codec_id != AV_CODEC_ID_MPEG4 &&
641 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
642 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
643 s->codec_id != AV_CODEC_ID_MJPEG &&
644 (s->codec_id != AV_CODEC_ID_H263P)) {
645 av_log(avctx, AV_LOG_ERROR,
646 "multi threaded encoding not supported by codec\n");
647 return AVERROR_PATCHWELCOME;
650 if (avctx->thread_count < 1) {
651 av_log(avctx, AV_LOG_ERROR,
652 "automatic thread number detection not supported by codec, "
654 return AVERROR_PATCHWELCOME;
657 #if FF_API_PRIVATE_OPT
658 FF_DISABLE_DEPRECATION_WARNINGS
659 if (avctx->b_frame_strategy)
660 s->b_frame_strategy = avctx->b_frame_strategy;
661 if (avctx->b_sensitivity != 40)
662 s->b_sensitivity = avctx->b_sensitivity;
663 FF_ENABLE_DEPRECATION_WARNINGS
666 if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
667 av_log(avctx, AV_LOG_INFO,
668 "notice: b_frame_strategy only affects the first pass\n");
669 s->b_frame_strategy = 0;
/* Reduce the timebase fraction so MPEG-4's 16-bit denominator limit
 * below is checked against the canonical form. */
672 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
674 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
675 avctx->time_base.den /= i;
676 avctx->time_base.num /= i;
/* --- quantizer rounding bias per codec family --- */
680 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
681 // (a + x * 3 / 8) / x
682 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
683 s->inter_quant_bias = 0;
685 s->intra_quant_bias = 0;
687 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
690 if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
691 av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
692 return AVERROR(EINVAL);
695 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
697 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
698 avctx->time_base.den > (1 << 16) - 1) {
699 av_log(avctx, AV_LOG_ERROR,
700 "timebase %d/%d not supported by MPEG 4 standard, "
701 "the maximum admitted value for the timebase denominator "
702 "is %d\n", avctx->time_base.num, avctx->time_base.den,
704 return AVERROR(EINVAL);
706 s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
/* --- per-codec output format and encoder-specific setup --- */
708 switch (avctx->codec->id) {
709 case AV_CODEC_ID_MPEG1VIDEO:
710 s->out_format = FMT_MPEG1;
711 s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
712 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
714 case AV_CODEC_ID_MPEG2VIDEO:
715 s->out_format = FMT_MPEG1;
716 s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
717 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
720 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
721 case AV_CODEC_ID_MJPEG:
722 case AV_CODEC_ID_AMV:
723 s->out_format = FMT_MJPEG;
724 s->intra_only = 1; /* force intra only for jpeg */
725 if ((ret = ff_mjpeg_encode_init(s)) < 0)
731 case AV_CODEC_ID_SPEEDHQ:
732 s->out_format = FMT_SPEEDHQ;
733 s->intra_only = 1; /* force intra only for SHQ */
734 if (!CONFIG_SPEEDHQ_ENCODER)
735 return AVERROR_ENCODER_NOT_FOUND;
736 if ((ret = ff_speedhq_encode_init(s)) < 0)
741 case AV_CODEC_ID_H261:
742 if (!CONFIG_H261_ENCODER)
743 return AVERROR_ENCODER_NOT_FOUND;
744 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
745 av_log(avctx, AV_LOG_ERROR,
746 "The specified picture size of %dx%d is not valid for the "
747 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
748 s->width, s->height);
749 return AVERROR(EINVAL);
751 s->out_format = FMT_H261;
754 s->rtp_mode = 0; /* Sliced encoding not supported */
756 case AV_CODEC_ID_H263:
757 if (!CONFIG_H263_ENCODER)
758 return AVERROR_ENCODER_NOT_FOUND;
759 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
760 s->width, s->height) == 8) {
761 av_log(avctx, AV_LOG_ERROR,
762 "The specified picture size of %dx%d is not valid for "
763 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
764 "352x288, 704x576, and 1408x1152. "
765 "Try H.263+.\n", s->width, s->height);
766 return AVERROR(EINVAL);
768 s->out_format = FMT_H263;
772 case AV_CODEC_ID_H263P:
773 s->out_format = FMT_H263;
776 s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
777 s->modified_quant = s->h263_aic;
778 s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
779 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
782 /* These are just to be sure */
786 case AV_CODEC_ID_FLV1:
787 s->out_format = FMT_H263;
788 s->h263_flv = 2; /* format = 1; 11-bit codes */
789 s->unrestricted_mv = 1;
790 s->rtp_mode = 0; /* don't allow GOB */
794 case AV_CODEC_ID_RV10:
795 s->out_format = FMT_H263;
799 case AV_CODEC_ID_RV20:
800 s->out_format = FMT_H263;
803 s->modified_quant = 1;
807 s->unrestricted_mv = 0;
809 case AV_CODEC_ID_MPEG4:
810 s->out_format = FMT_H263;
812 s->unrestricted_mv = 1;
813 s->low_delay = s->max_b_frames ? 0 : 1;
814 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
816 case AV_CODEC_ID_MSMPEG4V2:
817 s->out_format = FMT_H263;
819 s->unrestricted_mv = 1;
820 s->msmpeg4_version = 2;
824 case AV_CODEC_ID_MSMPEG4V3:
825 s->out_format = FMT_H263;
827 s->unrestricted_mv = 1;
828 s->msmpeg4_version = 3;
829 s->flipflop_rounding = 1;
833 case AV_CODEC_ID_WMV1:
834 s->out_format = FMT_H263;
836 s->unrestricted_mv = 1;
837 s->msmpeg4_version = 4;
838 s->flipflop_rounding = 1;
842 case AV_CODEC_ID_WMV2:
843 s->out_format = FMT_H263;
845 s->unrestricted_mv = 1;
846 s->msmpeg4_version = 5;
847 s->flipflop_rounding = 1;
852 return AVERROR(EINVAL);
855 #if FF_API_PRIVATE_OPT
856 FF_DISABLE_DEPRECATION_WARNINGS
857 if (avctx->noise_reduction)
858 s->noise_reduction = avctx->noise_reduction;
859 FF_ENABLE_DEPRECATION_WARNINGS
862 avctx->has_b_frames = !s->low_delay;
866 s->progressive_frame =
867 s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
868 AV_CODEC_FLAG_INTERLACED_ME) ||
/* --- allocate contexts, DSP function tables and encoder buffers --- */
873 if ((ret = ff_mpv_common_init(s)) < 0)
876 ff_fdctdsp_init(&s->fdsp, avctx);
877 ff_me_cmp_init(&s->mecc, avctx);
878 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
879 ff_pixblockdsp_init(&s->pdsp, avctx);
880 ff_qpeldsp_init(&s->qdsp);
882 if (s->msmpeg4_version) {
883 int ac_stats_size = 2 * 2 * (MAX_LEVEL + 1) * (MAX_RUN + 1) * 2 * sizeof(int);
884 if (!(s->ac_stats = av_mallocz(ac_stats_size)))
885 return AVERROR(ENOMEM);
888 if (!(avctx->stats_out = av_mallocz(256)) ||
889 !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix, 32) ||
890 !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix, 32) ||
891 !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix, 32) ||
892 !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16, 32) ||
893 !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
894 !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16, 32) ||
895 !FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_PICTURE_COUNT) ||
896 !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_PICTURE_COUNT))
897 return AVERROR(ENOMEM);
899 if (s->noise_reduction) {
900 if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
901 return AVERROR(ENOMEM);
904 ff_dct_encode_init(s);
906 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
907 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
909 if (s->slice_context_count > 1) {
912 if (avctx->codec_id == AV_CODEC_ID_H263P)
913 s->h263_slice_structured = 1;
916 s->quant_precision = 5;
918 #if FF_API_PRIVATE_OPT
919 FF_DISABLE_DEPRECATION_WARNINGS
920 if (avctx->frame_skip_threshold)
921 s->frame_skip_threshold = avctx->frame_skip_threshold;
922 if (avctx->frame_skip_factor)
923 s->frame_skip_factor = avctx->frame_skip_factor;
924 if (avctx->frame_skip_exp)
925 s->frame_skip_exp = avctx->frame_skip_exp;
926 if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
927 s->frame_skip_cmp = avctx->frame_skip_cmp;
928 FF_ENABLE_DEPRECATION_WARNINGS
931 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, avctx->ildct_cmp);
932 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
934 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
935 ff_h261_encode_init(s);
936 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
937 ff_h263_encode_init(s);
938 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
939 ff_msmpeg4_encode_init(s);
940 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
941 && s->out_format == FMT_MPEG1)
942 ff_mpeg1_encode_init(s);
/* --- select default intra/inter matrices (user-supplied ones win),
 * stored in the IDCT's permuted order --- */
945 for (i = 0; i < 64; i++) {
946 int j = s->idsp.idct_permutation[i];
947 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
949 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
950 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
951 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
953 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
954 } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
956 s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
959 s->chroma_intra_matrix[j] =
960 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
961 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
963 if (avctx->intra_matrix)
964 s->intra_matrix[j] = avctx->intra_matrix[i];
965 if (avctx->inter_matrix)
966 s->inter_matrix[j] = avctx->inter_matrix[i];
969 /* precompute matrix */
970 /* for mjpeg, we do include qscale in the matrix */
971 if (s->out_format != FMT_MJPEG) {
972 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
973 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
975 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
976 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
980 if ((ret = ff_rate_control_init(s)) < 0)
983 #if FF_API_PRIVATE_OPT
984 FF_DISABLE_DEPRECATION_WARNINGS
985 if (avctx->brd_scale)
986 s->brd_scale = avctx->brd_scale;
988 if (avctx->prediction_method)
989 s->pred = avctx->prediction_method + 1;
990 FF_ENABLE_DEPRECATION_WARNINGS
/* b_frame_strategy == 2 pre-allocates downscaled frames (brd_scale)
 * used to decide the B-frame count per miniGOP. */
993 if (s->b_frame_strategy == 2) {
994 for (i = 0; i < s->max_b_frames + 2; i++) {
995 s->tmp_frames[i] = av_frame_alloc();
996 if (!s->tmp_frames[i])
997 return AVERROR(ENOMEM);
999 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1000 s->tmp_frames[i]->width = s->width >> s->brd_scale;
1001 s->tmp_frames[i]->height = s->height >> s->brd_scale;
1003 ret = av_frame_get_buffer(s->tmp_frames[i], 0);
/* Publish coded-picture-buffer properties as stream side data. */
1009 cpb_props = ff_add_cpb_side_data(avctx);
1011 return AVERROR(ENOMEM);
1012 cpb_props->max_bitrate = avctx->rc_max_rate;
1013 cpb_props->min_bitrate = avctx->rc_min_rate;
1014 cpb_props->avg_bitrate = avctx->bit_rate;
1015 cpb_props->buffer_size = avctx->rc_buffer_size;
/**
 * Free everything ff_mpv_encode_init() allocated: rate control state,
 * common context, codec-specific state (MJPEG), stats, quantization
 * tables and picture arrays. av_freep() NULLs each pointer, so a
 * partially-initialized context is safe to pass here.
 */
1020 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1022 MpegEncContext *s = avctx->priv_data;
1025 ff_rate_control_uninit(s);
1027 ff_mpv_common_end(s);
1028 if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
1029 s->out_format == FMT_MJPEG)
1030 ff_mjpeg_encode_close(s);
1032 av_freep(&avctx->extradata);
1034 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1035 av_frame_free(&s->tmp_frames[i]);
1037 ff_free_picture_tables(&s->new_picture);
1038 ff_mpeg_unref_picture(avctx, &s->new_picture);
1040 av_freep(&avctx->stats_out);
1041 av_freep(&s->ac_stats);
/* The chroma tables may alias the luma ones; only free them when they
 * are separate allocations, then clear the aliases unconditionally. */
1043 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1044 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1045 s->q_chroma_intra_matrix= NULL;
1046 s->q_chroma_intra_matrix16= NULL;
1047 av_freep(&s->q_intra_matrix);
1048 av_freep(&s->q_inter_matrix);
1049 av_freep(&s->q_intra_matrix16);
1050 av_freep(&s->q_inter_matrix16);
1051 av_freep(&s->input_picture);
1052 av_freep(&s->reordered_input_picture);
1053 av_freep(&s->dct_offset);
/**
 * Sum of absolute errors of a 16x16 pixel block against a constant
 * reference value (typically the block mean) — a cheap flatness /
 * activity measure used by get_intra_count().
 */
1058 static int get_sae(uint8_t *src, int ref, int stride)
1063 for (y = 0; y < 16; y++) {
1064 for (x = 0; x < 16; x++) {
1065 acc += FFABS(src[x + y * stride] - ref);
/**
 * Count 16x16 blocks that look cheaper to code as intra: a block votes
 * "intra" when its SAE around its own mean (plus a fixed margin of 500)
 * is still below its SAD against the reference frame. Dimensions are
 * rounded down to whole macroblocks.
 */
1072 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1073 uint8_t *ref, int stride)
1079 h = s->height & ~15;
1081 for (y = 0; y < h; y += 16) {
1082 for (x = 0; x < w; x += 16) {
1083 int offset = x + y * stride;
/* Inter cost: SAD between source and reference block. */
1084 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
/* Intra cost proxy: deviation around the block mean (pix_sum >> 8
 * averages the 256 pixels, +128 rounds). */
1086 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1087 int sae = get_sae(src + offset, mean, stride);
1089 acc += sae + 500 < sad;
/**
 * Thin wrapper around ff_alloc_picture() that forwards the encoder's
 * geometry (chroma shifts, mb/b8 strides, output format) and receives
 * the resulting line sizes. `shared` selects shared vs. owned buffers.
 */
1095 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1097 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1098 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1099 s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1100 &s->linesize, &s->uvlinesize);
/**
 * Take one user-supplied input frame into the encoder's input queue.
 *
 * Validates/derives the pts, decides whether the frame buffer can be
 * referenced directly ("direct") or must be copied (with optional edge
 * padding/drawing), and inserts the Picture at the reordering delay
 * position in s->input_picture[].
 *
 * NOTE(review): several lines (the "direct" computation, error returns,
 * closing braces) are not visible in this chunk; comments below are
 * limited to what the visible code shows.
 */
1103 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1105 Picture *pic = NULL;
1107 int i, display_picture_number = 0, ret;
/* frames of reordering delay: B-frames need max_b_frames, otherwise
 * one frame unless low_delay */
1108 int encoding_delay = s->max_b_frames ? s->max_b_frames
1109 : (s->low_delay ? 0 : 1);
1110 int flush_offset = 1;
1115 display_picture_number = s->input_picture_number++;
1117 if (pts != AV_NOPTS_VALUE) {
1118 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1119 int64_t last = s->user_specified_pts;
/* user pts must be strictly increasing */
1122 av_log(s->avctx, AV_LOG_ERROR,
1123 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1125 return AVERROR(EINVAL);
1128 if (!s->low_delay && display_picture_number == 1)
1129 s->dts_delta = pts - last;
1131 s->user_specified_pts = pts;
1133 if (s->user_specified_pts != AV_NOPTS_VALUE) {
/* no pts on this frame: guess by incrementing the last user pts */
1134 s->user_specified_pts =
1135 pts = s->user_specified_pts + 1;
1136 av_log(s->avctx, AV_LOG_INFO,
1137 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1140 pts = display_picture_number;
/* direct use of the input buffer is only possible when geometry,
 * strides and alignment all match the encoder's expectations */
1144 if (!pic_arg->buf[0] ||
1145 pic_arg->linesize[0] != s->linesize ||
1146 pic_arg->linesize[1] != s->uvlinesize ||
1147 pic_arg->linesize[2] != s->uvlinesize)
1149 if ((s->width & 15) || (s->height & 15))
1151 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1153 if (s->linesize & (STRIDE_ALIGN-1))
1156 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1157 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1159 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1163 pic = &s->picture[i];
1167 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1170 ret = alloc_picture(s, pic, direct);
/* if the newly allocated planes alias the input (offset by
 * INPLACE_OFFSET) no copy is needed; otherwise copy plane by plane */
1175 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1176 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1177 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1180 int h_chroma_shift, v_chroma_shift;
1181 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1185 for (i = 0; i < 3; i++) {
1186 int src_stride = pic_arg->linesize[i];
1187 int dst_stride = i ? s->uvlinesize : s->linesize;
1188 int h_shift = i ? h_chroma_shift : 0;
1189 int v_shift = i ? v_chroma_shift : 0;
1190 int w = s->width >> h_shift;
1191 int h = s->height >> v_shift;
1192 uint8_t *src = pic_arg->data[i];
1193 uint8_t *dst = pic->f->data[i];
/* interlaced MPEG-2 with large bottom padding — see full file for
 * the vpad adjustment this guards (lines elided in this chunk) */
1196 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1197 && !s->progressive_sequence
1198 && FFALIGN(s->height, 32) - s->height > 16)
1201 if (!s->avctx->rc_buffer_size)
1202 dst += INPLACE_OFFSET;
1204 if (src_stride == dst_stride)
1205 memcpy(dst, src, src_stride * h);
1208 uint8_t *dst2 = dst;
1210 memcpy(dst2, src, w);
/* draw/pad edges when dimensions are not MB aligned */
1215 if ((s->width & 15) || (s->height & (vpad-1))) {
1216 s->mpvencdsp.draw_edges(dst, dst_stride,
1226 ret = av_frame_copy_props(pic->f, pic_arg);
1230 pic->f->display_picture_number = display_picture_number;
1231 pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1233 /* Flushing: When we have not received enough input frames,
1234 * ensure s->input_picture[0] contains the first picture */
1235 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1236 if (s->input_picture[flush_offset])
1239 if (flush_offset <= 1)
1242 encoding_delay = encoding_delay - flush_offset + 1;
1245 /* shift buffer entries */
1246 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1247 s->input_picture[i - flush_offset] = s->input_picture[i];
1249 s->input_picture[encoding_delay] = (Picture*) pic;
/**
 * Decide whether picture p is similar enough to ref to be skipped.
 *
 * Compares 8x8 blocks across all three planes with
 * s->mecc.frame_skip_cmp and accumulates a score according to
 * |frame_skip_exp| (max / sum-abs / sum-sq / sum-|v^3| / sum-v^4);
 * a negative exponent normalizes the score per macroblock.
 * NOTE(review): the declarations of score/x/y/plane and the return
 * statements are not visible in this chunk.
 */
1254 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1258 int64_t score64 = 0;
1260 for (plane = 0; plane < 3; plane++) {
1261 const int stride = p->f->linesize[plane];
/* luma plane covers 2x as many 8x8 blocks per MB dimension */
1262 const int bw = plane ? 1 : 2;
1263 for (y = 0; y < s->mb_height * bw; y++) {
1264 for (x = 0; x < s->mb_width * bw; x++) {
/* non-shared pictures carry an INPLACE-style 16-byte offset */
1265 int off = p->shared ? 0 : 16;
1266 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1267 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1268 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1270 switch (FFABS(s->frame_skip_exp)) {
1271 case 0: score = FFMAX(score, v); break;
1272 case 1: score += FFABS(v); break;
1273 case 2: score64 += v * (int64_t)v; break;
1274 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1275 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
/* negative exponent: normalize per-MB and invert the power */
1284 if (s->frame_skip_exp < 0)
1285 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1286 -1.0/s->frame_skip_exp);
1288 if (score64 < s->frame_skip_threshold)
1290 if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
/**
 * Encode a single frame with a helper AVCodecContext (send/receive API)
 * and report the encoded size; used by estimate_best_b_count().
 * NOTE(review): the size accumulation and final return are not visible
 * in this chunk — presumably the packet size is summed before unref.
 */
1295 static int encode_frame(AVCodecContext *c, AVFrame *frame, AVPacket *pkt)
1300 ret = avcodec_send_frame(c, frame);
1305 ret = avcodec_receive_packet(c, pkt);
1308 av_packet_unref(pkt);
/* EAGAIN/EOF are expected flow-control results, not errors */
1309 } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
/**
 * Estimate the best number of consecutive B-frames (b_frame_strategy 2)
 * by trial-encoding downscaled (by s->brd_scale) copies of the queued
 * input pictures with every candidate B-count j and picking the one
 * with the lowest rate-distortion score.
 *
 * @return the chosen B-frame count, or a negative error code.
 * NOTE(review): several lines (rd accumulation start, loop exits,
 * cleanup label) are not visible in this chunk.
 */
1316 static int estimate_best_b_count(MpegEncContext *s)
1318 const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1320 const int scale = s->brd_scale;
1321 int width = s->width >> scale;
1322 int height = s->height >> scale;
1323 int i, j, out_size, p_lambda, b_lambda, lambda2;
1324 int64_t best_rd = INT64_MAX;
1325 int best_b_count = -1;
1328 av_assert0(scale >= 0 && scale <= 3);
1330 pkt = av_packet_alloc();
1332 return AVERROR(ENOMEM);
1335 //s->next_picture_ptr->quality;
1336 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1337 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1338 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1339 if (!b_lambda) // FIXME we should do this somewhere else
1340 b_lambda = p_lambda;
1341 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* build downscaled copies: slot 0 is the last coded picture, the
 * following slots are the queued input pictures */
1344 for (i = 0; i < s->max_b_frames + 2; i++) {
1345 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1346 s->next_picture_ptr;
1349 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1350 pre_input = *pre_input_ptr;
1351 memcpy(data, pre_input_ptr->f->data, sizeof(data));
/* non-shared input pictures carry the INPLACE_OFFSET */
1353 if (!pre_input.shared && i) {
1354 data[0] += INPLACE_OFFSET;
1355 data[1] += INPLACE_OFFSET;
1356 data[2] += INPLACE_OFFSET;
1359 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1360 s->tmp_frames[i]->linesize[0],
1362 pre_input.f->linesize[0],
1364 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1365 s->tmp_frames[i]->linesize[1],
1367 pre_input.f->linesize[1],
1368 width >> 1, height >> 1),
1369 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1370 s->tmp_frames[i]->linesize[2],
1372 pre_input.f->linesize[2],
1373 width >> 1, height >> 1);
/* try every candidate B-frame run length j */
1377 for (j = 0; j < s->max_b_frames + 1; j++) {
1381 if (!s->input_picture[j])
1384 c = avcodec_alloc_context3(NULL);
1386 ret = AVERROR(ENOMEM);
/* trial encoder mirrors the relevant user settings */
1392 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1393 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1394 c->mb_decision = s->avctx->mb_decision;
1395 c->me_cmp = s->avctx->me_cmp;
1396 c->mb_cmp = s->avctx->mb_cmp;
1397 c->me_sub_cmp = s->avctx->me_sub_cmp;
1398 c->pix_fmt = AV_PIX_FMT_YUV420P;
1399 c->time_base = s->avctx->time_base;
1400 c->max_b_frames = s->max_b_frames;
1402 ret = avcodec_open2(c, codec, NULL);
/* first frame is coded as I to seed the trial encoder */
1407 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1408 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1410 out_size = encode_frame(c, s->tmp_frames[0], pkt);
1416 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1418 for (i = 0; i < s->max_b_frames + 1; i++) {
/* every (j+1)-th frame (and the last) is a P, the rest are B */
1419 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1421 s->tmp_frames[i + 1]->pict_type = is_p ?
1422 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1423 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1425 out_size = encode_frame(c, s->tmp_frames[i + 1], pkt);
1431 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1434 /* get the delayed frames */
1435 out_size = encode_frame(c, NULL, pkt);
1440 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* add distortion (accumulated SSE) to the rate term */
1442 rd += c->error[0] + c->error[1] + c->error[2];
1450 avcodec_free_context(&c);
1451 av_packet_unref(pkt);
1458 av_packet_free(&pkt);
1460 return best_b_count;
/**
 * Pick the next picture to encode from the input queue.
 *
 * Handles frame skipping, forced I-frames (no reference / intra_only),
 * two-pass picture types, the three B-frame strategies, GOP boundaries
 * and closed GOPs, then reorders the chosen pictures into
 * s->reordered_input_picture[] and sets up s->new_picture /
 * s->current_picture_ptr (copying shared input to a private buffer when
 * it cannot be modified in place).
 *
 * NOTE(review): goto labels, some braces and early returns are not
 * visible in this chunk; comments reflect only the visible lines.
 */
1463 static int select_input_picture(MpegEncContext *s)
/* shift the reordered queue down by one */
1467 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1468 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1469 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1471 /* set next picture type & ordering */
1472 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1473 if (s->frame_skip_threshold || s->frame_skip_factor) {
1474 if (s->picture_in_gop_number < s->gop_size &&
1475 s->next_picture_ptr &&
1476 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1477 // FIXME check that the gop check above is +-1 correct
1478 av_frame_unref(s->input_picture[0]->f);
/* record the skipped frame as zero bits for rate control */
1480 ff_vbv_update(s, 0);
1486 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1487 !s->next_picture_ptr || s->intra_only) {
/* no reference available (or intra-only) -> force an I-frame */
1488 s->reordered_input_picture[0] = s->input_picture[0];
1489 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1490 s->reordered_input_picture[0]->f->coded_picture_number =
1491 s->coded_picture_number++;
/* pass 2: take the picture types recorded during the first pass */
1495 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1496 for (i = 0; i < s->max_b_frames + 1; i++) {
1497 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1499 if (pict_num >= s->rc_context.num_entries)
1501 if (!s->input_picture[i]) {
1502 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1506 s->input_picture[i]->f->pict_type =
1507 s->rc_context.entry[pict_num].new_pict_type;
/* strategy 0: always use max_b_frames (trimmed to available input) */
1511 if (s->b_frame_strategy == 0) {
1512 b_frames = s->max_b_frames;
1513 while (b_frames && !s->input_picture[b_frames])
/* strategy 1: pick run length from per-frame intra-count scores */
1515 } else if (s->b_frame_strategy == 1) {
1516 for (i = 1; i < s->max_b_frames + 1; i++) {
1517 if (s->input_picture[i] &&
1518 s->input_picture[i]->b_frame_score == 0) {
1519 s->input_picture[i]->b_frame_score =
1521 s->input_picture[i ]->f->data[0],
1522 s->input_picture[i - 1]->f->data[0],
1526 for (i = 0; i < s->max_b_frames + 1; i++) {
1527 if (!s->input_picture[i] ||
1528 s->input_picture[i]->b_frame_score - 1 >
1529 s->mb_num / s->b_sensitivity)
1533 b_frames = FFMAX(0, i - 1);
/* reset scores so the next run is re-evaluated */
1536 for (i = 0; i < b_frames + 1; i++) {
1537 s->input_picture[i]->b_frame_score = 0;
/* strategy 2: trial-encode downscaled frames */
1539 } else if (s->b_frame_strategy == 2) {
1540 b_frames = estimate_best_b_count(s);
/* honor user-forced picture types inside the run */
1547 for (i = b_frames - 1; i >= 0; i--) {
1548 int type = s->input_picture[i]->f->pict_type;
1549 if (type && type != AV_PICTURE_TYPE_B)
1552 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1553 b_frames == s->max_b_frames) {
1554 av_log(s->avctx, AV_LOG_ERROR,
1555 "warning, too many B-frames in a row\n");
/* GOP boundary handling */
1558 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1559 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1560 s->gop_size > s->picture_in_gop_number) {
1561 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1563 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1565 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1569 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1570 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
/* reorder: reference frame first, then the B-frames */
1573 s->reordered_input_picture[0] = s->input_picture[b_frames];
1574 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1575 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1576 s->reordered_input_picture[0]->f->coded_picture_number =
1577 s->coded_picture_number++;
1578 for (i = 0; i < b_frames; i++) {
1579 s->reordered_input_picture[i + 1] = s->input_picture[i];
1580 s->reordered_input_picture[i + 1]->f->pict_type =
1582 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1583 s->coded_picture_number++;
1588 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1590 if (s->reordered_input_picture[0]) {
/* non-B pictures become references (3 = both fields) */
1591 s->reordered_input_picture[0]->reference =
1592 s->reordered_input_picture[0]->f->pict_type !=
1593 AV_PICTURE_TYPE_B ? 3 : 0;
1595 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1598 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1599 // input is a shared pix, so we can't modify it -> allocate a new
1600 // one & ensure that the shared one is reuseable
1603 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1606 pic = &s->picture[i];
1608 pic->reference = s->reordered_input_picture[0]->reference;
1609 if (alloc_picture(s, pic, 0) < 0) {
1613 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1617 /* mark us unused / free shared pic */
1618 av_frame_unref(s->reordered_input_picture[0]->f);
1619 s->reordered_input_picture[0]->shared = 0;
1621 s->current_picture_ptr = pic;
1623 // input is not a shared pix -> reuse buffer for current_pix
1624 s->current_picture_ptr = s->reordered_input_picture[0];
1625 for (i = 0; i < 4; i++) {
1626 if (s->new_picture.f->data[i])
1627 s->new_picture.f->data[i] += INPLACE_OFFSET;
1630 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1631 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1632 s->current_picture_ptr)) < 0)
1635 s->picture_number = s->new_picture.f->display_picture_number;
/**
 * Post-encode bookkeeping: pad the edges of the reconstructed reference
 * picture (for unrestricted MV search into the borders), record the
 * last picture type and lambda, and update the deprecated
 * coded_frame / error[] compatibility fields.
 */
1640 static void frame_end(MpegEncContext *s)
1642 if (s->unrestricted_mv &&
1643 s->current_picture.reference &&
1645 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1646 int hshift = desc->log2_chroma_w;
1647 int vshift = desc->log2_chroma_h;
/* extend luma, then both chroma planes (scaled by chroma shift) */
1648 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1649 s->current_picture.f->linesize[0],
1650 s->h_edge_pos, s->v_edge_pos,
1651 EDGE_WIDTH, EDGE_WIDTH,
1652 EDGE_TOP | EDGE_BOTTOM);
1653 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1654 s->current_picture.f->linesize[1],
1655 s->h_edge_pos >> hshift,
1656 s->v_edge_pos >> vshift,
1657 EDGE_WIDTH >> hshift,
1658 EDGE_WIDTH >> vshift,
1659 EDGE_TOP | EDGE_BOTTOM);
1660 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1661 s->current_picture.f->linesize[2],
1662 s->h_edge_pos >> hshift,
1663 s->v_edge_pos >> vshift,
1664 EDGE_WIDTH >> hshift,
1665 EDGE_WIDTH >> vshift,
1666 EDGE_TOP | EDGE_BOTTOM);
/* remember type/lambda for the next frame's decisions */
1671 s->last_pict_type = s->pict_type;
1672 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1673 if (s->pict_type!= AV_PICTURE_TYPE_B)
1674 s->last_non_b_pict_type = s->pict_type;
/* deprecated public-API mirrors, kept while the FF_API_* guards last */
1676 #if FF_API_CODED_FRAME
1677 FF_DISABLE_DEPRECATION_WARNINGS
1678 av_frame_unref(s->avctx->coded_frame);
1679 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1680 FF_ENABLE_DEPRECATION_WARNINGS
1682 #if FF_API_ERROR_FRAME
1683 FF_DISABLE_DEPRECATION_WARNINGS
1684 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1685 sizeof(s->current_picture.encoding_error));
1686 FF_ENABLE_DEPRECATION_WARNINGS
/**
 * Refresh the noise-reduction DCT offset tables from the running
 * per-coefficient error sums, separately for intra and inter blocks.
 * Counts are halved once they exceed 2^16 to keep a moving average.
 */
1690 static void update_noise_reduction(MpegEncContext *s)
1694 for (intra = 0; intra < 2; intra++) {
/* decay the statistics so they track recent frames */
1695 if (s->dct_count[intra] > (1 << 16)) {
1696 for (i = 0; i < 64; i++) {
1697 s->dct_error_sum[intra][i] >>= 1;
1699 s->dct_count[intra] >>= 1;
/* offset = noise_reduction * count / error_sum, rounded */
1702 for (i = 0; i < 64; i++) {
1703 s->dct_offset[intra][i] = (s->noise_reduction *
1704 s->dct_count[intra] +
1705 s->dct_error_sum[intra][i] / 2) /
1706 (s->dct_error_sum[intra][i] + 1);
/**
 * Per-frame setup before encoding: rotate last/next/current picture
 * references, adjust data pointers and line sizes for field pictures,
 * select the matching dct_unquantize functions for the output format,
 * and refresh noise-reduction tables when enabled.
 * @return 0 on success or a negative error from ff_mpeg_ref_picture()
 *         (return paths are partially elided in this chunk).
 */
1711 static int frame_start(MpegEncContext *s)
1715 /* mark & release old frames */
1716 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1717 s->last_picture_ptr != s->next_picture_ptr &&
1718 s->last_picture_ptr->f->buf[0]) {
1719 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1722 s->current_picture_ptr->f->pict_type = s->pict_type;
1723 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1725 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1726 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1727 s->current_picture_ptr)) < 0)
/* non-B frames advance the reference chain */
1730 if (s->pict_type != AV_PICTURE_TYPE_B) {
1731 s->last_picture_ptr = s->next_picture_ptr;
1733 s->next_picture_ptr = s->current_picture_ptr;
1736 if (s->last_picture_ptr) {
1737 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1738 if (s->last_picture_ptr->f->buf[0] &&
1739 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1740 s->last_picture_ptr)) < 0)
1743 if (s->next_picture_ptr) {
1744 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1745 if (s->next_picture_ptr->f->buf[0] &&
1746 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1747 s->next_picture_ptr)) < 0)
/* field pictures: double strides, bottom field starts one line in */
1751 if (s->picture_structure!= PICT_FRAME) {
1753 for (i = 0; i < 4; i++) {
1754 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1755 s->current_picture.f->data[i] +=
1756 s->current_picture.f->linesize[i];
1758 s->current_picture.f->linesize[i] *= 2;
1759 s->last_picture.f->linesize[i] *= 2;
1760 s->next_picture.f->linesize[i] *= 2;
/* select unquantizers matching the bitstream format */
1764 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1765 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1766 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1767 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1768 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1769 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1771 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1772 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1775 if (s->dct_error_sum) {
1776 av_assert2(s->noise_reduction && s->encoding);
1777 update_noise_reduction(s);
/**
 * Top-level per-frame entry point of the MPEG-family encoders.
 *
 * Loads and reorders the input picture, allocates the output packet,
 * encodes the picture (re-encoding with a higher lambda when the VBV
 * buffer would overflow), writes pass-1 stats and PSNR side data,
 * appends VBV stuffing, patches the MPEG-1/2 vbv_delay for CBR, and
 * fills in pkt pts/dts/flags.
 *
 * NOTE(review): numerous lines (error paths, braces, goto labels) are
 * elided in this chunk; comments are restricted to visible code.
 */
1783 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1784 const AVFrame *pic_arg, int *got_packet)
1786 MpegEncContext *s = avctx->priv_data;
1787 int i, stuffing_count, ret;
1788 int context_count = s->slice_context_count;
1790 s->vbv_ignore_qmax = 0;
1792 s->picture_in_gop_number++;
1794 if (load_input_picture(s, pic_arg) < 0)
1797 if (select_input_picture(s) < 0) {
/* output? — only when reordering produced a picture to encode */
1802 if (s->new_picture.f->data[0]) {
/* single-slice contexts can grow the packet as needed; otherwise
 * allocate the worst-case size up front */
1803 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1804 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1806 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1807 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1810 s->mb_info_ptr = av_packet_new_side_data(pkt,
1811 AV_PKT_DATA_H263_MB_INFO,
1812 s->mb_width*s->mb_height*12);
1813 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* split the packet proportionally between slice threads */
1816 for (i = 0; i < context_count; i++) {
1817 int start_y = s->thread_context[i]->start_mb_y;
1818 int end_y = s->thread_context[i]-> end_mb_y;
1819 int h = s->mb_height;
1820 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1821 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1823 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1826 s->pict_type = s->new_picture.f->pict_type;
1828 ret = frame_start(s);
1832 ret = encode_picture(s, s->picture_number);
1833 if (growing_buffer) {
1834 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1835 pkt->data = s->pb.buf;
1836 pkt->size = avctx->internal->byte_buffer_size;
/* deprecated per-frame statistics mirrors */
1841 #if FF_API_STAT_BITS
1842 FF_DISABLE_DEPRECATION_WARNINGS
1843 avctx->header_bits = s->header_bits;
1844 avctx->mv_bits = s->mv_bits;
1845 avctx->misc_bits = s->misc_bits;
1846 avctx->i_tex_bits = s->i_tex_bits;
1847 avctx->p_tex_bits = s->p_tex_bits;
1848 avctx->i_count = s->i_count;
1849 // FIXME f/b_count in avctx
1850 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1851 avctx->skip_count = s->skip_count;
1852 FF_ENABLE_DEPRECATION_WARNINGS
1857 if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->out_format == FMT_MJPEG)
1858 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
/* VBV check: if the frame is too big, bump lambda and re-encode */
1860 if (avctx->rc_buffer_size) {
1861 RateControlContext *rcc = &s->rc_context;
1862 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1863 int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
1864 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1866 if (put_bits_count(&s->pb) > max_size &&
1867 s->lambda < s->lmax) {
1868 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1869 (s->qscale + 1) / s->qscale);
1870 if (s->adaptive_quant) {
1872 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1873 s->lambda_table[i] =
1874 FFMAX(s->lambda_table[i] + min_step,
1875 s->lambda_table[i] * (s->qscale + 1) /
1878 s->mb_skipped = 0; // done in frame_start()
1879 // done in encode_picture() so we must undo it
1880 if (s->pict_type == AV_PICTURE_TYPE_P) {
1881 if (s->flipflop_rounding ||
1882 s->codec_id == AV_CODEC_ID_H263P ||
1883 s->codec_id == AV_CODEC_ID_MPEG4)
1884 s->no_rounding ^= 1;
1886 if (s->pict_type != AV_PICTURE_TYPE_B) {
1887 s->time_base = s->last_time_base;
1888 s->last_non_b_time = s->time - s->pp_time;
/* reset all slice bit writers before re-encoding */
1890 for (i = 0; i < context_count; i++) {
1891 PutBitContext *pb = &s->thread_context[i]->pb;
1892 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1894 s->vbv_ignore_qmax = 1;
1895 av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1899 av_assert0(avctx->rc_max_rate);
1902 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1903 ff_write_pass1_stats(s);
/* propagate PSNR / error stats into the context and side data */
1905 for (i = 0; i < 4; i++) {
1906 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1907 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1909 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1910 s->current_picture_ptr->encoding_error,
1911 (avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1914 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1915 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1916 s->misc_bits + s->i_tex_bits +
1918 flush_put_bits(&s->pb);
1919 s->frame_bits = put_bits_count(&s->pb);
1921 stuffing_count = ff_vbv_update(s, s->frame_bits);
1922 s->stuffing_bits = 8*stuffing_count;
1923 if (stuffing_count) {
1924 if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
1925 av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
/* codec-specific stuffing bytes to keep the VBV buffer fed */
1929 switch (s->codec_id) {
1930 case AV_CODEC_ID_MPEG1VIDEO:
1931 case AV_CODEC_ID_MPEG2VIDEO:
1932 while (stuffing_count--) {
1933 put_bits(&s->pb, 8, 0);
1936 case AV_CODEC_ID_MPEG4:
/* MPEG-4 stuffing: a 0x1C3 stuffing start code, then 0xFF bytes */
1937 put_bits(&s->pb, 16, 0);
1938 put_bits(&s->pb, 16, 0x1C3);
1939 stuffing_count -= 4;
1940 while (stuffing_count--) {
1941 put_bits(&s->pb, 8, 0xFF);
1945 av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1947 flush_put_bits(&s->pb);
1948 s->frame_bits = put_bits_count(&s->pb);
1951 /* update MPEG-1/2 vbv_delay for CBR */
1952 if (avctx->rc_max_rate &&
1953 avctx->rc_min_rate == avctx->rc_max_rate &&
1954 s->out_format == FMT_MPEG1 &&
1955 90000LL * (avctx->rc_buffer_size - 1) <=
1956 avctx->rc_max_rate * 0xFFFFLL) {
1957 AVCPBProperties *props;
1960 int vbv_delay, min_delay;
1961 double inbits = avctx->rc_max_rate *
1962 av_q2d(avctx->time_base);
1963 int minbits = s->frame_bits - 8 *
1964 (s->vbv_delay_ptr - s->pb.buf - 1);
1965 double bits = s->rc_context.buffer_index + minbits - inbits;
1968 av_log(avctx, AV_LOG_ERROR,
1969 "Internal error, negative bits\n");
1971 av_assert1(s->repeat_first_field == 0);
1973 vbv_delay = bits * 90000 / avctx->rc_max_rate;
1974 min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
1977 vbv_delay = FFMAX(vbv_delay, min_delay);
1979 av_assert0(vbv_delay < 0xFFFF);
/* patch the 16-bit vbv_delay field in-place in the written header */
1981 s->vbv_delay_ptr[0] &= 0xF8;
1982 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1983 s->vbv_delay_ptr[1] = vbv_delay >> 5;
1984 s->vbv_delay_ptr[2] &= 0x07;
1985 s->vbv_delay_ptr[2] |= vbv_delay << 3;
1987 props = av_cpb_properties_alloc(&props_size);
1989 return AVERROR(ENOMEM);
1990 props->vbv_delay = vbv_delay * 300;
1992 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
1993 (uint8_t*)props, props_size);
1999 #if FF_API_VBV_DELAY
2000 FF_DISABLE_DEPRECATION_WARNINGS
2001 avctx->vbv_delay = vbv_delay * 300;
2002 FF_ENABLE_DEPRECATION_WARNINGS
2005 s->total_bits += s->frame_bits;
2006 #if FF_API_STAT_BITS
2007 FF_DISABLE_DEPRECATION_WARNINGS
2008 avctx->frame_bits = s->frame_bits;
2009 FF_ENABLE_DEPRECATION_WARNINGS
/* packet timing: dts is pts shifted by the B-frame reorder delay */
2013 pkt->pts = s->current_picture.f->pts;
2014 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2015 if (!s->current_picture.f->coded_picture_number)
2016 pkt->dts = pkt->pts - s->dts_delta;
2018 pkt->dts = s->reordered_pts;
2019 s->reordered_pts = pkt->pts;
2021 pkt->dts = pkt->pts;
2022 if (s->current_picture.f->key_frame)
2023 pkt->flags |= AV_PKT_FLAG_KEY;
2025 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2030 /* release non-reference frames */
2031 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2032 if (!s->picture[i].reference)
2033 ff_mpeg_unref_picture(avctx, &s->picture[i]);
2036 av_assert1((s->frame_bits & 7) == 0);
2038 pkt->size = s->frame_bits / 8;
2039 *got_packet = !!pkt->size;
/**
 * Zero out a block whose coefficients are so sparse/small that coding
 * them costs more than they are worth: each nonzero level-1 coefficient
 * contributes a position-dependent weight (tab[]); if the total stays
 * below |threshold| the block (optionally except its DC) is cleared.
 * A negative threshold means "keep the DC coefficient" (skip_dc).
 * NOTE(review): the score accumulation and the skip_dc assignment are
 * partially elided in this chunk.
 */
2043 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2044 int n, int threshold)
/* weight of a level-1 coefficient by scan position: early (low
 * frequency) positions are cheap, later ones free */
2046 static const char tab[64] = {
2047 3, 2, 2, 1, 1, 1, 1, 1,
2048 1, 1, 1, 1, 1, 1, 1, 1,
2049 1, 1, 1, 1, 1, 1, 1, 1,
2050 0, 0, 0, 0, 0, 0, 0, 0,
2051 0, 0, 0, 0, 0, 0, 0, 0,
2052 0, 0, 0, 0, 0, 0, 0, 0,
2053 0, 0, 0, 0, 0, 0, 0, 0,
2054 0, 0, 0, 0, 0, 0, 0, 0
2059 int16_t *block = s->block[n];
2060 const int last_index = s->block_last_index[n];
2063 if (threshold < 0) {
2065 threshold = -threshold;
2069 /* Are all we could set to zero already zero? */
2070 if (last_index <= skip_dc - 1)
2073 for (i = 0; i <= last_index; i++) {
2074 const int j = s->intra_scantable.permutated[i];
2075 const int level = FFABS(block[j]);
2077 if (skip_dc && i == 0)
/* any coefficient with |level| > 1 makes the block worth keeping */
2081 } else if (level > 1) {
2087 if (score >= threshold)
/* clear everything after (optionally) the DC coefficient */
2089 for (i = skip_dc; i <= last_index; i++) {
2090 const int j = s->intra_scantable.permutated[i];
2094 s->block_last_index[n] = 0;
2096 s->block_last_index[n] = -1;
/**
 * Clamp quantized coefficients into [s->min_qcoeff, s->max_qcoeff] so
 * they fit the bitstream's representable range; intra DC is never
 * clipped. Logs a warning once per block in simple MB-decision mode.
 */
2099 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2103 const int maxlevel = s->max_qcoeff;
2104 const int minlevel = s->min_qcoeff;
2108 i = 1; // skip clipping of intra dc
2112 for (; i <= last_index; i++) {
2113 const int j = s->intra_scantable.permutated[i];
2114 int level = block[j];
2116 if (level > maxlevel) {
2119 } else if (level < minlevel) {
/* count clipped coefficients for the diagnostic below */
2127 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2128 av_log(s->avctx, AV_LOG_INFO,
2129 "warning, clipping %d dct coefficients to %d..%d\n",
2130 overflow, minlevel, maxlevel);
/**
 * Compute a per-pixel visual masking weight for an 8x8 block: for each
 * pixel, the local 3x3 variance (clamped at the block edges) is turned
 * into a weight via 36*sqrt(count*sqr - sum^2)/count — flat areas get
 * large weights (errors are visible), textured areas small ones.
 * Used by the quantizer-noise-shaping path.
 * NOTE(review): the count/sum/sqr initialization lines are elided in
 * this chunk.
 */
2133 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2137 for (y = 0; y < 8; y++) {
2138 for (x = 0; x < 8; x++) {
/* 3x3 neighborhood clipped to the block */
2144 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2145 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2146 int v = ptr[x2 + y2 * stride];
2152 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2157 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2158 int motion_x, int motion_y,
2159 int mb_block_height,
2163 int16_t weight[12][64];
2164 int16_t orig[12][64];
2165 const int mb_x = s->mb_x;
2166 const int mb_y = s->mb_y;
2169 int dct_offset = s->linesize * 8; // default for progressive frames
2170 int uv_dct_offset = s->uvlinesize * 8;
2171 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2172 ptrdiff_t wrap_y, wrap_c;
2174 for (i = 0; i < mb_block_count; i++)
2175 skip_dct[i] = s->skipdct;
2177 if (s->adaptive_quant) {
2178 const int last_qp = s->qscale;
2179 const int mb_xy = mb_x + mb_y * s->mb_stride;
2181 s->lambda = s->lambda_table[mb_xy];
2184 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2185 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2186 s->dquant = s->qscale - last_qp;
2188 if (s->out_format == FMT_H263) {
2189 s->dquant = av_clip(s->dquant, -2, 2);
2191 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2193 if (s->pict_type == AV_PICTURE_TYPE_B) {
2194 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2197 if (s->mv_type == MV_TYPE_8X8)
2203 ff_set_qscale(s, last_qp + s->dquant);
2204 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2205 ff_set_qscale(s, s->qscale + s->dquant);
2207 wrap_y = s->linesize;
2208 wrap_c = s->uvlinesize;
2209 ptr_y = s->new_picture.f->data[0] +
2210 (mb_y * 16 * wrap_y) + mb_x * 16;
2211 ptr_cb = s->new_picture.f->data[1] +
2212 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2213 ptr_cr = s->new_picture.f->data[2] +
2214 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2216 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2217 uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2218 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2219 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2220 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2222 16, 16, mb_x * 16, mb_y * 16,
2223 s->width, s->height);
2225 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2227 mb_block_width, mb_block_height,
2228 mb_x * mb_block_width, mb_y * mb_block_height,
2230 ptr_cb = ebuf + 16 * wrap_y;
2231 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2233 mb_block_width, mb_block_height,
2234 mb_x * mb_block_width, mb_y * mb_block_height,
2236 ptr_cr = ebuf + 16 * wrap_y + 16;
2240 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2241 int progressive_score, interlaced_score;
2243 s->interlaced_dct = 0;
2244 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2245 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2246 NULL, wrap_y, 8) - 400;
2248 if (progressive_score > 0) {
2249 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2250 NULL, wrap_y * 2, 8) +
2251 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2252 NULL, wrap_y * 2, 8);
2253 if (progressive_score > interlaced_score) {
2254 s->interlaced_dct = 1;
2256 dct_offset = wrap_y;
2257 uv_dct_offset = wrap_c;
2259 if (s->chroma_format == CHROMA_422 ||
2260 s->chroma_format == CHROMA_444)
2266 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2267 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2268 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2269 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2271 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2275 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2276 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2277 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2278 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2279 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2280 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2281 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2282 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2283 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2284 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2285 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2286 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2290 op_pixels_func (*op_pix)[4];
2291 qpel_mc_func (*op_qpix)[16];
2292 uint8_t *dest_y, *dest_cb, *dest_cr;
2294 dest_y = s->dest[0];
2295 dest_cb = s->dest[1];
2296 dest_cr = s->dest[2];
2298 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2299 op_pix = s->hdsp.put_pixels_tab;
2300 op_qpix = s->qdsp.put_qpel_pixels_tab;
2302 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2303 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2306 if (s->mv_dir & MV_DIR_FORWARD) {
2307 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2308 s->last_picture.f->data,
2310 op_pix = s->hdsp.avg_pixels_tab;
2311 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2313 if (s->mv_dir & MV_DIR_BACKWARD) {
2314 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2315 s->next_picture.f->data,
2319 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2320 int progressive_score, interlaced_score;
2322 s->interlaced_dct = 0;
2323 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2324 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2328 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2329 progressive_score -= 400;
2331 if (progressive_score > 0) {
2332 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2334 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2338 if (progressive_score > interlaced_score) {
2339 s->interlaced_dct = 1;
2341 dct_offset = wrap_y;
2342 uv_dct_offset = wrap_c;
2344 if (s->chroma_format == CHROMA_422)
2350 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2351 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2352 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2353 dest_y + dct_offset, wrap_y);
2354 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2355 dest_y + dct_offset + 8, wrap_y);
2357 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2361 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2362 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2363 if (!s->chroma_y_shift) { /* 422 */
2364 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2365 dest_cb + uv_dct_offset, wrap_c);
2366 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2367 dest_cr + uv_dct_offset, wrap_c);
2370 /* pre quantization */
2371 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2372 2 * s->qscale * s->qscale) {
2374 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2376 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2378 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2379 wrap_y, 8) < 20 * s->qscale)
2381 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2382 wrap_y, 8) < 20 * s->qscale)
2384 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2386 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2388 if (!s->chroma_y_shift) { /* 422 */
2389 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2390 dest_cb + uv_dct_offset,
2391 wrap_c, 8) < 20 * s->qscale)
2393 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2394 dest_cr + uv_dct_offset,
2395 wrap_c, 8) < 20 * s->qscale)
2401 if (s->quantizer_noise_shaping) {
2403 get_visual_weight(weight[0], ptr_y , wrap_y);
2405 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2407 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2409 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2411 get_visual_weight(weight[4], ptr_cb , wrap_c);
2413 get_visual_weight(weight[5], ptr_cr , wrap_c);
2414 if (!s->chroma_y_shift) { /* 422 */
2416 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2419 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2422 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2425 /* DCT & quantize */
2426 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2428 for (i = 0; i < mb_block_count; i++) {
2431 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2432 // FIXME we could decide to change to quantizer instead of
2434 // JS: I don't think that would be a good idea it could lower
2435 // quality instead of improve it. Just INTRADC clipping
2436 // deserves changes in quantizer
2438 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2440 s->block_last_index[i] = -1;
2442 if (s->quantizer_noise_shaping) {
2443 for (i = 0; i < mb_block_count; i++) {
2445 s->block_last_index[i] =
2446 dct_quantize_refine(s, s->block[i], weight[i],
2447 orig[i], i, s->qscale);
2452 if (s->luma_elim_threshold && !s->mb_intra)
2453 for (i = 0; i < 4; i++)
2454 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2455 if (s->chroma_elim_threshold && !s->mb_intra)
2456 for (i = 4; i < mb_block_count; i++)
2457 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2459 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2460 for (i = 0; i < mb_block_count; i++) {
2461 if (s->block_last_index[i] == -1)
2462 s->coded_score[i] = INT_MAX / 256;
2467 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2468 s->block_last_index[4] =
2469 s->block_last_index[5] = 0;
2471 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2472 if (!s->chroma_y_shift) { /* 422 / 444 */
2473 for (i=6; i<12; i++) {
2474 s->block_last_index[i] = 0;
2475 s->block[i][0] = s->block[4][0];
2480 // non c quantize code returns incorrect block_last_index FIXME
2481 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2482 for (i = 0; i < mb_block_count; i++) {
2484 if (s->block_last_index[i] > 0) {
2485 for (j = 63; j > 0; j--) {
2486 if (s->block[i][s->intra_scantable.permutated[j]])
2489 s->block_last_index[i] = j;
2494 /* huffman encode */
2495 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2496 case AV_CODEC_ID_MPEG1VIDEO:
2497 case AV_CODEC_ID_MPEG2VIDEO:
2498 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2499 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2501 case AV_CODEC_ID_MPEG4:
2502 if (CONFIG_MPEG4_ENCODER)
2503 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2505 case AV_CODEC_ID_MSMPEG4V2:
2506 case AV_CODEC_ID_MSMPEG4V3:
2507 case AV_CODEC_ID_WMV1:
2508 if (CONFIG_MSMPEG4_ENCODER)
2509 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2511 case AV_CODEC_ID_WMV2:
2512 if (CONFIG_WMV2_ENCODER)
2513 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2515 case AV_CODEC_ID_H261:
2516 if (CONFIG_H261_ENCODER)
2517 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2519 case AV_CODEC_ID_H263:
2520 case AV_CODEC_ID_H263P:
2521 case AV_CODEC_ID_FLV1:
2522 case AV_CODEC_ID_RV10:
2523 case AV_CODEC_ID_RV20:
2524 if (CONFIG_H263_ENCODER)
2525 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2527 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
2528 case AV_CODEC_ID_MJPEG:
2529 case AV_CODEC_ID_AMV:
2530 ff_mjpeg_encode_mb(s, s->block);
2533 case AV_CODEC_ID_SPEEDHQ:
2534 if (CONFIG_SPEEDHQ_ENCODER)
2535 ff_speedhq_encode_mb(s, s->block);
/* Encode one macroblock: dispatch to encode_mb_internal() with geometry
 * constants that depend on the chroma subsampling. The trailing arguments
 * appear to be (chroma block height, chroma block width, total block count)
 * — TODO confirm against encode_mb_internal()'s parameter list:
 *   4:2:0 -> 8, 8, 6 blocks; 4:2:2 -> 16, 8, 8 blocks; else (4:4:4) -> 16, 16, 12. */
2542 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2544 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2545 else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2546 else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/* Snapshot the per-MB encoder state from 's' into 'd' before trial-encoding
 * a macroblock, so that the trial can later be rolled back (see encode_mb_hq()).
 * Copies MV prediction state, skip-run, DC predictors, the bit-count
 * statistics, quantizer state and the MPEG-4 escape-level length.
 * NOTE(review): the 'type' parameter is not used in the lines visible here —
 * presumably it gates some of the elided copies; confirm in full source. */
2549 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2552 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2555 d->mb_skip_run= s->mb_skip_run;
2557 d->last_dc[i] = s->last_dc[i];
2560 d->mv_bits= s->mv_bits;
2561 d->i_tex_bits= s->i_tex_bits;
2562 d->p_tex_bits= s->p_tex_bits;
2563 d->i_count= s->i_count;
2564 d->f_count= s->f_count;
2565 d->b_count= s->b_count;
2566 d->skip_count= s->skip_count;
2567 d->misc_bits= s->misc_bits;
2571 d->qscale= s->qscale;
2572 d->dquant= s->dquant;
2574 d->esc3_level_length= s->esc3_level_length;
/* Counterpart of copy_context_before_encode(): after a trial encode, copy the
 * resulting per-MB state from 's' back into 'd' (used both to record the best
 * candidate and to restore a previous state). In addition to what the
 * "before" copy saves, this also transfers the chosen MB mode (mb_intra,
 * mb_skipped, mv_type, mv_dir), the data-partitioning bitstream writers,
 * the per-block last-index array and the interlaced-DCT flag. */
2577 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2580 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2581 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2584 d->mb_skip_run= s->mb_skip_run;
2586 d->last_dc[i] = s->last_dc[i];
2589 d->mv_bits= s->mv_bits;
2590 d->i_tex_bits= s->i_tex_bits;
2591 d->p_tex_bits= s->p_tex_bits;
2592 d->i_count= s->i_count;
2593 d->f_count= s->f_count;
2594 d->b_count= s->b_count;
2595 d->skip_count= s->skip_count;
2596 d->misc_bits= s->misc_bits;
2598 d->mb_intra= s->mb_intra;
2599 d->mb_skipped= s->mb_skipped;
2600 d->mv_type= s->mv_type;
2601 d->mv_dir= s->mv_dir;
2603 if(s->data_partitioning){
2605 d->tex_pb= s->tex_pb;
2609 d->block_last_index[i]= s->block_last_index[i];
2610 d->interlaced_dct= s->interlaced_dct;
2611 d->qscale= s->qscale;
2613 d->esc3_level_length= s->esc3_level_length;
/* Trial-encode one macroblock with candidate mode 'type' for high-quality
 * (multi-candidate) mode decision. The MB is encoded into one of two
 * scratch PutBitContexts (double-buffered via *next_block); the score is the
 * bit count, or — in FF_MB_DECISION_RD mode — a full rate-distortion cost
 * lambda2*bits + SSE of the reconstructed MB (reconstructed into
 * sc.rd_scratchpad instead of the real destination).
 * If this candidate beats *dmin, the state is recorded into 'best'
 * via copy_context_after_encode() (the update of *dmin/*next_block happens
 * in lines elided here). */
2616 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2617 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2618 int *dmin, int *next_block, int motion_x, int motion_y)
2621 uint8_t *dest_backup[3];
2623 copy_context_before_encode(s, backup, type);
2625 s->block= s->blocks[*next_block];
2626 s->pb= pb[*next_block];
2627 if(s->data_partitioning){
2628 s->pb2 = pb2 [*next_block];
2629 s->tex_pb= tex_pb[*next_block];
/* redirect reconstruction into the RD scratchpad so the real picture
 * is untouched while scoring this candidate */
2633 memcpy(dest_backup, s->dest, sizeof(s->dest));
2634 s->dest[0] = s->sc.rd_scratchpad;
2635 s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2636 s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2637 av_assert0(s->linesize >= 32); //FIXME
2640 encode_mb(s, motion_x, motion_y);
2642 score= put_bits_count(&s->pb);
2643 if(s->data_partitioning){
2644 score+= put_bits_count(&s->pb2);
2645 score+= put_bits_count(&s->tex_pb);
2648 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2649 ff_mpv_reconstruct_mb(s, s->block);
2651 score *= s->lambda2;
2652 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2656 memcpy(s->dest, dest_backup, sizeof(s->dest));
2663 copy_context_after_encode(best, s, type);
/* Sum of squared errors between two w x h pixel regions with the given stride.
 * Uses the optimized mecc.sse[] functions for the common 16x16 and 8x8 sizes,
 * otherwise falls back to a scalar loop using the squared-difference lookup
 * table (ff_square_tab is biased by 256 so negative differences index it
 * correctly). */
2667 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2668 const uint32_t *sq = ff_square_tab + 256;
2673 return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2674 else if(w==8 && h==8)
2675 return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2679 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/* Distortion of the current macroblock: compare the reconstructed MB in
 * s->dest[] against the source picture (new_picture). Full 16x16 MBs take
 * the fast path (NSSE if mb_cmp selects it, else plain SSE via mecc);
 * MBs clipped at the right/bottom picture border fall back to the generic
 * sse() with the clipped width/height.
 * NOTE(review): the border path uses w>>1, h>>1 for chroma, i.e. assumes
 * 4:2:0 subsampling — confirm this path is only hit for 4:2:0. */
2688 static int sse_mb(MpegEncContext *s){
2692 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2693 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2696 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2697 return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2698 s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2699 s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2701 return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2702 s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2703 s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2706 return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2707 +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2708 +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/* Slice-threading worker: pre-pass motion estimation (used for e.g. the
 * pre_dia_size search). Iterates the slice's MB rows BOTTOM-UP and each row
 * RIGHT-TO-LEFT (the reverse order of the main pass), calling
 * ff_pre_estimate_p_frame_motion() per macroblock.
 * 'arg' is a pointer to the per-slice MpegEncContext pointer. */
2711 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2712 MpegEncContext *s= *(void**)arg;
2716 s->me.dia_size= s->avctx->pre_dia_size;
2717 s->first_slice_line=1;
2718 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2719 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2720 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2722 s->first_slice_line=0;
/* Slice-threading worker: main motion-estimation pass. Walks the slice's MBs
 * in raster order, keeping block_index[] up to date (each luma block index
 * advances by 2 per MB), and runs B- or P-frame motion estimation per MB,
 * storing MVs and mb_type in the context. */
2730 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2731 MpegEncContext *s= *(void**)arg;
2733 s->me.dia_size= s->avctx->dia_size;
2734 s->first_slice_line=1;
2735 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2736 s->mb_x=0; //for block init below
2737 ff_init_block_index(s);
2738 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2739 s->block_index[0]+=2;
2740 s->block_index[1]+=2;
2741 s->block_index[2]+=2;
2742 s->block_index[3]+=2;
2744 /* compute motion vector & mb_type and store in context */
2745 if(s->pict_type==AV_PICTURE_TYPE_B)
2746 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2748 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2750 s->first_slice_line=0;
/* Slice-threading worker: compute per-macroblock luma variance and mean of
 * the source picture, used later by rate control / intra decisions.
 * varc = (sum of squares - mean^2 scaled) per 16x16 block, mean = (sum+128)>>8.
 * Results go into current_picture.mb_var[] / mb_mean[] and the per-thread
 * variance sum accumulator (merged later in merge_context_after_me()). */
2755 static int mb_var_thread(AVCodecContext *c, void *arg){
2756 MpegEncContext *s= *(void**)arg;
2759 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2760 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2763 uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2765 int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2767 varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2768 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2770 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2771 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2772 s->me.mb_var_sum_temp += varc;
/* Finish the current slice: perform codec-specific end-of-slice work
 * (merge MPEG-4 data partitions + stuffing, MJPEG stuffing, SpeedHQ slice
 * end), byte-align/flush the bitstream writer, and in pass-1 rate-control
 * account the flushed bits as misc_bits. */
2778 static void write_slice_end(MpegEncContext *s){
2779 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2780 if(s->partitioned_frame){
2781 ff_mpeg4_merge_partitions(s);
2784 ff_mpeg4_stuffing(&s->pb);
2785 } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
2786 s->out_format == FMT_MJPEG) {
2787 ff_mjpeg_encode_stuffing(s);
2788 } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
2789 ff_speedhq_end_slice(s);
2792 flush_put_bits(&s->pb);
2794 if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2795 s->misc_bits+= get_bits_diff(s);
/* Write one 12-byte mb_info record (H.263 RFC 2190-style side data) for the
 * current macroblock into the mb_info buffer: bit offset in the slice, qscale,
 * GOB number, MB address within the GOB, and the motion-vector predictors.
 * The 4MV predictor fields (hmv2/vmv2) are not implemented and written as 0. */
2798 static void write_mb_info(MpegEncContext *s)
2800 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2801 int offset = put_bits_count(&s->pb);
2802 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2803 int gobn = s->mb_y / s->gob_index;
2805 if (CONFIG_H263_ENCODER)
2806 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2807 bytestream_put_le32(&ptr, offset);
2808 bytestream_put_byte(&ptr, s->qscale);
2809 bytestream_put_byte(&ptr, gobn);
2810 bytestream_put_le16(&ptr, mba);
2811 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2812 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2813 /* 4MV not implemented */
2814 bytestream_put_byte(&ptr, 0); /* hmv2 */
2815 bytestream_put_byte(&ptr, 0); /* vmv2 */
/* Decide whether a new mb_info record must be started. Called before each MB
 * (startcode=0) and again right after a startcode is written (startcode=1).
 * A new 12-byte slot is reserved once at least s->mb_info bytes have been
 * emitted since the previous record; the actual record contents are written
 * by write_mb_info() (call elided in the sampled lines here). */
2818 static void update_mb_info(MpegEncContext *s, int startcode)
2822 if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
2823 s->mb_info_size += 12;
2824 s->prev_mb_info = s->last_mb_info;
2827 s->prev_mb_info = put_bytes_count(&s->pb, 0);
2828 /* This might have incremented mb_info_size above, and we return without
2829 * actually writing any info into that slot yet. But in that case,
2830 * this will be called again at the start of the after writing the
2831 * start code, actually writing the mb info. */
2835 s->last_mb_info = put_bytes_count(&s->pb, 0);
2836 if (!s->mb_info_size)
2837 s->mb_info_size += 12;
/* Grow the shared output bitstream buffer when fewer than 'threshold' bytes
 * remain, enlarging it by at least 'size_increase'. Only applies when there is
 * a single slice context and the PutBitContext writes directly into the
 * avctx-internal byte buffer. On success the PutBitContext is rebased onto the
 * new buffer and the raw pointers that point into it (ptr_lastgob,
 * vbv_delay_ptr) are re-derived from their saved offsets.
 * Returns 0 on success, AVERROR(ENOMEM) if the buffer cannot grow, or
 * AVERROR(EINVAL) if even after this the remaining space is below threshold. */
2841 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2843 if (put_bytes_left(&s->pb, 0) < threshold
2844 && s->slice_context_count == 1
2845 && s->pb.buf == s->avctx->internal->byte_buffer) {
/* remember positions as offsets; the buffer base is about to change */
2846 int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2847 int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2849 uint8_t *new_buffer = NULL;
2850 int new_buffer_size = 0;
2852 if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2853 av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2854 return AVERROR(ENOMEM);
2859 av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2860 s->avctx->internal->byte_buffer_size + size_increase);
2862 return AVERROR(ENOMEM);
2864 memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2865 av_free(s->avctx->internal->byte_buffer);
2866 s->avctx->internal->byte_buffer = new_buffer;
2867 s->avctx->internal->byte_buffer_size = new_buffer_size;
2868 rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2869 s->ptr_lastgob = s->pb.buf + lastgob_pos;
2870 s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2872 if (put_bytes_left(&s->pb, 0) < threshold)
2873 return AVERROR(EINVAL);
/* Slice-threading worker: encode all macroblocks of this slice context.
 * This is the encoder main loop. Per MB it either:
 *  - trial-encodes every candidate MB type via encode_mb_hq() and keeps the
 *    cheapest (when multiple types are possible or QP-RD is enabled), or
 *  - directly encodes the single possible MB type.
 * It also handles resync/GOB/slice-header insertion, output-buffer growth,
 * per-MB side info, PSNR accumulation and the loop filter. */
2877 static int encode_thread(AVCodecContext *c, void *arg){
2878 MpegEncContext *s= *(void**)arg;
2879 int mb_x, mb_y, mb_y_order;
2880 int chr_h= 16>>s->chroma_y_shift;
2882 MpegEncContext best_s = { 0 }, backup_s;
/* double-buffered scratch bitstreams for encode_mb_hq() candidates */
2883 uint8_t bit_buf[2][MAX_MB_BYTES];
2884 uint8_t bit_buf2[2][MAX_MB_BYTES];
2885 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2886 PutBitContext pb[2], pb2[2], tex_pb[2];
2889 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2890 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2891 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2894 s->last_bits= put_bits_count(&s->pb);
2905 /* init last dc values */
2906 /* note: quant matrix value (8) is implied here */
2907 s->last_dc[i] = 128 << s->intra_dc_precision;
2909 s->current_picture.encoding_error[i] = 0;
2911 if(s->codec_id==AV_CODEC_ID_AMV){
/* AMV uses scaled DC predictors (luma 8/13, chroma 8/14 of 128) */
2912 s->last_dc[0] = 128*8/13;
2913 s->last_dc[1] = 128*8/14;
2914 s->last_dc[2] = 128*8/14;
2917 memset(s->last_mv, 0, sizeof(s->last_mv));
2921 switch(s->codec_id){
2922 case AV_CODEC_ID_H263:
2923 case AV_CODEC_ID_H263P:
2924 case AV_CODEC_ID_FLV1:
2925 if (CONFIG_H263_ENCODER)
2926 s->gob_index = H263_GOB_HEIGHT(s->height);
2928 case AV_CODEC_ID_MPEG4:
2929 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2930 ff_mpeg4_init_partitions(s);
2936 s->first_slice_line = 1;
2937 s->ptr_lastgob = s->pb.buf;
/* MB row loop; SpeedHQ encodes rows in a codec-specific order */
2938 for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
2939 if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
2941 mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
2942 if (first_in_slice && mb_y_order != s->start_mb_y)
2943 ff_speedhq_end_slice(s);
2944 s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
2951 ff_set_qscale(s, s->qscale);
2952 ff_init_block_index(s);
2954 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2955 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2956 int mb_type= s->mb_type[xy];
/* make sure the output buffer can hold at least one worst-case MB */
2960 int size_increase = s->avctx->internal->byte_buffer_size/4
2961 + s->mb_width*MAX_MB_BYTES;
2963 ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
2964 if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
2965 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2968 if(s->data_partitioning){
2969 if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
2970 put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
2971 av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2977 s->mb_y = mb_y; // moved into loop, can get changed by H.261
2978 ff_update_block_index(s);
2980 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2981 ff_h261_reorder_mb_index(s);
2982 xy= s->mb_y*s->mb_stride + s->mb_x;
2983 mb_type= s->mb_type[xy];
2986 /* write gob / video packet header */
2988 int current_packet_size, is_gob_start;
2990 current_packet_size = put_bytes_count(&s->pb, 1)
2991 - (s->ptr_lastgob - s->pb.buf);
2993 is_gob_start = s->rtp_payload_size &&
2994 current_packet_size >= s->rtp_payload_size &&
2997 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
/* codec-specific restrictions on where a GOB/slice may start */
2999 switch(s->codec_id){
3000 case AV_CODEC_ID_H263:
3001 case AV_CODEC_ID_H263P:
3002 if(!s->h263_slice_structured)
3003 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3005 case AV_CODEC_ID_MPEG2VIDEO:
3006 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3007 case AV_CODEC_ID_MPEG1VIDEO:
3008 if(s->mb_skip_run) is_gob_start=0;
3010 case AV_CODEC_ID_MJPEG:
3011 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3016 if(s->start_mb_y != mb_y || mb_x!=0){
3019 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3020 ff_mpeg4_init_partitions(s);
3024 av_assert2((put_bits_count(&s->pb)&7) == 0);
3025 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
/* error_rate debug option: randomly drop packets by rewinding to the
 * last GOB start */
3027 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3028 int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->mb_x + s->mb_y;
3029 int d = 100 / s->error_rate;
3031 current_packet_size=0;
3032 s->pb.buf_ptr= s->ptr_lastgob;
3033 av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3037 #if FF_API_RTP_CALLBACK
3038 FF_DISABLE_DEPRECATION_WARNINGS
3039 if (s->avctx->rtp_callback){
3040 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3041 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3043 FF_ENABLE_DEPRECATION_WARNINGS
3045 update_mb_info(s, 1);
/* emit the codec-specific resync/slice/GOB header */
3047 switch(s->codec_id){
3048 case AV_CODEC_ID_MPEG4:
3049 if (CONFIG_MPEG4_ENCODER) {
3050 ff_mpeg4_encode_video_packet_header(s);
3051 ff_mpeg4_clean_buffers(s);
3054 case AV_CODEC_ID_MPEG1VIDEO:
3055 case AV_CODEC_ID_MPEG2VIDEO:
3056 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3057 ff_mpeg1_encode_slice_header(s);
3058 ff_mpeg1_clean_buffers(s);
3061 case AV_CODEC_ID_H263:
3062 case AV_CODEC_ID_H263P:
3063 if (CONFIG_H263_ENCODER)
3064 ff_h263_encode_gob_header(s, mb_y);
3068 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3069 int bits= put_bits_count(&s->pb);
3070 s->misc_bits+= bits - s->last_bits;
3074 s->ptr_lastgob += current_packet_size;
3075 s->first_slice_line=1;
3076 s->resync_mb_x=mb_x;
3077 s->resync_mb_y=mb_y;
3081 if( (s->resync_mb_x == s->mb_x)
3082 && s->resync_mb_y+1 == s->mb_y){
3083 s->first_slice_line=0;
3087 s->dquant=0; //only for QP_RD
3089 update_mb_info(s, 0);
/* --- multi-candidate mode decision path --- */
3091 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3093 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3095 copy_context_before_encode(&backup_s, s, -1);
3097 best_s.data_partitioning= s->data_partitioning;
3098 best_s.partitioned_frame= s->partitioned_frame;
3099 if(s->data_partitioning){
3100 backup_s.pb2= s->pb2;
3101 backup_s.tex_pb= s->tex_pb;
/* try each candidate MB type that motion estimation marked possible */
3104 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3105 s->mv_dir = MV_DIR_FORWARD;
3106 s->mv_type = MV_TYPE_16X16;
3108 s->mv[0][0][0] = s->p_mv_table[xy][0];
3109 s->mv[0][0][1] = s->p_mv_table[xy][1];
3110 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3111 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3113 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3114 s->mv_dir = MV_DIR_FORWARD;
3115 s->mv_type = MV_TYPE_FIELD;
3118 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3119 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3120 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3122 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3123 &dmin, &next_block, 0, 0);
3125 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3126 s->mv_dir = MV_DIR_FORWARD;
3127 s->mv_type = MV_TYPE_16X16;
3131 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3132 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3134 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3135 s->mv_dir = MV_DIR_FORWARD;
3136 s->mv_type = MV_TYPE_8X8;
3139 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3140 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3142 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3143 &dmin, &next_block, 0, 0);
3145 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3146 s->mv_dir = MV_DIR_FORWARD;
3147 s->mv_type = MV_TYPE_16X16;
3149 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3150 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3151 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3152 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3154 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3155 s->mv_dir = MV_DIR_BACKWARD;
3156 s->mv_type = MV_TYPE_16X16;
3158 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3159 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3160 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3161 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3163 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3164 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3165 s->mv_type = MV_TYPE_16X16;
3167 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3168 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3169 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3170 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3171 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3172 &dmin, &next_block, 0, 0);
3174 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3175 s->mv_dir = MV_DIR_FORWARD;
3176 s->mv_type = MV_TYPE_FIELD;
3179 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3180 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3181 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3183 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3184 &dmin, &next_block, 0, 0);
3186 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3187 s->mv_dir = MV_DIR_BACKWARD;
3188 s->mv_type = MV_TYPE_FIELD;
3191 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3192 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3193 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3195 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3196 &dmin, &next_block, 0, 0);
3198 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3199 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3200 s->mv_type = MV_TYPE_FIELD;
3202 for(dir=0; dir<2; dir++){
3204 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3205 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3206 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3209 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3210 &dmin, &next_block, 0, 0);
3212 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3214 s->mv_type = MV_TYPE_16X16;
3218 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3219 &dmin, &next_block, 0, 0);
3220 if(s->h263_pred || s->h263_aic){
3222 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3224 ff_clean_intra_table_entries(s); //old mode?
/* QP-RD: additionally try the best mode with nearby quantizers,
 * saving/restoring DC and AC prediction state around each trial */
3228 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3229 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3230 const int last_qp= backup_s.qscale;
3233 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3234 static const int dquant_tab[4]={-1,1,-2,2};
3235 int storecoefs = s->mb_intra && s->dc_val[0];
3237 av_assert2(backup_s.dquant == 0);
3240 s->mv_dir= best_s.mv_dir;
3241 s->mv_type = MV_TYPE_16X16;
3242 s->mb_intra= best_s.mb_intra;
3243 s->mv[0][0][0] = best_s.mv[0][0][0];
3244 s->mv[0][0][1] = best_s.mv[0][0][1];
3245 s->mv[1][0][0] = best_s.mv[1][0][0];
3246 s->mv[1][0][1] = best_s.mv[1][0][1];
3248 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3249 for(; qpi<4; qpi++){
3250 int dquant= dquant_tab[qpi];
3251 qp= last_qp + dquant;
3252 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3254 backup_s.dquant= dquant;
3257 dc[i]= s->dc_val[0][ s->block_index[i] ];
3258 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3262 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3263 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3264 if(best_s.qscale != qp){
3267 s->dc_val[0][ s->block_index[i] ]= dc[i];
3268 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3275 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3276 int mx= s->b_direct_mv_table[xy][0];
3277 int my= s->b_direct_mv_table[xy][1];
3279 backup_s.dquant = 0;
3280 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3282 ff_mpeg4_set_direct_mv(s, mx, my);
3283 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3284 &dmin, &next_block, mx, my);
3286 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3287 backup_s.dquant = 0;
3288 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3290 ff_mpeg4_set_direct_mv(s, 0, 0);
3291 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3292 &dmin, &next_block, 0, 0);
/* SKIP-RD: if the best inter mode coded no coefficients, also try it
 * as an explicitly skipped MB */
3294 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3297 coded |= s->block_last_index[i];
3300 memcpy(s->mv, best_s.mv, sizeof(s->mv));
3301 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3302 mx=my=0; //FIXME find the one we actually used
3303 ff_mpeg4_set_direct_mv(s, mx, my);
3304 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3312 s->mv_dir= best_s.mv_dir;
3313 s->mv_type = best_s.mv_type;
3315 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3316 s->mv[0][0][1] = best_s.mv[0][0][1];
3317 s->mv[1][0][0] = best_s.mv[1][0][0];
3318 s->mv[1][0][1] = best_s.mv[1][0][1];*/
3321 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3322 &dmin, &next_block, mx, my);
/* commit the winning candidate: copy its state back and append its
 * scratch bitstream(s) to the real output */
3327 s->current_picture.qscale_table[xy] = best_s.qscale;
3329 copy_context_after_encode(s, &best_s, -1);
3331 pb_bits_count= put_bits_count(&s->pb);
3332 flush_put_bits(&s->pb);
3333 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3336 if(s->data_partitioning){
3337 pb2_bits_count= put_bits_count(&s->pb2);
3338 flush_put_bits(&s->pb2);
3339 ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3340 s->pb2= backup_s.pb2;
3342 tex_pb_bits_count= put_bits_count(&s->tex_pb);
3343 flush_put_bits(&s->tex_pb);
3344 ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3345 s->tex_pb= backup_s.tex_pb;
3347 s->last_bits= put_bits_count(&s->pb);
3349 if (CONFIG_H263_ENCODER &&
3350 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3351 ff_h263_update_motion_val(s);
/* if the winner's reconstruction is in the scratchpad, copy it out */
3353 if(next_block==0){ //FIXME 16 vs linesize16
3354 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3355 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3356 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3359 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3360 ff_mpv_reconstruct_mb(s, s->block);
/* --- single-MB-type path: set up MVs for the one possible type --- */
3362 int motion_x = 0, motion_y = 0;
3363 s->mv_type=MV_TYPE_16X16;
3364 // only one MB-Type possible
3367 case CANDIDATE_MB_TYPE_INTRA:
3370 motion_x= s->mv[0][0][0] = 0;
3371 motion_y= s->mv[0][0][1] = 0;
3373 case CANDIDATE_MB_TYPE_INTER:
3374 s->mv_dir = MV_DIR_FORWARD;
3376 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3377 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3379 case CANDIDATE_MB_TYPE_INTER_I:
3380 s->mv_dir = MV_DIR_FORWARD;
3381 s->mv_type = MV_TYPE_FIELD;
3384 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3385 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3386 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3389 case CANDIDATE_MB_TYPE_INTER4V:
3390 s->mv_dir = MV_DIR_FORWARD;
3391 s->mv_type = MV_TYPE_8X8;
3394 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3395 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3398 case CANDIDATE_MB_TYPE_DIRECT:
3399 if (CONFIG_MPEG4_ENCODER) {
3400 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3402 motion_x=s->b_direct_mv_table[xy][0];
3403 motion_y=s->b_direct_mv_table[xy][1];
3404 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3407 case CANDIDATE_MB_TYPE_DIRECT0:
3408 if (CONFIG_MPEG4_ENCODER) {
3409 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3411 ff_mpeg4_set_direct_mv(s, 0, 0);
3414 case CANDIDATE_MB_TYPE_BIDIR:
3415 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3417 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3418 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3419 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3420 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3422 case CANDIDATE_MB_TYPE_BACKWARD:
3423 s->mv_dir = MV_DIR_BACKWARD;
3425 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3426 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3428 case CANDIDATE_MB_TYPE_FORWARD:
3429 s->mv_dir = MV_DIR_FORWARD;
3431 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3432 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3434 case CANDIDATE_MB_TYPE_FORWARD_I:
3435 s->mv_dir = MV_DIR_FORWARD;
3436 s->mv_type = MV_TYPE_FIELD;
3439 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3440 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3441 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3444 case CANDIDATE_MB_TYPE_BACKWARD_I:
3445 s->mv_dir = MV_DIR_BACKWARD;
3446 s->mv_type = MV_TYPE_FIELD;
3449 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3450 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3451 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3454 case CANDIDATE_MB_TYPE_BIDIR_I:
3455 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3456 s->mv_type = MV_TYPE_FIELD;
3458 for(dir=0; dir<2; dir++){
3460 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3461 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3462 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3467 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3470 encode_mb(s, motion_x, motion_y);
3472 // RAL: Update last macroblock type
3473 s->last_mv_dir = s->mv_dir;
3475 if (CONFIG_H263_ENCODER &&
3476 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3477 ff_h263_update_motion_val(s);
3479 ff_mpv_reconstruct_mb(s, s->block);
3482 /* clean the MV table in IPS frames for direct mode in B-frames */
3483 if(s->mb_intra /* && I,P,S_TYPE */){
3484 s->p_mv_table[xy][0]=0;
3485 s->p_mv_table[xy][1]=0;
/* accumulate per-plane SSE for PSNR reporting (border MBs are clipped) */
3488 if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3492 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3493 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3495 s->current_picture.encoding_error[0] += sse(
3496 s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3497 s->dest[0], w, h, s->linesize);
3498 s->current_picture.encoding_error[1] += sse(
3499 s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3500 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3501 s->current_picture.encoding_error[2] += sse(
3502 s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3503 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3506 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3507 ff_h263_loop_filter(s);
3509 ff_dlog(s->avctx, "MB %d %d bits\n",
3510 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3514 //not beautiful here but we must write it before flushing so it has to be here
3515 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3516 ff_msmpeg4_encode_ext_header(s);
3520 #if FF_API_RTP_CALLBACK
3521 FF_DISABLE_DEPRECATION_WARNINGS
3522 /* Send the last GOB if RTP */
3523 if (s->avctx->rtp_callback) {
3524 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3525 int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3526 /* Call the RTP callback to send the last GOB */
3528 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3530 FF_ENABLE_DEPRECATION_WARNINGS
/* Add a field of the per-slice-thread context into the master context and
 * zero it in the source, so repeated merges do not double-count. */
3536 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold motion-estimation statistics collected by a slice thread back into
 * the master context after the ME pass. */
3537 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3538 MERGE(me.scene_change_score);
3539 MERGE(me.mc_mb_var_sum_temp);
3540 MERGE(me.mb_var_sum_temp);
/* Merge per-slice-thread encoding results (statistics and bitstream) into the
 * master context: DCT/noise-reduction counters, error-concealment counts and
 * PSNR accumulators are summed, then the thread's byte-aligned bitstream is
 * appended to the master PutBitContext. */
3543 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3546 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3547 MERGE(dct_count[1]);
3556 MERGE(er.error_count);
3557 MERGE(padding_bug_score);
3558 MERGE(current_picture.encoding_error[0]);
3559 MERGE(current_picture.encoding_error[1]);
3560 MERGE(current_picture.encoding_error[2]);
3562 if (dst->noise_reduction){
3563 for(i=0; i<64; i++){
3564 MERGE(dct_error_sum[0][i]);
3565 MERGE(dct_error_sum[1][i]);
/* Slice bitstreams must be byte aligned before concatenation. */
3569 av_assert1(put_bits_count(&src->pb) % 8 ==0);
3570 av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3571 ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3572 flush_put_bits(&dst->pb);
/* Choose the quantizer (picture quality / lambda) for the current picture.
 * Priority: an explicitly queued next_lambda, else the rate controller
 * (unless fixed qscale). With adaptive quantization, per-MB qscales are
 * cleaned up codec-specifically before the qscale table is initialized.
 * dry_run avoids consuming next_lambda so the estimate can be repeated. */
3575 static int estimate_qp(MpegEncContext *s, int dry_run){
3576 if (s->next_lambda){
3577 s->current_picture_ptr->f->quality =
3578 s->current_picture.f->quality = s->next_lambda;
3579 if(!dry_run) s->next_lambda= 0;
3580 } else if (!s->fixed_qscale) {
3581 int quality = ff_rate_estimate_qscale(s, dry_run);
3582 s->current_picture_ptr->f->quality =
3583 s->current_picture.f->quality = quality;
/* negative quality from rate control signals failure (error path not visible here) */
3584 if (s->current_picture.f->quality < 0)
3588 if(s->adaptive_quant){
3589 switch(s->codec_id){
3590 case AV_CODEC_ID_MPEG4:
3591 if (CONFIG_MPEG4_ENCODER)
3592 ff_clean_mpeg4_qscales(s);
3594 case AV_CODEC_ID_H263:
3595 case AV_CODEC_ID_H263P:
3596 case AV_CODEC_ID_FLV1:
3597 if (CONFIG_H263_ENCODER)
3598 ff_clean_h263_qscales(s);
3601 ff_init_qscale_tab(s);
3604 s->lambda= s->lambda_table[0];
3607 s->lambda = s->current_picture.f->quality;
3612 /* must be called before writing the header */
/* Update temporal distances derived from the picture pts:
 * pp_time = distance between the two surrounding non-B pictures,
 * pb_time = distance from the previous non-B picture to this B picture. */
3613 static void set_frame_distances(MpegEncContext * s){
3614 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3615 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3617 if(s->pict_type==AV_PICTURE_TYPE_B){
3618 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3619 av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
/* non-B picture: advance the reference distance bookkeeping */
3621 s->pp_time= s->time - s->last_non_b_time;
3622 s->last_non_b_time= s->time;
3623 av_assert1(s->picture_number==0 || s->pp_time > 0);
/* Encode one complete picture: set up timing and rounding state, run motion
 * estimation across slice threads, pick f_code/b_code, decide the quantizer,
 * build format-specific quant matrices, write the picture header and finally
 * dispatch encode_thread over all slice contexts and merge the results. */
3627 static int encode_picture(MpegEncContext *s, int picture_number)
3631 int context_count = s->slice_context_count;
3633 s->picture_number = picture_number;
3635 /* Reset the average MB variance */
3636 s->me.mb_var_sum_temp =
3637 s->me.mc_mb_var_sum_temp = 0;
3639 /* we need to initialize some time vars before we can encode B-frames */
3640 // RAL: Condition added for MPEG1VIDEO
3641 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3642 set_frame_distances(s);
3643 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3644 ff_set_mpeg4_time(s);
3646 s->me.scene_change_score=0;
3648 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* Rounding control: I frames reset it; P/S frames may toggle it per-frame
 * (flipflop rounding) for the codecs that support it. */
3650 if(s->pict_type==AV_PICTURE_TYPE_I){
3651 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3652 else s->no_rounding=0;
3653 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3654 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3655 s->no_rounding ^= 1;
/* 2-pass rate control reads qp/fcode from the stats file; otherwise reuse
 * the lambda from the previous picture of the matching type for ME. */
3658 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3659 if (estimate_qp(s,1) < 0)
3661 ff_get_2pass_fcode(s);
3662 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3663 if(s->pict_type==AV_PICTURE_TYPE_B)
3664 s->lambda= s->last_lambda_for[s->pict_type];
3666 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* Except for (A)MJPEG, chroma shares the luma intra quant matrices. */
3670 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3671 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3672 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3673 s->q_chroma_intra_matrix = s->q_intra_matrix;
3674 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3677 s->mb_intra=0; //for the rate distortion & bit compare functions
/* Propagate master state into each slice-thread context before ME. */
3678 for(i=1; i<context_count; i++){
3679 ret = ff_update_duplicate_context(s->thread_context[i], s);
3687 /* Estimate motion for every MB */
3688 if(s->pict_type != AV_PICTURE_TYPE_I){
3689 s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3690 s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3691 if (s->pict_type != AV_PICTURE_TYPE_B) {
3692 if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3694 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3698 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3699 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3701 for(i=0; i<s->mb_stride*s->mb_height; i++)
3702 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3704 if(!s->fixed_qscale){
3705 /* finding spatial complexity for I-frame rate control */
3706 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3709 for(i=1; i<context_count; i++){
3710 merge_context_after_me(s, s->thread_context[i]);
3712 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3713 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* Scene-change detection: promote a P frame to I when ME says the frame
 * changed too much, and force all MBs intra. */
3716 if (s->me.scene_change_score > s->scenechange_threshold &&
3717 s->pict_type == AV_PICTURE_TYPE_P) {
3718 s->pict_type= AV_PICTURE_TYPE_I;
3719 for(i=0; i<s->mb_stride*s->mb_height; i++)
3720 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3721 if(s->msmpeg4_version >= 3)
3723 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3724 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* P/S frames: derive f_code from the MV range and clip over-long vectors. */
3728 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3729 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3731 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3733 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3734 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3735 s->f_code= FFMAX3(s->f_code, a, b);
3738 ff_fix_long_p_mvs(s, s->intra_penalty ? CANDIDATE_MB_TYPE_INTER : CANDIDATE_MB_TYPE_INTRA);
3739 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3740 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3744 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3745 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
/* B frames: pick forward (f_code) and backward (b_code) ranges separately. */
3750 if(s->pict_type==AV_PICTURE_TYPE_B){
3753 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3754 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3755 s->f_code = FFMAX(a, b);
3757 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3758 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3759 s->b_code = FFMAX(a, b);
3761 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3762 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3763 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3764 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3765 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3767 for(dir=0; dir<2; dir++){
3770 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3771 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3772 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3773 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* Final (non-dry-run) quantizer decision for this picture. */
3781 if (estimate_qp(s, 0) < 0)
3784 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3785 s->pict_type == AV_PICTURE_TYPE_I &&
3786 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3787 s->qscale= 3; //reduce clipping problems
/* MJPEG bakes qscale directly into the quant matrices. */
3789 if (s->out_format == FMT_MJPEG) {
3790 const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3791 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3793 if (s->avctx->intra_matrix) {
3795 luma_matrix = s->avctx->intra_matrix;
3797 if (s->avctx->chroma_intra_matrix)
3798 chroma_matrix = s->avctx->chroma_intra_matrix;
3800 /* for mjpeg, we do include qscale in the matrix */
3802 int j = s->idsp.idct_permutation[i];
3804 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3805 s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3807 s->y_dc_scale_table=
3808 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3809 s->chroma_intra_matrix[0] =
3810 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3811 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3812 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3813 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3814 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV uses fixed SP5x quant tables and constant DC scales. */
3817 if(s->codec_id == AV_CODEC_ID_AMV){
3818 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3819 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3821 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3823 s->intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3824 s->chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3826 s->y_dc_scale_table= y;
3827 s->c_dc_scale_table= c;
3828 s->intra_matrix[0] = 13;
3829 s->chroma_intra_matrix[0] = 14;
3830 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3831 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3832 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3833 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3837 if (s->out_format == FMT_SPEEDHQ) {
3838 s->y_dc_scale_table=
3839 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[3];
3842 //FIXME var duplication
3843 s->current_picture_ptr->f->key_frame =
3844 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3845 s->current_picture_ptr->f->pict_type =
3846 s->current_picture.f->pict_type = s->pict_type;
3848 if (s->current_picture.f->key_frame)
3849 s->picture_in_gop_number=0;
3851 s->mb_x = s->mb_y = 0;
3852 s->last_bits= put_bits_count(&s->pb);
/* Write the format-specific picture header. */
3853 switch(s->out_format) {
3854 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
3856 /* s->huffman == HUFFMAN_TABLE_OPTIMAL can only be true for MJPEG. */
3857 if (!CONFIG_MJPEG_ENCODER || s->huffman != HUFFMAN_TABLE_OPTIMAL)
3858 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3859 s->pred, s->intra_matrix, s->chroma_intra_matrix);
3863 if (CONFIG_SPEEDHQ_ENCODER)
3864 ff_speedhq_encode_picture_header(s);
3867 if (CONFIG_H261_ENCODER)
3868 ff_h261_encode_picture_header(s, picture_number);
3871 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3872 ff_wmv2_encode_picture_header(s, picture_number);
3873 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3874 ff_msmpeg4_encode_picture_header(s, picture_number);
3875 else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3876 ret = ff_mpeg4_encode_picture_header(s, picture_number);
3879 } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3880 ret = ff_rv10_encode_picture_header(s, picture_number);
3884 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3885 ff_rv20_encode_picture_header(s, picture_number);
3886 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3887 ff_flv_encode_picture_header(s, picture_number);
3888 else if (CONFIG_H263_ENCODER)
3889 ff_h263_encode_picture_header(s, picture_number);
3892 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3893 ff_mpeg1_encode_picture_header(s, picture_number);
3898 bits= put_bits_count(&s->pb);
3899 s->header_bits= bits - s->last_bits;
/* Run the slice encoders and merge their bitstreams/statistics back. */
3901 for(i=1; i<context_count; i++){
3902 update_duplicate_context_after_me(s->thread_context[i], s);
3904 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3905 for(i=1; i<context_count; i++){
3906 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3907 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3908 merge_context_after_encode(s, s->thread_context[i]);
/* DCT-domain noise reduction (C reference): for each coefficient, accumulate
 * its magnitude into dct_error_sum and shrink it towards zero by the adaptive
 * dct_offset, clamping so a coefficient never changes sign. Separate
 * statistics are kept for intra and inter blocks. */
3914 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3915 const int intra= s->mb_intra;
3918 s->dct_count[intra]++;
3920 for(i=0; i<64; i++){
3921 int level= block[i];
/* positive branch: record magnitude, subtract offset, clamp at 0 */
3925 s->dct_error_sum[intra][i] += level;
3926 level -= s->dct_offset[intra][i];
3927 if(level<0) level=0;
/* negative branch: mirrored handling (error_sum tracks |level|) */
3929 s->dct_error_sum[intra][i] -= level;
3930 level += s->dct_offset[intra][i];
3931 if(level>0) level=0;
/* Trellis (rate-distortion optimal) quantization of one 8x8 block.
 * Runs the forward DCT, generates up to two candidate quantized levels per
 * coefficient, then dynamic-programming over "survivor" end positions picks
 * the run/level sequence minimizing distortion + lambda*bits. Returns the
 * index of the last nonzero coefficient (-1 if the block becomes empty) and
 * sets *overflow when a level exceeded the codec's max coefficient. */
3938 static int dct_quantize_trellis_c(MpegEncContext *s,
3939 int16_t *block, int n,
3940 int qscale, int *overflow){
3942 const uint16_t *matrix;
3943 const uint8_t *scantable;
3944 const uint8_t *perm_scantable;
3946 unsigned int threshold1, threshold2;
3958 int coeff_count[64];
3959 int qmul, qadd, start_i, last_non_zero, i, dc;
3960 const int esc_length= s->ac_esc_length;
3962 uint8_t * last_length;
3963 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3966 s->fdsp.fdct(block);
3968 if(s->dct_error_sum)
3969 s->denoise_dct(s, block);
3971 qadd= ((qscale-1)|1)*8;
/* MPEG-2 may use the nonlinear qscale table */
3973 if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3974 else mpeg2_qscale = qscale << 1;
/* intra path: select scantables, quant matrices and AC VLC length tables */
3978 scantable= s->intra_scantable.scantable;
3979 perm_scantable= s->intra_scantable.permutated;
3987 /* For AIC we skip quant/dequant of INTRADC */
3992 /* note: block[0] is assumed to be positive */
3993 block[0] = (block[0] + (q >> 1)) / q;
3996 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
3997 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
3998 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
3999 bias= 1<<(QMAT_SHIFT-1);
4001 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4002 length = s->intra_chroma_ac_vlc_length;
4003 last_length= s->intra_chroma_ac_vlc_last_length;
4005 length = s->intra_ac_vlc_length;
4006 last_length= s->intra_ac_vlc_last_length;
/* inter path */
4009 scantable= s->inter_scantable.scantable;
4010 perm_scantable= s->inter_scantable.permutated;
4013 qmat = s->q_inter_matrix[qscale];
4014 matrix = s->inter_matrix;
4015 length = s->inter_ac_vlc_length;
4016 last_length= s->inter_ac_vlc_last_length;
4020 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4021 threshold2= (threshold1<<1);
/* find the last coefficient that survives quantization */
4023 for(i=63; i>=start_i; i--) {
4024 const int j = scantable[i];
4025 int level = block[j] * qmat[j];
4027 if(((unsigned)(level+threshold1))>threshold2){
/* build per-position candidate levels: quantized value and value-1 */
4033 for(i=start_i; i<=last_non_zero; i++) {
4034 const int j = scantable[i];
4035 int level = block[j] * qmat[j];
4037 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4038 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4039 if(((unsigned)(level+threshold1))>threshold2){
4041 level= (bias + level)>>QMAT_SHIFT;
4043 coeff[1][i]= level-1;
4044 // coeff[2][k]= level-2;
4046 level= (bias - level)>>QMAT_SHIFT;
4047 coeff[0][i]= -level;
4048 coeff[1][i]= -level+1;
4049 // coeff[2][k]= -level+2;
4051 coeff_count[i]= FFMIN(level, 2);
4052 av_assert2(coeff_count[i]);
4055 coeff[0][i]= (level>>31)|1;
4060 *overflow= s->max_qcoeff < max; //overflow might have happened
4062 if(last_non_zero < start_i){
4063 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4064 return last_non_zero;
/* dynamic programming over scan positions; survivor[] holds candidate
 * predecessor positions still worth extending */
4067 score_tab[start_i]= 0;
4068 survivor[0]= start_i;
4071 for(i=start_i; i<=last_non_zero; i++){
4072 int level_index, j, zero_distortion;
4073 int dct_coeff= FFABS(block[ scantable[i] ]);
4074 int best_score=256*256*256*120;
/* ifast fdct output is AAN-scaled; undo for distortion measurement */
4076 if (s->fdsp.fdct == ff_fdct_ifast)
4077 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4078 zero_distortion= dct_coeff*dct_coeff;
4080 for(level_index=0; level_index < coeff_count[i]; level_index++){
4082 int level= coeff[level_index][i];
4083 const int alevel= FFABS(level);
/* reconstruct the coefficient the decoder would see, per output format */
4088 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4089 unquant_coeff= alevel*qmul + qadd;
4090 } else if(s->out_format == FMT_MJPEG) {
4091 j = s->idsp.idct_permutation[scantable[i]];
4092 unquant_coeff = alevel * matrix[j] * 8;
4094 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4096 unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4097 unquant_coeff = (unquant_coeff - 1) | 1;
4099 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4100 unquant_coeff = (unquant_coeff - 1) | 1;
4105 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* small levels use the run/level VLC length tables ... */
4107 if((level&(~127)) == 0){
4108 for(j=survivor_count-1; j>=0; j--){
4109 int run= i - survivor[j];
4110 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4111 score += score_tab[i-run];
4113 if(score < best_score){
4116 level_tab[i+1]= level-64;
4120 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4121 for(j=survivor_count-1; j>=0; j--){
4122 int run= i - survivor[j];
4123 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4124 score += score_tab[i-run];
4125 if(score < last_score){
4128 last_level= level-64;
/* ... large levels cost a full escape code */
4134 distortion += esc_length*lambda;
4135 for(j=survivor_count-1; j>=0; j--){
4136 int run= i - survivor[j];
4137 int score= distortion + score_tab[i-run];
4139 if(score < best_score){
4142 level_tab[i+1]= level-64;
4146 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4147 for(j=survivor_count-1; j>=0; j--){
4148 int run= i - survivor[j];
4149 int score= distortion + score_tab[i-run];
4150 if(score < last_score){
4153 last_level= level-64;
4161 score_tab[i+1]= best_score;
4163 // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
/* prune survivors that can no longer lead to a better path */
4164 if(last_non_zero <= 27){
4165 for(; survivor_count; survivor_count--){
4166 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4170 for(; survivor_count; survivor_count--){
4171 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4176 survivor[ survivor_count++ ]= i+1;
/* for non-H.26x formats pick the best terminating position (EOB cost) */
4179 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4180 last_score= 256*256*256*120;
4181 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4182 int score= score_tab[i];
4184 score += lambda * 2; // FIXME more exact?
4186 if(score < last_score){
4189 last_level= level_tab[i];
4190 last_run= run_tab[i];
4195 s->coded_score[n] = last_score;
4197 dc= FFABS(block[0]);
4198 last_non_zero= last_i - 1;
4199 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4201 if(last_non_zero < start_i)
4202 return last_non_zero;
/* DC-only block: decide whether keeping the single coefficient is worth
 * its bit cost compared to dropping it */
4204 if(last_non_zero == 0 && start_i == 0){
4206 int best_score= dc * dc;
4208 for(i=0; i<coeff_count[0]; i++){
4209 int level= coeff[i][0];
4210 int alevel= FFABS(level);
4211 int unquant_coeff, score, distortion;
4213 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4214 unquant_coeff= (alevel*qmul + qadd)>>3;
4216 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4217 unquant_coeff = (unquant_coeff - 1) | 1;
4219 unquant_coeff = (unquant_coeff + 4) >> 3;
4220 unquant_coeff<<= 3 + 3;
4222 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4224 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4225 else score= distortion + esc_length*lambda;
4227 if(score < best_score){
4229 best_level= level - 64;
4232 block[0]= best_level;
4233 s->coded_score[n] = best_score - dc*dc;
4234 if(best_level == 0) return -1;
4235 else return last_non_zero;
/* backtrack the winning path and write the chosen levels into block[] */
4239 av_assert2(last_level);
4241 block[ perm_scantable[last_non_zero] ]= last_level;
4244 for(; i>start_i; i -= run_tab[i] + 1){
4245 block[ perm_scantable[i-1] ]= level_tab[i];
4248 return last_non_zero;
/* 8x8 DCT basis functions in the IDCT permutation order, built lazily. */
4251 static int16_t basis[64][64];
/* Fill basis[] with fixed-point (BASIS_SHIFT) samples of the 2-D DCT basis,
 * indexed through the given IDCT coefficient permutation. */
4253 static void build_basis(uint8_t *perm){
4260 double s= 0.25*(1<<BASIS_SHIFT);
4262 int perm_index= perm[index];
/* DC rows/columns carry the orthonormalization factor 1/sqrt(2) */
4263 if(i==0) s*= sqrt(0.5);
4264 if(j==0) s*= sqrt(0.5);
4265 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Quantizer-noise-shaping refinement pass: starting from an already
 * quantized block, greedily tries +/-1 changes to individual coefficients,
 * scoring each candidate by the weighted reconstruction error in the pixel
 * domain (via try_8x8basis on the residual rem[]) plus the VLC bit-cost
 * delta. Applies the best change until none improves the score. Returns the
 * new last-nonzero index. */
4272 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4273 int16_t *block, int16_t *weight, int16_t *orig,
4276 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4277 const uint8_t *scantable;
4278 const uint8_t *perm_scantable;
4279 // unsigned int threshold1, threshold2;
4284 int qmul, qadd, start_i, last_non_zero, i, dc;
4286 uint8_t * last_length;
4288 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* lazily build the DCT basis table on first use */
4290 if(basis[0][0] == 0)
4291 build_basis(s->idsp.idct_permutation);
4296 scantable= s->intra_scantable.scantable;
4297 perm_scantable= s->intra_scantable.permutated;
4304 /* For AIC we skip quant/dequant of INTRADC */
4308 q <<= RECON_SHIFT-3;
4309 /* note: block[0] is assumed to be positive */
4311 // block[0] = (block[0] + (q >> 1)) / q;
4313 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4314 // bias= 1<<(QMAT_SHIFT-1);
4315 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4316 length = s->intra_chroma_ac_vlc_length;
4317 last_length= s->intra_chroma_ac_vlc_last_length;
4319 length = s->intra_ac_vlc_length;
4320 last_length= s->intra_ac_vlc_last_length;
4323 scantable= s->inter_scantable.scantable;
4324 perm_scantable= s->inter_scantable.permutated;
4327 length = s->inter_ac_vlc_length;
4328 last_length= s->inter_ac_vlc_last_length;
4330 last_non_zero = s->block_last_index[n];
/* rem[] = reconstruction residual vs. the original pixels (RECON_SHIFT fixed point) */
4332 dc += (1<<(RECON_SHIFT-1));
4333 for(i=0; i<64; i++){
4334 rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
/* derive perceptual weights in the 16..63 range from weight[] and qns */
4338 for(i=0; i<64; i++){
4343 w= FFABS(weight[i]) + qns*one;
4344 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4347 // w=weight[i] = (63*qns + (w/2)) / w;
4350 av_assert2(w<(1<<6));
4353 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* record the current run-length structure and subtract the dequantized
 * coefficients' contribution from rem[] */
4357 for(i=start_i; i<=last_non_zero; i++){
4358 int j= perm_scantable[i];
4359 const int level= block[j];
4363 if(level<0) coeff= qmul*level - qadd;
4364 else coeff= qmul*level + qadd;
4365 run_tab[rle_index++]=run;
4368 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
4375 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4378 int run2, best_unquant_change=0, analyze_gradient;
4379 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
/* d1[] approximates the DCT of the residual, used to reject changes whose
 * sign disagrees with the gradient */
4381 if(analyze_gradient){
4382 for(i=0; i<64; i++){
4385 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
/* intra DC gets a dedicated +/-1 trial (distortion only, no VLC delta) */
4391 const int level= block[0];
4392 int change, old_coeff;
4394 av_assert2(s->mb_intra);
4398 for(change=-1; change<=1; change+=2){
4399 int new_level= level + change;
4400 int score, new_coeff;
4402 new_coeff= q*new_level;
4403 if(new_coeff >= 2048 || new_coeff < 0)
4406 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4407 new_coeff - old_coeff);
4408 if(score<best_score){
4411 best_change= change;
4412 best_unquant_change= new_coeff - old_coeff;
4419 run2= run_tab[rle_index++];
/* AC coefficients: try +/-1 on each position, accounting for how the
 * run/level VLC costs change when a coefficient appears or vanishes */
4423 for(i=start_i; i<64; i++){
4424 int j= perm_scantable[i];
4425 const int level= block[j];
4426 int change, old_coeff;
4428 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4432 if(level<0) old_coeff= qmul*level - qadd;
4433 else old_coeff= qmul*level + qadd;
4434 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4438 av_assert2(run2>=0 || i >= last_non_zero );
4441 for(change=-1; change<=1; change+=2){
4442 int new_level= level + change;
4443 int score, new_coeff, unquant_change;
4446 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4450 if(new_level<0) new_coeff= qmul*new_level - qadd;
4451 else new_coeff= qmul*new_level + qadd;
4452 if(new_coeff >= 2048 || new_coeff <= -2048)
4454 //FIXME check for overflow
/* coefficient stays nonzero: simple VLC length delta */
4457 if(level < 63 && level > -63){
4458 if(i < last_non_zero)
4459 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4460 - length[UNI_AC_ENC_INDEX(run, level+64)];
4462 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4463 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* coefficient appears (0 -> +/-1): splits an existing run */
4466 av_assert2(FFABS(new_level)==1);
4468 if(analyze_gradient){
4469 int g= d1[ scantable[i] ];
4470 if(g && (g^new_level) >= 0)
4474 if(i < last_non_zero){
4475 int next_i= i + run2 + 1;
4476 int next_level= block[ perm_scantable[next_i] ] + 64;
4478 if(next_level&(~127))
4481 if(next_i < last_non_zero)
4482 score += length[UNI_AC_ENC_INDEX(run, 65)]
4483 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4484 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4486 score += length[UNI_AC_ENC_INDEX(run, 65)]
4487 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4488 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4490 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4492 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4493 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* coefficient disappears (+/-1 -> 0): merges two runs */
4499 av_assert2(FFABS(level)==1);
4501 if(i < last_non_zero){
4502 int next_i= i + run2 + 1;
4503 int next_level= block[ perm_scantable[next_i] ] + 64;
4505 if(next_level&(~127))
4508 if(next_i < last_non_zero)
4509 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4510 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4511 - length[UNI_AC_ENC_INDEX(run, 65)];
4513 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4514 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4515 - length[UNI_AC_ENC_INDEX(run, 65)];
4517 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4519 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4520 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4527 unquant_change= new_coeff - old_coeff;
4528 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4530 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4532 if(score<best_score){
4535 best_change= change;
4536 best_unquant_change= unquant_change;
4540 prev_level= level + 64;
4541 if(prev_level&(~127))
/* apply the winning change, update last_non_zero and the run table,
 * and fold the reconstruction delta back into rem[] */
4551 int j= perm_scantable[ best_coeff ];
4553 block[j] += best_change;
4555 if(best_coeff > last_non_zero){
4556 last_non_zero= best_coeff;
4557 av_assert2(block[j]);
4559 for(; last_non_zero>=start_i; last_non_zero--){
4560 if(block[perm_scantable[last_non_zero]])
4567 for(i=start_i; i<=last_non_zero; i++){
4568 int j= perm_scantable[i];
4569 const int level= block[j];
4572 run_tab[rle_index++]=run;
4579 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4585 return last_non_zero;
* Permute an 8x8 block according to permutation.
4590 * @param block the block which will be permuted according to
4591 * the given permutation vector
4592 * @param permutation the permutation vector
4593 * @param last the last non zero coefficient in scantable order, used to
4594 * speed the permutation up
4595 * @param scantable the used scantable, this is only used to speed the
4596 * permutation up, the block is not (inverse) permutated
4597 * to scantable order!
4599 void ff_block_permute(int16_t *block, uint8_t *permutation,
4600 const uint8_t *scantable, int last)
4607 //FIXME it is ok but not clean and might fail for some permutations
4608 // if (permutation[1] == 1)
/* copy the nonzero coefficients out, then write them back through the
 * permutation; only positions up to `last` in scan order are touched */
4611 for (i = 0; i <= last; i++) {
4612 const int j = scantable[i];
4617 for (i = 0; i <= last; i++) {
4618 const int j = scantable[i];
4619 const int perm_j = permutation[j];
4620 block[perm_j] = temp[j];
/* Plain (non-trellis) quantization of one 8x8 block, C reference version.
 * Forward DCT, optional denoising, then thresholded quantization with the
 * intra/inter bias, and finally a permutation of the nonzero coefficients
 * to match the IDCT's coefficient order. Returns the last nonzero index in
 * scan order and sets *overflow when a level exceeded max_qcoeff. */
4624 int ff_dct_quantize_c(MpegEncContext *s,
4625 int16_t *block, int n,
4626 int qscale, int *overflow)
4628 int i, j, level, last_non_zero, q, start_i;
4630 const uint8_t *scantable;
4633 unsigned int threshold1, threshold2;
4635 s->fdsp.fdct(block);
4637 if(s->dct_error_sum)
4638 s->denoise_dct(s, block);
4641 scantable= s->intra_scantable.scantable;
4649 /* For AIC we skip quant/dequant of INTRADC */
4652 /* note: block[0] is assumed to be positive */
4653 block[0] = (block[0] + (q >> 1)) / q;
/* blocks 0-3 are luma, 4+ chroma: pick the matching intra matrix */
4656 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4657 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4659 scantable= s->inter_scantable.scantable;
4662 qmat = s->q_inter_matrix[qscale];
4663 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4665 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4666 threshold2= (threshold1<<1);
/* scan backwards to find the last coefficient surviving quantization */
4667 for(i=63;i>=start_i;i--) {
4669 level = block[j] * qmat[j];
4671 if(((unsigned)(level+threshold1))>threshold2){
/* quantize the surviving range; the unsigned trick tests |level| > threshold1 */
4678 for(i=start_i; i<=last_non_zero; i++) {
4680 level = block[j] * qmat[j];
4682 // if( bias+level >= (1<<QMAT_SHIFT)
4683 // || bias-level >= (1<<QMAT_SHIFT)){
4684 if(((unsigned)(level+threshold1))>threshold2){
4686 level= (bias + level)>>QMAT_SHIFT;
4689 level= (bias - level)>>QMAT_SHIFT;
4697 *overflow= s->max_qcoeff < max; //overflow might have happened
4699 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4700 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4701 ff_block_permute(block, s->idsp.idct_permutation,
4702 scantable, last_non_zero);
4704 return last_non_zero;
/* AVOption plumbing: OFFSET maps an option onto an MpegEncContext field,
 * VE marks options as video + encoding parameters. */
4707 #define OFFSET(x) offsetof(MpegEncContext, x)
4708 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options for the baseline H.263 encoder. */
4709 static const AVOption h263_options[] = {
4710 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4711 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4713 #if FF_API_MPEGVIDEO_OPTS
4714 FF_MPV_DEPRECATED_MPEG_QUANT_OPT
4719 static const AVClass h263_class = {
4720 .class_name = "H.263 encoder",
4721 .item_name = av_default_item_name,
4722 .option = h263_options,
4723 .version = LIBAVUTIL_VERSION_INT,
/* H.263 encoder registration: shares the generic mpegvideo init/encode/close. */
4726 AVCodec ff_h263_encoder = {
4728 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4729 .type = AVMEDIA_TYPE_VIDEO,
4730 .id = AV_CODEC_ID_H263,
4731 .priv_data_size = sizeof(MpegEncContext),
4732 .init = ff_mpv_encode_init,
4733 .encode2 = ff_mpv_encode_picture,
4734 .close = ff_mpv_encode_end,
4735 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4736 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4737 .priv_class = &h263_class,
/* Private options for the H.263+ (H.263v2) encoder. */
4740 static const AVOption h263p_options[] = {
4741 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4742 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4743 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4744 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
4746 #if FF_API_MPEGVIDEO_OPTS
4747 FF_MPV_DEPRECATED_MPEG_QUANT_OPT
4751 static const AVClass h263p_class = {
4752 .class_name = "H.263p encoder",
4753 .item_name = av_default_item_name,
4754 .option = h263p_options,
4755 .version = LIBAVUTIL_VERSION_INT,
/* H.263+ encoder registration; unlike plain H.263 it supports slice threads. */
4758 AVCodec ff_h263p_encoder = {
4760 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4761 .type = AVMEDIA_TYPE_VIDEO,
4762 .id = AV_CODEC_ID_H263P,
4763 .priv_data_size = sizeof(MpegEncContext),
4764 .init = ff_mpv_encode_init,
4765 .encode2 = ff_mpv_encode_picture,
4766 .close = ff_mpv_encode_end,
4767 .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4768 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4769 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4770 .priv_class = &h263p_class,
4773 static const AVClass msmpeg4v2_class = {
4774 .class_name = "msmpeg4v2 encoder",
4775 .item_name = av_default_item_name,
4776 .option = ff_mpv_generic_options,
4777 .version = LIBAVUTIL_VERSION_INT,
4780 AVCodec ff_msmpeg4v2_encoder = {
4781 .name = "msmpeg4v2",
4782 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4783 .type = AVMEDIA_TYPE_VIDEO,
4784 .id = AV_CODEC_ID_MSMPEG4V2,
4785 .priv_data_size = sizeof(MpegEncContext),
4786 .init = ff_mpv_encode_init,
4787 .encode2 = ff_mpv_encode_picture,
4788 .close = ff_mpv_encode_end,
4789 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4790 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4791 .priv_class = &msmpeg4v2_class,
4794 static const AVClass msmpeg4v3_class = {
4795 .class_name = "msmpeg4v3 encoder",
4796 .item_name = av_default_item_name,
4797 .option = ff_mpv_generic_options,
4798 .version = LIBAVUTIL_VERSION_INT,
4801 AVCodec ff_msmpeg4v3_encoder = {
4803 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4804 .type = AVMEDIA_TYPE_VIDEO,
4805 .id = AV_CODEC_ID_MSMPEG4V3,
4806 .priv_data_size = sizeof(MpegEncContext),
4807 .init = ff_mpv_encode_init,
4808 .encode2 = ff_mpv_encode_picture,
4809 .close = ff_mpv_encode_end,
4810 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4811 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4812 .priv_class = &msmpeg4v3_class,
4815 static const AVClass wmv1_class = {
4816 .class_name = "wmv1 encoder",
4817 .item_name = av_default_item_name,
4818 .option = ff_mpv_generic_options,
4819 .version = LIBAVUTIL_VERSION_INT,
4822 AVCodec ff_wmv1_encoder = {
4824 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4825 .type = AVMEDIA_TYPE_VIDEO,
4826 .id = AV_CODEC_ID_WMV1,
4827 .priv_data_size = sizeof(MpegEncContext),
4828 .init = ff_mpv_encode_init,
4829 .encode2 = ff_mpv_encode_picture,
4830 .close = ff_mpv_encode_end,
4831 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4832 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4833 .priv_class = &wmv1_class,