2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/pixdesc.h"
40 #include "libavutil/opt.h"
45 #include "mpegvideo.h"
46 #include "mpegvideodata.h"
50 #include "mjpegenc_common.h"
52 #include "mpegutils.h"
55 #include "pixblockdsp.h"
59 #include "aandcttab.h"
61 #include "mpeg4video.h"
63 #include "bytestream.h"
66 #include "packet_internal.h"
/* Fixed-point precision (in bits) of the intra/inter quantizer bias values. */
#define QUANT_BIAS_SHIFT 8

/* Precision of the 16-bit quantization multiplier tables (qmat16),
 * used by the SIMD quantizer paths. */
#define QMAT_SHIFT_MMX 16

/* Forward declarations for routines defined later in this file. */
static int encode_picture(MpegEncContext *s, int picture_number);
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
static int sse_mb(MpegEncContext *s);
static void denoise_dct_c(MpegEncContext *s, int16_t *block);
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);

/* Default motion-vector cost/penalty table, indexed by f_code and MV delta;
 * shared by all encoder instances (filled in during init). */
static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
/* Default f_code lookup table indexed by (mv + MAX_MV). */
static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
/* Generic AVOptions shared by the mpegvideo-based encoders
 * (table entries not visible in this view of the file). */
const AVOption ff_mpv_generic_options[] = {
/**
 * Convert a quantization matrix into per-qscale reciprocal multiplier tables.
 *
 * For every qscale in [qmin, qmax] this precomputes, per coefficient,
 * fixed-point factors so that quantization becomes a multiply+shift instead
 * of a division.  The table layout depends on which forward DCT is in use,
 * because the ifast DCT leaves AAN scale factors folded into its output.
 *
 * @param s            encoder context (provides fdct/idct permutation)
 * @param qmat         output: 32-bit multiplier table [qscale][coef]
 * @param qmat16       output: 16-bit multiplier/bias table [qscale][0|1][coef]
 *                     (0 = multiplier, 1 = rounding bias)
 * @param quant_matrix input quantization matrix (64 entries)
 * @param bias         quantizer rounding bias (QUANT_BIAS_SHIFT fixed point)
 * @param qmin, qmax   inclusive qscale range to fill
 * @param intra        nonzero for the intra matrix (DC coeff handled elsewhere,
 *                     so the overflow scan below starts at index `intra`)
 */
void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
uint16_t (*qmat16)[2][64],
const uint16_t *quant_matrix,
int bias, int qmin, int qmax, int intra)
FDCTDSPContext *fdsp = &s->fdsp;
for (qscale = qmin; qscale <= qmax; qscale++) {
/* Map the nominal qscale to the real quantizer step: MPEG-2 style
 * non-linear table, or linear (2*qscale). */
if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
else qscale2 = qscale << 1;
/* Accurate integer DCTs: output has no extra scaling folded in. */
if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
fdsp->fdct == ff_faandct ||
#endif /* CONFIG_FAANDCT */
fdsp->fdct == ff_jpeg_fdct_islow_10) {
for (i = 0; i < 64; i++) {
/* j: coefficient index in IDCT permutation order. */
const int j = s->idsp.idct_permutation[i];
int64_t den = (int64_t) qscale2 * quant_matrix[j];
/* 16 <= qscale * quant_matrix[i] <= 7905
 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
 * 19952 <= x <= 249205026
 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
 * 3444240 >= (1 << 36) / (x) >= 275 */
qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
} else if (fdsp->fdct == ff_fdct_ifast) {
/* ifast DCT: divide out the AAN scale factors as well. */
for (i = 0; i < 64; i++) {
const int j = s->idsp.idct_permutation[i];
int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
/* 16 <= qscale * quant_matrix[i] <= 7905
 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
 * 19952 <= x <= 249205026
 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
 * 3444240 >= (1 << 36) / (x) >= 275 */
qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
/* Default path: also fill the 16-bit tables for the SIMD quantizer. */
for (i = 0; i < 64; i++) {
const int j = s->idsp.idct_permutation[i];
int64_t den = (int64_t) qscale2 * quant_matrix[j];
/* We can safely suppose that 16 <= quant_matrix[i] <= 255
 * Assume x = qscale * quant_matrix[i]
 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
 * so 32768 >= (1 << 19) / (x) >= 67 */
qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
//qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
// (qscale * quant_matrix[i]);
qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
/* Clamp the 16-bit multiplier away from 0 and from 0x8000,
 * which would misbehave in the 16-bit SIMD multiply. */
if (qmat16[qscale][0][i] == 0 ||
qmat16[qscale][0][i] == 128 * 256)
qmat16[qscale][0][i] = 128 * 256 - 1;
qmat16[qscale][1][i] =
ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
qmat16[qscale][0][i]);
/* Detect potential 32-bit overflow of level*qmat and reduce the
 * effective shift if needed (starts at `intra`: DC skipped for intra). */
for (i = intra; i < 64; i++) {
if (fdsp->fdct == ff_fdct_ifast) {
/* Largest possible level for ifast includes the AAN scale. */
max = (8191LL * ff_aanscales[i]) >> 14;
while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
av_log(s->avctx, AV_LOG_INFO,
"Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/**
 * Derive s->qscale (and s->lambda2) from the current rate-control lambda.
 * qscale ~= lambda * 139 / 2^(FF_LAMBDA_SHIFT+7), clipped to [qmin, qmax]
 * (qmax relaxed to 31 when VBV forces ignoring qmax).
 */
static inline void update_qscale(MpegEncContext *s)
/* NOTE: the `&& 0` makes this non-linear-quant search dead code —
 * intentionally disabled upstream, kept for reference. */
if (s->q_scale_type == 1 && 0) {
int bestdiff=INT_MAX;
/* Find the non-linear qscale whose step best matches lambda. */
for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
(ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
if (diff < bestdiff) {
/* Linear case: direct lambda -> qscale mapping with rounding. */
s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
(FF_LAMBDA_SHIFT + 7);
s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
/* Keep lambda2 (lambda squared, rounded) consistent with lambda. */
s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/**
 * Write a 64-entry quantization matrix to the bitstream in zigzag order,
 * 8 bits per entry.
 */
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
for (i = 0; i < 64; i++) {
put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
 * Initialize s->current_picture.qscale_table from s->lambda_table:
 * convert each macroblock's lambda to a qscale (same lambda->qscale
 * mapping as update_qscale) and clip it to [qmin, qmax].
void ff_init_qscale_tab(MpegEncContext *s)
int8_t * const qscale_table = s->current_picture.qscale_table;
for (i = 0; i < s->mb_num; i++) {
/* mb_index2xy maps raster MB index to the table's xy position. */
unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/**
 * Copy the per-frame fields that slice-thread duplicate contexts need
 * after motion estimation has run on the main context.
 */
static void update_duplicate_context_after_me(MpegEncContext *dst,
#define COPY(a) dst->a= src->a
COPY(current_picture);
COPY(picture_in_gop_number);
COPY(gop_picture_number);
COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
COPY(progressive_frame); // FIXME don't set in encode_header
COPY(partitioned_frame); // FIXME don't set in encode_header
 * Set the given MpegEncContext to defaults for encoding.
 * the changed fields will not depend upon the prior state of the MpegEncContext.
static void mpv_encode_defaults(MpegEncContext *s)
/* Shared decode/encode defaults first, then encoder-specific ones. */
ff_mpv_common_defaults(s);
/* Small MVs (|mv| < 16) can always use f_code 1. */
for (i = -16; i < 16; i++) {
default_fcode_tab[i + MAX_MV] = 1;
s->me.mv_penalty = default_mv_penalty;
s->fcode_tab = default_fcode_tab;
s->input_picture_number = 0;
s->picture_in_gop_number = 0;
/**
 * Initialize the DCT/quantization function pointers for encoding,
 * choosing platform-optimized versions and, when trellis quantization
 * is requested, routing dct_quantize through the trellis implementation
 * while keeping the plain version available as fast_dct_quantize.
 */
av_cold int ff_dct_encode_init(MpegEncContext *s)
ff_dct_encode_init_x86(s);
if (CONFIG_H263_ENCODER)
ff_h263dsp_init(&s->h263dsp);
/* Keep any arch-specific quantizer installed above; fall back to C. */
if (!s->dct_quantize)
s->dct_quantize = ff_dct_quantize_c;
s->denoise_dct = denoise_dct_c;
s->fast_dct_quantize = s->dct_quantize;
if (s->avctx->trellis)
s->dct_quantize = dct_quantize_trellis_c;
/* init video encoder */
/**
 * Initialize the mpegvideo encoder: validate codec/pixel-format/option
 * combinations, apply deprecated AVCodecContext options, configure
 * per-codec output format, allocate tables, and set up rate control.
 * Returns 0 on success or a negative AVERROR code.
 */
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
MpegEncContext *s = avctx->priv_data;
AVCPBProperties *cpb_props;
int i, ret, format_supported;
mpv_encode_defaults(s);
/* --- pixel format validation, per codec --- */
switch (avctx->codec_id) {
case AV_CODEC_ID_MPEG2VIDEO:
if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
av_log(avctx, AV_LOG_ERROR,
"only YUV420 and YUV422 are supported\n");
return AVERROR(EINVAL);
case AV_CODEC_ID_MJPEG:
case AV_CODEC_ID_AMV:
format_supported = 0;
/* JPEG color space */
if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
(avctx->color_range == AVCOL_RANGE_JPEG &&
(avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
format_supported = 1;
/* MPEG color space */
else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
(avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
avctx->pix_fmt == AV_PIX_FMT_YUV444P))
format_supported = 1;
if (!format_supported) {
av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
return AVERROR(EINVAL);
/* default case: all remaining codecs accept only YUV420 */
if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
return AVERROR(EINVAL);
/* --- derive chroma subsampling mode from the pixel format --- */
switch (avctx->pix_fmt) {
case AV_PIX_FMT_YUVJ444P:
case AV_PIX_FMT_YUV444P:
s->chroma_format = CHROMA_444;
case AV_PIX_FMT_YUVJ422P:
case AV_PIX_FMT_YUV422P:
s->chroma_format = CHROMA_422;
case AV_PIX_FMT_YUVJ420P:
case AV_PIX_FMT_YUV420P:
s->chroma_format = CHROMA_420;
avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
/* --- import deprecated public options into the private context --- */
#if FF_API_PRIVATE_OPT
FF_DISABLE_DEPRECATION_WARNINGS
if (avctx->rtp_payload_size)
s->rtp_payload_size = avctx->rtp_payload_size;
if (avctx->me_penalty_compensation)
s->me_penalty_compensation = avctx->me_penalty_compensation;
s->me_pre = avctx->pre_me;
FF_ENABLE_DEPRECATION_WARNINGS
/* --- copy basic stream parameters --- */
s->bit_rate = avctx->bit_rate;
s->width = avctx->width;
s->height = avctx->height;
if (avctx->gop_size > 600 &&
avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
av_log(avctx, AV_LOG_WARNING,
"keyframe interval too large!, reducing it from %d to %d\n",
avctx->gop_size, 600);
avctx->gop_size = 600;
s->gop_size = avctx->gop_size;
if (avctx->max_b_frames > MAX_B_FRAMES) {
av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
"is %d.\n", MAX_B_FRAMES);
avctx->max_b_frames = MAX_B_FRAMES;
s->max_b_frames = avctx->max_b_frames;
s->codec_id = avctx->codec->id;
s->strict_std_compliance = avctx->strict_std_compliance;
s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
s->rtp_mode = !!s->rtp_payload_size;
s->intra_dc_precision = avctx->intra_dc_precision;
// workaround some differences between how applications specify dc precision
if (s->intra_dc_precision < 0) {
s->intra_dc_precision += 8;
} else if (s->intra_dc_precision >= 8)
s->intra_dc_precision -= 8;
if (s->intra_dc_precision < 0) {
av_log(avctx, AV_LOG_ERROR,
"intra dc precision must be positive, note some applications use"
" 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
return AVERROR(EINVAL);
if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))
/* only MPEG-2 supports >8-bit intra DC precision */
if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
return AVERROR(EINVAL);
s->user_specified_pts = AV_NOPTS_VALUE;
if (s->gop_size <= 1) {
s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
/* adaptive quant is needed whenever any masking or QP-RD is in use */
s->adaptive_quant = (s->avctx->lumi_masking ||
s->avctx->dark_masking ||
s->avctx->temporal_cplx_masking ||
s->avctx->spatial_cplx_masking ||
s->avctx->p_masking ||
(s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
s->loop_filter = !!(s->avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
/* --- pick a default VBV buffer size when only max rate was given --- */
if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
switch(avctx->codec_id) {
case AV_CODEC_ID_MPEG1VIDEO:
case AV_CODEC_ID_MPEG2VIDEO:
avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
case AV_CODEC_ID_MPEG4:
case AV_CODEC_ID_MSMPEG4V1:
case AV_CODEC_ID_MSMPEG4V2:
case AV_CODEC_ID_MSMPEG4V3:
/* piecewise-linear VBV sizing between standard rate points */
if (avctx->rc_max_rate >= 15000000) {
avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
} else if(avctx->rc_max_rate >= 2000000) {
avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
} else if(avctx->rc_max_rate >= 384000) {
avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
avctx->rc_buffer_size = 40;
avctx->rc_buffer_size *= 16384;
if (avctx->rc_buffer_size) {
av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
/* --- rate-control parameter sanity checks --- */
if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
return AVERROR(EINVAL);
if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
av_log(avctx, AV_LOG_INFO,
"Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
return AVERROR(EINVAL);
if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
return AVERROR(EINVAL);
if (avctx->rc_max_rate &&
avctx->rc_max_rate == avctx->bit_rate &&
avctx->rc_max_rate != avctx->rc_min_rate) {
av_log(avctx, AV_LOG_INFO,
"impossible bitrate constraints, this will fail\n");
/* buffer must hold at least one frame's worth of bits */
if (avctx->rc_buffer_size &&
avctx->bit_rate * (int64_t)avctx->time_base.num >
avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
return AVERROR(EINVAL);
if (!s->fixed_qscale &&
avctx->bit_rate * av_q2d(avctx->time_base) >
avctx->bit_rate_tolerance) {
av_log(avctx, AV_LOG_WARNING,
"bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
/* vbv_delay is a 16-bit 90kHz field; an oversized buffer can't be signalled */
if (s->avctx->rc_max_rate &&
s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
(s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
90000LL * (avctx->rc_buffer_size - 1) >
s->avctx->rc_max_rate * 0xFFFFLL) {
av_log(avctx, AV_LOG_INFO,
"Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
"specified vbv buffer is too large for the given bitrate!\n");
/* --- feature/codec compatibility checks --- */
if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
s->codec_id != AV_CODEC_ID_FLV1) {
av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
return AVERROR(EINVAL);
if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
av_log(avctx, AV_LOG_ERROR,
"OBMC is only supported with simple mb decision\n");
return AVERROR(EINVAL);
if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
return AVERROR(EINVAL);
if (s->max_b_frames &&
s->codec_id != AV_CODEC_ID_MPEG4 &&
s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
return AVERROR(EINVAL);
if (s->max_b_frames < 0) {
av_log(avctx, AV_LOG_ERROR,
"max b frames must be 0 or positive for mpegvideo based encoders\n");
return AVERROR(EINVAL);
/* SAR is stored in 8 bits for these codecs; reduce if out of range */
if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
s->codec_id == AV_CODEC_ID_H263 ||
s->codec_id == AV_CODEC_ID_H263P) &&
(avctx->sample_aspect_ratio.num > 255 ||
avctx->sample_aspect_ratio.den > 255)) {
av_log(avctx, AV_LOG_WARNING,
"Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
/* --- resolution limits per codec --- */
if ((s->codec_id == AV_CODEC_ID_H263 ||
s->codec_id == AV_CODEC_ID_H263P) &&
(avctx->width > 2048 ||
avctx->height > 1152 )) {
av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
return AVERROR(EINVAL);
if ((s->codec_id == AV_CODEC_ID_H263 ||
s->codec_id == AV_CODEC_ID_H263P) &&
((avctx->width &3) ||
(avctx->height&3) )) {
av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
return AVERROR(EINVAL);
if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
(avctx->width > 4095 ||
avctx->height > 4095 )) {
av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
return AVERROR(EINVAL);
if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
(avctx->width > 16383 ||
avctx->height > 16383 )) {
av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
return AVERROR(EINVAL);
if (s->codec_id == AV_CODEC_ID_RV10 &&
avctx->height&15 )) {
av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
return AVERROR(EINVAL);
if (s->codec_id == AV_CODEC_ID_RV20 &&
av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
return AVERROR(EINVAL);
if ((s->codec_id == AV_CODEC_ID_WMV1 ||
s->codec_id == AV_CODEC_ID_WMV2) &&
av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
return AVERROR(EINVAL);
if ((s->avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
return AVERROR(EINVAL);
#if FF_API_PRIVATE_OPT
FF_DISABLE_DEPRECATION_WARNINGS
if (avctx->mpeg_quant)
s->mpeg_quant = avctx->mpeg_quant;
FF_ENABLE_DEPRECATION_WARNINGS
// FIXME mpeg2 uses that too
if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
&& s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
av_log(avctx, AV_LOG_ERROR,
"mpeg2 style quantization not supported by codec\n");
return AVERROR(EINVAL);
if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
return AVERROR(EINVAL);
if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
s->avctx->mb_decision != FF_MB_DECISION_RD) {
av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
return AVERROR(EINVAL);
if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
(s->codec_id == AV_CODEC_ID_AMV ||
s->codec_id == AV_CODEC_ID_MJPEG)) {
// Used to produce garbage with MJPEG.
av_log(avctx, AV_LOG_ERROR,
"QP RD is no longer compatible with MJPEG or AMV\n");
return AVERROR(EINVAL);
#if FF_API_PRIVATE_OPT
FF_DISABLE_DEPRECATION_WARNINGS
if (avctx->scenechange_threshold)
s->scenechange_threshold = avctx->scenechange_threshold;
FF_ENABLE_DEPRECATION_WARNINGS
if (s->scenechange_threshold < 1000000000 &&
(s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
av_log(avctx, AV_LOG_ERROR,
"closed gop with scene change detection are not supported yet, "
"set threshold to 1000000000\n");
return AVERROR_PATCHWELCOME;
if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
av_log(avctx, AV_LOG_ERROR,
"low delay forcing is only available for mpeg2, "
"set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
return AVERROR(EINVAL);
if (s->max_b_frames != 0) {
av_log(avctx, AV_LOG_ERROR,
"B-frames cannot be used with low delay\n");
return AVERROR(EINVAL);
if (s->q_scale_type == 1) {
if (avctx->qmax > 28) {
av_log(avctx, AV_LOG_ERROR,
"non linear quant only supports qmax <= 28 currently\n");
return AVERROR_PATCHWELCOME;
if (avctx->slices > 1 &&
(avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
return AVERROR(EINVAL);
if (s->avctx->thread_count > 1 &&
s->codec_id != AV_CODEC_ID_MPEG4 &&
s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
s->codec_id != AV_CODEC_ID_MJPEG &&
(s->codec_id != AV_CODEC_ID_H263P)) {
av_log(avctx, AV_LOG_ERROR,
"multi threaded encoding not supported by codec\n");
return AVERROR_PATCHWELCOME;
if (s->avctx->thread_count < 1) {
av_log(avctx, AV_LOG_ERROR,
"automatic thread number detection not supported by codec, "
return AVERROR_PATCHWELCOME;
if (!avctx->time_base.den || !avctx->time_base.num) {
av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
return AVERROR(EINVAL);
#if FF_API_PRIVATE_OPT
FF_DISABLE_DEPRECATION_WARNINGS
if (avctx->b_frame_strategy)
s->b_frame_strategy = avctx->b_frame_strategy;
if (avctx->b_sensitivity != 40)
s->b_sensitivity = avctx->b_sensitivity;
FF_ENABLE_DEPRECATION_WARNINGS
if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
av_log(avctx, AV_LOG_INFO,
"notice: b_frame_strategy only affects the first pass\n");
s->b_frame_strategy = 0;
/* normalize the timebase */
i = av_gcd(avctx->time_base.den, avctx->time_base.num);
av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
avctx->time_base.den /= i;
avctx->time_base.num /= i;
/* --- quantizer bias defaults, per output format --- */
if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) {
// (a + x * 3 / 8) / x
s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
s->inter_quant_bias = 0;
s->intra_quant_bias = 0;
s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
return AVERROR(EINVAL);
av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
/* MPEG-4 stores the timebase denominator in 16 bits */
if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
s->avctx->time_base.den > (1 << 16) - 1) {
av_log(avctx, AV_LOG_ERROR,
"timebase %d/%d not supported by MPEG 4 standard, "
"the maximum admitted value for the timebase denominator "
"is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
return AVERROR(EINVAL);
s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
/* --- per-codec output format and feature configuration --- */
switch (avctx->codec->id) {
case AV_CODEC_ID_MPEG1VIDEO:
s->out_format = FMT_MPEG1;
s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
case AV_CODEC_ID_MPEG2VIDEO:
s->out_format = FMT_MPEG1;
s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
case AV_CODEC_ID_MJPEG:
case AV_CODEC_ID_AMV:
s->out_format = FMT_MJPEG;
s->intra_only = 1; /* force intra only for jpeg */
if (!CONFIG_MJPEG_ENCODER)
return AVERROR_ENCODER_NOT_FOUND;
if ((ret = ff_mjpeg_encode_init(s)) < 0)
case AV_CODEC_ID_H261:
if (!CONFIG_H261_ENCODER)
return AVERROR_ENCODER_NOT_FOUND;
if (ff_h261_get_picture_format(s->width, s->height) < 0) {
av_log(avctx, AV_LOG_ERROR,
"The specified picture size of %dx%d is not valid for the "
"H.261 codec.\nValid sizes are 176x144, 352x288\n",
s->width, s->height);
return AVERROR(EINVAL);
s->out_format = FMT_H261;
s->rtp_mode = 0; /* Sliced encoding not supported */
case AV_CODEC_ID_H263:
if (!CONFIG_H263_ENCODER)
return AVERROR_ENCODER_NOT_FOUND;
if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
s->width, s->height) == 8) {
av_log(avctx, AV_LOG_ERROR,
"The specified picture size of %dx%d is not valid for "
"the H.263 codec.\nValid sizes are 128x96, 176x144, "
"352x288, 704x576, and 1408x1152. "
"Try H.263+.\n", s->width, s->height);
return AVERROR(EINVAL);
s->out_format = FMT_H263;
case AV_CODEC_ID_H263P:
s->out_format = FMT_H263;
/* advanced intra coding implies modified quantization */
s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
s->modified_quant = s->h263_aic;
s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
/* These are just to be sure */
case AV_CODEC_ID_FLV1:
s->out_format = FMT_H263;
s->h263_flv = 2; /* format = 1; 11-bit codes */
s->unrestricted_mv = 1;
s->rtp_mode = 0; /* don't allow GOB */
case AV_CODEC_ID_RV10:
s->out_format = FMT_H263;
case AV_CODEC_ID_RV20:
s->out_format = FMT_H263;
s->modified_quant = 1;
s->unrestricted_mv = 0;
case AV_CODEC_ID_MPEG4:
s->out_format = FMT_H263;
s->unrestricted_mv = 1;
s->low_delay = s->max_b_frames ? 0 : 1;
avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
case AV_CODEC_ID_MSMPEG4V2:
s->out_format = FMT_H263;
s->unrestricted_mv = 1;
s->msmpeg4_version = 2;
case AV_CODEC_ID_MSMPEG4V3:
s->out_format = FMT_H263;
s->unrestricted_mv = 1;
s->msmpeg4_version = 3;
s->flipflop_rounding = 1;
case AV_CODEC_ID_WMV1:
s->out_format = FMT_H263;
s->unrestricted_mv = 1;
s->msmpeg4_version = 4;
s->flipflop_rounding = 1;
case AV_CODEC_ID_WMV2:
s->out_format = FMT_H263;
s->unrestricted_mv = 1;
s->msmpeg4_version = 5;
s->flipflop_rounding = 1;
return AVERROR(EINVAL);
#if FF_API_PRIVATE_OPT
FF_DISABLE_DEPRECATION_WARNINGS
if (avctx->noise_reduction)
s->noise_reduction = avctx->noise_reduction;
FF_ENABLE_DEPRECATION_WARNINGS
avctx->has_b_frames = !s->low_delay;
s->progressive_frame =
s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
AV_CODEC_FLAG_INTERLACED_ME) ||
/* --- allocate shared state and DSP contexts --- */
if ((ret = ff_mpv_common_init(s)) < 0)
ff_fdctdsp_init(&s->fdsp, avctx);
ff_me_cmp_init(&s->mecc, avctx);
ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
ff_pixblockdsp_init(&s->pdsp, avctx);
ff_qpeldsp_init(&s->qdsp);
if (s->msmpeg4_version) {
int ac_stats_size = 2 * 2 * (MAX_LEVEL + 1) * (MAX_RUN + 1) * 2 * sizeof(int);
if (!(s->ac_stats = av_mallocz(ac_stats_size)))
return AVERROR(ENOMEM);
if (!(s->avctx->stats_out = av_mallocz(256)) ||
!FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix, 32) ||
!FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix, 32) ||
!FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix, 32) ||
!FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16, 32) ||
!FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
!FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16, 32) ||
!FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_PICTURE_COUNT) ||
!FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_PICTURE_COUNT))
return AVERROR(ENOMEM);
if (s->noise_reduction) {
if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
return AVERROR(ENOMEM);
ff_dct_encode_init(s);
if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
s->chroma_qscale_table = ff_h263_chroma_qscale_table;
if (s->slice_context_count > 1) {
if (avctx->codec_id == AV_CODEC_ID_H263P)
s->h263_slice_structured = 1;
s->quant_precision = 5;
#if FF_API_PRIVATE_OPT
FF_DISABLE_DEPRECATION_WARNINGS
if (avctx->frame_skip_threshold)
s->frame_skip_threshold = avctx->frame_skip_threshold;
if (avctx->frame_skip_factor)
s->frame_skip_factor = avctx->frame_skip_factor;
if (avctx->frame_skip_exp)
s->frame_skip_exp = avctx->frame_skip_exp;
if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
s->frame_skip_cmp = avctx->frame_skip_cmp;
FF_ENABLE_DEPRECATION_WARNINGS
ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, s->avctx->ildct_cmp);
ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
/* per-format encoder sub-init */
if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
ff_h261_encode_init(s);
if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
ff_h263_encode_init(s);
if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
if ((ret = ff_msmpeg4_encode_init(s)) < 0)
if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
&& s->out_format == FMT_MPEG1)
ff_mpeg1_encode_init(s);
/* --- select default quantization matrices (IDCT-permuted) --- */
for (i = 0; i < 64; i++) {
int j = s->idsp.idct_permutation[i];
if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
} else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
s->chroma_intra_matrix[j] =
s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
/* user-supplied matrices override the defaults */
if (s->avctx->intra_matrix)
s->intra_matrix[j] = s->avctx->intra_matrix[i];
if (s->avctx->inter_matrix)
s->inter_matrix[j] = s->avctx->inter_matrix[i];
/* precompute matrix */
/* for mjpeg, we do include qscale in the matrix */
if (s->out_format != FMT_MJPEG) {
ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
s->intra_matrix, s->intra_quant_bias, avctx->qmin,
ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
s->inter_matrix, s->inter_quant_bias, avctx->qmin,
if ((ret = ff_rate_control_init(s)) < 0)
#if FF_API_PRIVATE_OPT
FF_DISABLE_DEPRECATION_WARNINGS
if (avctx->brd_scale)
s->brd_scale = avctx->brd_scale;
if (avctx->prediction_method)
s->pred = avctx->prediction_method + 1;
FF_ENABLE_DEPRECATION_WARNINGS
/* b_frame_strategy 2 needs downscaled lookahead frames */
if (s->b_frame_strategy == 2) {
for (i = 0; i < s->max_b_frames + 2; i++) {
s->tmp_frames[i] = av_frame_alloc();
if (!s->tmp_frames[i])
return AVERROR(ENOMEM);
s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
s->tmp_frames[i]->width = s->width >> s->brd_scale;
s->tmp_frames[i]->height = s->height >> s->brd_scale;
ret = av_frame_get_buffer(s->tmp_frames[i], 0);
/* export CPB properties as stream side data */
cpb_props = ff_add_cpb_side_data(avctx);
return AVERROR(ENOMEM);
cpb_props->max_bitrate = avctx->rc_max_rate;
cpb_props->min_bitrate = avctx->rc_min_rate;
cpb_props->avg_bitrate = avctx->bit_rate;
cpb_props->buffer_size = avctx->rc_buffer_size;
/**
 * Free all encoder state allocated by ff_mpv_encode_init().
 */
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
MpegEncContext *s = avctx->priv_data;
ff_rate_control_uninit(s);
ff_mpv_common_end(s);
if (CONFIG_MJPEG_ENCODER &&
s->out_format == FMT_MJPEG)
ff_mjpeg_encode_close(s);
av_freep(&avctx->extradata);
for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
av_frame_free(&s->tmp_frames[i]);
ff_free_picture_tables(&s->new_picture);
ff_mpeg_unref_picture(s->avctx, &s->new_picture);
av_freep(&s->avctx->stats_out);
av_freep(&s->ac_stats);
/* the chroma matrices may alias the luma ones; only free if distinct */
if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
s->q_chroma_intra_matrix= NULL;
s->q_chroma_intra_matrix16= NULL;
av_freep(&s->q_intra_matrix);
av_freep(&s->q_inter_matrix);
av_freep(&s->q_intra_matrix16);
av_freep(&s->q_inter_matrix16);
av_freep(&s->input_picture);
av_freep(&s->reordered_input_picture);
av_freep(&s->dct_offset);
/**
 * Sum of absolute errors of a 16x16 block against a constant reference
 * value (typically the block mean) — a cheap intra-cost estimate.
 */
static int get_sae(uint8_t *src, int ref, int stride)
for (y = 0; y < 16; y++) {
for (x = 0; x < 16; x++) {
acc += FFABS(src[x + y * stride] - ref);
/**
 * Count the 16x16 blocks for which an intra estimate (SAE vs. block mean,
 * plus a 500 offset) beats the inter SAD against @p ref — used as a cheap
 * scene-change / intra-frame heuristic.  Only full 16-pixel rows/columns
 * are scanned (dimensions rounded down via & ~15).
 */
static int get_intra_count(MpegEncContext *s, uint8_t *src,
uint8_t *ref, int stride)
h = s->height & ~15;
for (y = 0; y < h; y += 16) {
for (x = 0; x < w; x += 16) {
int offset = x + y * stride;
int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
int sae = get_sae(src + offset, mean, stride);
/* block counts as "intra" if intra cost + margin is below inter SAD */
acc += sae + 500 < sad;
1138 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1140 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1141 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1142 s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1143 &s->linesize, &s->uvlinesize);
1146 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1148 Picture *pic = NULL;
1150 int i, display_picture_number = 0, ret;
1151 int encoding_delay = s->max_b_frames ? s->max_b_frames
1152 : (s->low_delay ? 0 : 1);
1153 int flush_offset = 1;
1158 display_picture_number = s->input_picture_number++;
1160 if (pts != AV_NOPTS_VALUE) {
1161 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1162 int64_t last = s->user_specified_pts;
1165 av_log(s->avctx, AV_LOG_ERROR,
1166 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1168 return AVERROR(EINVAL);
1171 if (!s->low_delay && display_picture_number == 1)
1172 s->dts_delta = pts - last;
1174 s->user_specified_pts = pts;
1176 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1177 s->user_specified_pts =
1178 pts = s->user_specified_pts + 1;
1179 av_log(s->avctx, AV_LOG_INFO,
1180 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1183 pts = display_picture_number;
1187 if (!pic_arg->buf[0] ||
1188 pic_arg->linesize[0] != s->linesize ||
1189 pic_arg->linesize[1] != s->uvlinesize ||
1190 pic_arg->linesize[2] != s->uvlinesize)
1192 if ((s->width & 15) || (s->height & 15))
1194 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1196 if (s->linesize & (STRIDE_ALIGN-1))
1199 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1200 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1202 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1206 pic = &s->picture[i];
1210 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1213 ret = alloc_picture(s, pic, direct);
1218 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1219 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1220 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1223 int h_chroma_shift, v_chroma_shift;
1224 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1228 for (i = 0; i < 3; i++) {
1229 int src_stride = pic_arg->linesize[i];
1230 int dst_stride = i ? s->uvlinesize : s->linesize;
1231 int h_shift = i ? h_chroma_shift : 0;
1232 int v_shift = i ? v_chroma_shift : 0;
1233 int w = s->width >> h_shift;
1234 int h = s->height >> v_shift;
1235 uint8_t *src = pic_arg->data[i];
1236 uint8_t *dst = pic->f->data[i];
1239 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1240 && !s->progressive_sequence
1241 && FFALIGN(s->height, 32) - s->height > 16)
1244 if (!s->avctx->rc_buffer_size)
1245 dst += INPLACE_OFFSET;
1247 if (src_stride == dst_stride)
1248 memcpy(dst, src, src_stride * h);
1251 uint8_t *dst2 = dst;
1253 memcpy(dst2, src, w);
1258 if ((s->width & 15) || (s->height & (vpad-1))) {
1259 s->mpvencdsp.draw_edges(dst, dst_stride,
1269 ret = av_frame_copy_props(pic->f, pic_arg);
1273 pic->f->display_picture_number = display_picture_number;
1274 pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1276 /* Flushing: When we have not received enough input frames,
1277 * ensure s->input_picture[0] contains the first picture */
1278 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1279 if (s->input_picture[flush_offset])
1282 if (flush_offset <= 1)
1285 encoding_delay = encoding_delay - flush_offset + 1;
1288 /* shift buffer entries */
1289 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1290 s->input_picture[i - flush_offset] = s->input_picture[i];
1292 s->input_picture[encoding_delay] = (Picture*) pic;
1297 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1301 int64_t score64 = 0;
1303 for (plane = 0; plane < 3; plane++) {
1304 const int stride = p->f->linesize[plane];
1305 const int bw = plane ? 1 : 2;
1306 for (y = 0; y < s->mb_height * bw; y++) {
1307 for (x = 0; x < s->mb_width * bw; x++) {
1308 int off = p->shared ? 0 : 16;
1309 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1310 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1311 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1313 switch (FFABS(s->frame_skip_exp)) {
1314 case 0: score = FFMAX(score, v); break;
1315 case 1: score += FFABS(v); break;
1316 case 2: score64 += v * (int64_t)v; break;
1317 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1318 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1327 if (s->frame_skip_exp < 0)
1328 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1329 -1.0/s->frame_skip_exp);
1331 if (score64 < s->frame_skip_threshold)
1333 if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
1338 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1340 AVPacket pkt = { 0 };
1344 av_init_packet(&pkt);
1346 ret = avcodec_send_frame(c, frame);
1351 ret = avcodec_receive_packet(c, &pkt);
1354 av_packet_unref(&pkt);
1355 } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1362 static int estimate_best_b_count(MpegEncContext *s)
1364 const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1365 const int scale = s->brd_scale;
1366 int width = s->width >> scale;
1367 int height = s->height >> scale;
1368 int i, j, out_size, p_lambda, b_lambda, lambda2;
1369 int64_t best_rd = INT64_MAX;
1370 int best_b_count = -1;
1373 av_assert0(scale >= 0 && scale <= 3);
1376 //s->next_picture_ptr->quality;
1377 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1378 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1379 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1380 if (!b_lambda) // FIXME we should do this somewhere else
1381 b_lambda = p_lambda;
1382 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1385 for (i = 0; i < s->max_b_frames + 2; i++) {
1386 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1387 s->next_picture_ptr;
1390 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1391 pre_input = *pre_input_ptr;
1392 memcpy(data, pre_input_ptr->f->data, sizeof(data));
1394 if (!pre_input.shared && i) {
1395 data[0] += INPLACE_OFFSET;
1396 data[1] += INPLACE_OFFSET;
1397 data[2] += INPLACE_OFFSET;
1400 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1401 s->tmp_frames[i]->linesize[0],
1403 pre_input.f->linesize[0],
1405 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1406 s->tmp_frames[i]->linesize[1],
1408 pre_input.f->linesize[1],
1409 width >> 1, height >> 1);
1410 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1411 s->tmp_frames[i]->linesize[2],
1413 pre_input.f->linesize[2],
1414 width >> 1, height >> 1);
1418 for (j = 0; j < s->max_b_frames + 1; j++) {
1422 if (!s->input_picture[j])
1425 c = avcodec_alloc_context3(NULL);
1427 return AVERROR(ENOMEM);
1431 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1432 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1433 c->mb_decision = s->avctx->mb_decision;
1434 c->me_cmp = s->avctx->me_cmp;
1435 c->mb_cmp = s->avctx->mb_cmp;
1436 c->me_sub_cmp = s->avctx->me_sub_cmp;
1437 c->pix_fmt = AV_PIX_FMT_YUV420P;
1438 c->time_base = s->avctx->time_base;
1439 c->max_b_frames = s->max_b_frames;
1441 ret = avcodec_open2(c, codec, NULL);
1445 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1446 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1448 out_size = encode_frame(c, s->tmp_frames[0]);
1454 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1456 for (i = 0; i < s->max_b_frames + 1; i++) {
1457 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1459 s->tmp_frames[i + 1]->pict_type = is_p ?
1460 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1461 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1463 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1469 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1472 /* get the delayed frames */
1473 out_size = encode_frame(c, NULL);
1478 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1480 rd += c->error[0] + c->error[1] + c->error[2];
1488 avcodec_free_context(&c);
1493 return best_b_count;
1496 static int select_input_picture(MpegEncContext *s)
1500 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1501 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1502 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1504 /* set next picture type & ordering */
1505 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1506 if (s->frame_skip_threshold || s->frame_skip_factor) {
1507 if (s->picture_in_gop_number < s->gop_size &&
1508 s->next_picture_ptr &&
1509 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1510 // FIXME check that the gop check above is +-1 correct
1511 av_frame_unref(s->input_picture[0]->f);
1513 ff_vbv_update(s, 0);
1519 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1520 !s->next_picture_ptr || s->intra_only) {
1521 s->reordered_input_picture[0] = s->input_picture[0];
1522 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1523 s->reordered_input_picture[0]->f->coded_picture_number =
1524 s->coded_picture_number++;
1528 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1529 for (i = 0; i < s->max_b_frames + 1; i++) {
1530 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1532 if (pict_num >= s->rc_context.num_entries)
1534 if (!s->input_picture[i]) {
1535 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1539 s->input_picture[i]->f->pict_type =
1540 s->rc_context.entry[pict_num].new_pict_type;
1544 if (s->b_frame_strategy == 0) {
1545 b_frames = s->max_b_frames;
1546 while (b_frames && !s->input_picture[b_frames])
1548 } else if (s->b_frame_strategy == 1) {
1549 for (i = 1; i < s->max_b_frames + 1; i++) {
1550 if (s->input_picture[i] &&
1551 s->input_picture[i]->b_frame_score == 0) {
1552 s->input_picture[i]->b_frame_score =
1554 s->input_picture[i ]->f->data[0],
1555 s->input_picture[i - 1]->f->data[0],
1559 for (i = 0; i < s->max_b_frames + 1; i++) {
1560 if (!s->input_picture[i] ||
1561 s->input_picture[i]->b_frame_score - 1 >
1562 s->mb_num / s->b_sensitivity)
1566 b_frames = FFMAX(0, i - 1);
1569 for (i = 0; i < b_frames + 1; i++) {
1570 s->input_picture[i]->b_frame_score = 0;
1572 } else if (s->b_frame_strategy == 2) {
1573 b_frames = estimate_best_b_count(s);
1580 for (i = b_frames - 1; i >= 0; i--) {
1581 int type = s->input_picture[i]->f->pict_type;
1582 if (type && type != AV_PICTURE_TYPE_B)
1585 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1586 b_frames == s->max_b_frames) {
1587 av_log(s->avctx, AV_LOG_ERROR,
1588 "warning, too many B-frames in a row\n");
1591 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1592 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1593 s->gop_size > s->picture_in_gop_number) {
1594 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1596 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1598 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1602 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1603 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1606 s->reordered_input_picture[0] = s->input_picture[b_frames];
1607 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1608 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1609 s->reordered_input_picture[0]->f->coded_picture_number =
1610 s->coded_picture_number++;
1611 for (i = 0; i < b_frames; i++) {
1612 s->reordered_input_picture[i + 1] = s->input_picture[i];
1613 s->reordered_input_picture[i + 1]->f->pict_type =
1615 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1616 s->coded_picture_number++;
1621 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1623 if (s->reordered_input_picture[0]) {
1624 s->reordered_input_picture[0]->reference =
1625 s->reordered_input_picture[0]->f->pict_type !=
1626 AV_PICTURE_TYPE_B ? 3 : 0;
1628 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1631 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1632 // input is a shared pix, so we can't modify it -> allocate a new
1633 // one & ensure that the shared one is reuseable
1636 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1639 pic = &s->picture[i];
1641 pic->reference = s->reordered_input_picture[0]->reference;
1642 if (alloc_picture(s, pic, 0) < 0) {
1646 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1650 /* mark us unused / free shared pic */
1651 av_frame_unref(s->reordered_input_picture[0]->f);
1652 s->reordered_input_picture[0]->shared = 0;
1654 s->current_picture_ptr = pic;
1656 // input is not a shared pix -> reuse buffer for current_pix
1657 s->current_picture_ptr = s->reordered_input_picture[0];
1658 for (i = 0; i < 4; i++) {
1659 s->new_picture.f->data[i] += INPLACE_OFFSET;
1662 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1663 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1664 s->current_picture_ptr)) < 0)
1667 s->picture_number = s->new_picture.f->display_picture_number;
1672 static void frame_end(MpegEncContext *s)
1674 if (s->unrestricted_mv &&
1675 s->current_picture.reference &&
1677 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1678 int hshift = desc->log2_chroma_w;
1679 int vshift = desc->log2_chroma_h;
1680 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1681 s->current_picture.f->linesize[0],
1682 s->h_edge_pos, s->v_edge_pos,
1683 EDGE_WIDTH, EDGE_WIDTH,
1684 EDGE_TOP | EDGE_BOTTOM);
1685 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1686 s->current_picture.f->linesize[1],
1687 s->h_edge_pos >> hshift,
1688 s->v_edge_pos >> vshift,
1689 EDGE_WIDTH >> hshift,
1690 EDGE_WIDTH >> vshift,
1691 EDGE_TOP | EDGE_BOTTOM);
1692 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1693 s->current_picture.f->linesize[2],
1694 s->h_edge_pos >> hshift,
1695 s->v_edge_pos >> vshift,
1696 EDGE_WIDTH >> hshift,
1697 EDGE_WIDTH >> vshift,
1698 EDGE_TOP | EDGE_BOTTOM);
1703 s->last_pict_type = s->pict_type;
1704 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1705 if (s->pict_type!= AV_PICTURE_TYPE_B)
1706 s->last_non_b_pict_type = s->pict_type;
1708 #if FF_API_CODED_FRAME
1709 FF_DISABLE_DEPRECATION_WARNINGS
1710 av_frame_unref(s->avctx->coded_frame);
1711 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1712 FF_ENABLE_DEPRECATION_WARNINGS
1714 #if FF_API_ERROR_FRAME
1715 FF_DISABLE_DEPRECATION_WARNINGS
1716 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1717 sizeof(s->current_picture.encoding_error));
1718 FF_ENABLE_DEPRECATION_WARNINGS
1722 static void update_noise_reduction(MpegEncContext *s)
1726 for (intra = 0; intra < 2; intra++) {
1727 if (s->dct_count[intra] > (1 << 16)) {
1728 for (i = 0; i < 64; i++) {
1729 s->dct_error_sum[intra][i] >>= 1;
1731 s->dct_count[intra] >>= 1;
1734 for (i = 0; i < 64; i++) {
1735 s->dct_offset[intra][i] = (s->noise_reduction *
1736 s->dct_count[intra] +
1737 s->dct_error_sum[intra][i] / 2) /
1738 (s->dct_error_sum[intra][i] + 1);
1743 static int frame_start(MpegEncContext *s)
1747 /* mark & release old frames */
1748 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1749 s->last_picture_ptr != s->next_picture_ptr &&
1750 s->last_picture_ptr->f->buf[0]) {
1751 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1754 s->current_picture_ptr->f->pict_type = s->pict_type;
1755 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1757 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1758 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1759 s->current_picture_ptr)) < 0)
1762 if (s->pict_type != AV_PICTURE_TYPE_B) {
1763 s->last_picture_ptr = s->next_picture_ptr;
1765 s->next_picture_ptr = s->current_picture_ptr;
1768 if (s->last_picture_ptr) {
1769 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1770 if (s->last_picture_ptr->f->buf[0] &&
1771 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1772 s->last_picture_ptr)) < 0)
1775 if (s->next_picture_ptr) {
1776 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1777 if (s->next_picture_ptr->f->buf[0] &&
1778 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1779 s->next_picture_ptr)) < 0)
1783 if (s->picture_structure!= PICT_FRAME) {
1785 for (i = 0; i < 4; i++) {
1786 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1787 s->current_picture.f->data[i] +=
1788 s->current_picture.f->linesize[i];
1790 s->current_picture.f->linesize[i] *= 2;
1791 s->last_picture.f->linesize[i] *= 2;
1792 s->next_picture.f->linesize[i] *= 2;
1796 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1797 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1798 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1799 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1800 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1801 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1803 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1804 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1807 if (s->dct_error_sum) {
1808 av_assert2(s->noise_reduction && s->encoding);
1809 update_noise_reduction(s);
1815 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1816 const AVFrame *pic_arg, int *got_packet)
1818 MpegEncContext *s = avctx->priv_data;
1819 int i, stuffing_count, ret;
1820 int context_count = s->slice_context_count;
1822 s->vbv_ignore_qmax = 0;
1824 s->picture_in_gop_number++;
1826 if (load_input_picture(s, pic_arg) < 0)
1829 if (select_input_picture(s) < 0) {
1834 if (s->new_picture.f->data[0]) {
1835 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1836 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1838 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1839 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1842 s->mb_info_ptr = av_packet_new_side_data(pkt,
1843 AV_PKT_DATA_H263_MB_INFO,
1844 s->mb_width*s->mb_height*12);
1845 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1848 for (i = 0; i < context_count; i++) {
1849 int start_y = s->thread_context[i]->start_mb_y;
1850 int end_y = s->thread_context[i]-> end_mb_y;
1851 int h = s->mb_height;
1852 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1853 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1855 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1858 s->pict_type = s->new_picture.f->pict_type;
1860 ret = frame_start(s);
1864 ret = encode_picture(s, s->picture_number);
1865 if (growing_buffer) {
1866 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1867 pkt->data = s->pb.buf;
1868 pkt->size = avctx->internal->byte_buffer_size;
1873 #if FF_API_STAT_BITS
1874 FF_DISABLE_DEPRECATION_WARNINGS
1875 avctx->header_bits = s->header_bits;
1876 avctx->mv_bits = s->mv_bits;
1877 avctx->misc_bits = s->misc_bits;
1878 avctx->i_tex_bits = s->i_tex_bits;
1879 avctx->p_tex_bits = s->p_tex_bits;
1880 avctx->i_count = s->i_count;
1881 // FIXME f/b_count in avctx
1882 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1883 avctx->skip_count = s->skip_count;
1884 FF_ENABLE_DEPRECATION_WARNINGS
1889 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1890 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
1892 if (avctx->rc_buffer_size) {
1893 RateControlContext *rcc = &s->rc_context;
1894 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1895 int hq = (s->avctx->mb_decision == FF_MB_DECISION_RD || s->avctx->trellis);
1896 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1898 if (put_bits_count(&s->pb) > max_size &&
1899 s->lambda < s->lmax) {
1900 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1901 (s->qscale + 1) / s->qscale);
1902 if (s->adaptive_quant) {
1904 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1905 s->lambda_table[i] =
1906 FFMAX(s->lambda_table[i] + min_step,
1907 s->lambda_table[i] * (s->qscale + 1) /
1910 s->mb_skipped = 0; // done in frame_start()
1911 // done in encode_picture() so we must undo it
1912 if (s->pict_type == AV_PICTURE_TYPE_P) {
1913 if (s->flipflop_rounding ||
1914 s->codec_id == AV_CODEC_ID_H263P ||
1915 s->codec_id == AV_CODEC_ID_MPEG4)
1916 s->no_rounding ^= 1;
1918 if (s->pict_type != AV_PICTURE_TYPE_B) {
1919 s->time_base = s->last_time_base;
1920 s->last_non_b_time = s->time - s->pp_time;
1922 for (i = 0; i < context_count; i++) {
1923 PutBitContext *pb = &s->thread_context[i]->pb;
1924 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1926 s->vbv_ignore_qmax = 1;
1927 av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1931 av_assert0(s->avctx->rc_max_rate);
1934 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1935 ff_write_pass1_stats(s);
1937 for (i = 0; i < 4; i++) {
1938 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1939 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1941 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1942 s->current_picture_ptr->encoding_error,
1943 (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1946 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1947 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1948 s->misc_bits + s->i_tex_bits +
1950 flush_put_bits(&s->pb);
1951 s->frame_bits = put_bits_count(&s->pb);
1953 stuffing_count = ff_vbv_update(s, s->frame_bits);
1954 s->stuffing_bits = 8*stuffing_count;
1955 if (stuffing_count) {
1956 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1957 stuffing_count + 50) {
1958 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1962 switch (s->codec_id) {
1963 case AV_CODEC_ID_MPEG1VIDEO:
1964 case AV_CODEC_ID_MPEG2VIDEO:
1965 while (stuffing_count--) {
1966 put_bits(&s->pb, 8, 0);
1969 case AV_CODEC_ID_MPEG4:
1970 put_bits(&s->pb, 16, 0);
1971 put_bits(&s->pb, 16, 0x1C3);
1972 stuffing_count -= 4;
1973 while (stuffing_count--) {
1974 put_bits(&s->pb, 8, 0xFF);
1978 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1980 flush_put_bits(&s->pb);
1981 s->frame_bits = put_bits_count(&s->pb);
1984 /* update MPEG-1/2 vbv_delay for CBR */
1985 if (s->avctx->rc_max_rate &&
1986 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1987 s->out_format == FMT_MPEG1 &&
1988 90000LL * (avctx->rc_buffer_size - 1) <=
1989 s->avctx->rc_max_rate * 0xFFFFLL) {
1990 AVCPBProperties *props;
1993 int vbv_delay, min_delay;
1994 double inbits = s->avctx->rc_max_rate *
1995 av_q2d(s->avctx->time_base);
1996 int minbits = s->frame_bits - 8 *
1997 (s->vbv_delay_ptr - s->pb.buf - 1);
1998 double bits = s->rc_context.buffer_index + minbits - inbits;
2001 av_log(s->avctx, AV_LOG_ERROR,
2002 "Internal error, negative bits\n");
2004 av_assert1(s->repeat_first_field == 0);
2006 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
2007 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
2008 s->avctx->rc_max_rate;
2010 vbv_delay = FFMAX(vbv_delay, min_delay);
2012 av_assert0(vbv_delay < 0xFFFF);
2014 s->vbv_delay_ptr[0] &= 0xF8;
2015 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2016 s->vbv_delay_ptr[1] = vbv_delay >> 5;
2017 s->vbv_delay_ptr[2] &= 0x07;
2018 s->vbv_delay_ptr[2] |= vbv_delay << 3;
2020 props = av_cpb_properties_alloc(&props_size);
2022 return AVERROR(ENOMEM);
2023 props->vbv_delay = vbv_delay * 300;
2025 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
2026 (uint8_t*)props, props_size);
2032 #if FF_API_VBV_DELAY
2033 FF_DISABLE_DEPRECATION_WARNINGS
2034 avctx->vbv_delay = vbv_delay * 300;
2035 FF_ENABLE_DEPRECATION_WARNINGS
2038 s->total_bits += s->frame_bits;
2039 #if FF_API_STAT_BITS
2040 FF_DISABLE_DEPRECATION_WARNINGS
2041 avctx->frame_bits = s->frame_bits;
2042 FF_ENABLE_DEPRECATION_WARNINGS
2046 pkt->pts = s->current_picture.f->pts;
2047 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2048 if (!s->current_picture.f->coded_picture_number)
2049 pkt->dts = pkt->pts - s->dts_delta;
2051 pkt->dts = s->reordered_pts;
2052 s->reordered_pts = pkt->pts;
2054 pkt->dts = pkt->pts;
2055 if (s->current_picture.f->key_frame)
2056 pkt->flags |= AV_PKT_FLAG_KEY;
2058 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2063 /* release non-reference frames */
2064 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2065 if (!s->picture[i].reference)
2066 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2069 av_assert1((s->frame_bits & 7) == 0);
2071 pkt->size = s->frame_bits / 8;
2072 *got_packet = !!pkt->size;
2076 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2077 int n, int threshold)
2079 static const char tab[64] = {
2080 3, 2, 2, 1, 1, 1, 1, 1,
2081 1, 1, 1, 1, 1, 1, 1, 1,
2082 1, 1, 1, 1, 1, 1, 1, 1,
2083 0, 0, 0, 0, 0, 0, 0, 0,
2084 0, 0, 0, 0, 0, 0, 0, 0,
2085 0, 0, 0, 0, 0, 0, 0, 0,
2086 0, 0, 0, 0, 0, 0, 0, 0,
2087 0, 0, 0, 0, 0, 0, 0, 0
2092 int16_t *block = s->block[n];
2093 const int last_index = s->block_last_index[n];
2096 if (threshold < 0) {
2098 threshold = -threshold;
2102 /* Are all we could set to zero already zero? */
2103 if (last_index <= skip_dc - 1)
2106 for (i = 0; i <= last_index; i++) {
2107 const int j = s->intra_scantable.permutated[i];
2108 const int level = FFABS(block[j]);
2110 if (skip_dc && i == 0)
2114 } else if (level > 1) {
2120 if (score >= threshold)
2122 for (i = skip_dc; i <= last_index; i++) {
2123 const int j = s->intra_scantable.permutated[i];
2127 s->block_last_index[n] = 0;
2129 s->block_last_index[n] = -1;
2132 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2136 const int maxlevel = s->max_qcoeff;
2137 const int minlevel = s->min_qcoeff;
2141 i = 1; // skip clipping of intra dc
2145 for (; i <= last_index; i++) {
2146 const int j = s->intra_scantable.permutated[i];
2147 int level = block[j];
2149 if (level > maxlevel) {
2152 } else if (level < minlevel) {
2160 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2161 av_log(s->avctx, AV_LOG_INFO,
2162 "warning, clipping %d dct coefficients to %d..%d\n",
2163 overflow, minlevel, maxlevel);
2166 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2170 for (y = 0; y < 8; y++) {
2171 for (x = 0; x < 8; x++) {
2177 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2178 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2179 int v = ptr[x2 + y2 * stride];
2185 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2190 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2191 int motion_x, int motion_y,
2192 int mb_block_height,
2196 int16_t weight[12][64];
2197 int16_t orig[12][64];
2198 const int mb_x = s->mb_x;
2199 const int mb_y = s->mb_y;
2202 int dct_offset = s->linesize * 8; // default for progressive frames
2203 int uv_dct_offset = s->uvlinesize * 8;
2204 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2205 ptrdiff_t wrap_y, wrap_c;
2207 for (i = 0; i < mb_block_count; i++)
2208 skip_dct[i] = s->skipdct;
2210 if (s->adaptive_quant) {
2211 const int last_qp = s->qscale;
2212 const int mb_xy = mb_x + mb_y * s->mb_stride;
2214 s->lambda = s->lambda_table[mb_xy];
2217 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2218 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2219 s->dquant = s->qscale - last_qp;
2221 if (s->out_format == FMT_H263) {
2222 s->dquant = av_clip(s->dquant, -2, 2);
2224 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2226 if (s->pict_type == AV_PICTURE_TYPE_B) {
2227 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2230 if (s->mv_type == MV_TYPE_8X8)
2236 ff_set_qscale(s, last_qp + s->dquant);
2237 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2238 ff_set_qscale(s, s->qscale + s->dquant);
2240 wrap_y = s->linesize;
2241 wrap_c = s->uvlinesize;
2242 ptr_y = s->new_picture.f->data[0] +
2243 (mb_y * 16 * wrap_y) + mb_x * 16;
2244 ptr_cb = s->new_picture.f->data[1] +
2245 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2246 ptr_cr = s->new_picture.f->data[2] +
2247 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2249 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2250 uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2251 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2252 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2253 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2255 16, 16, mb_x * 16, mb_y * 16,
2256 s->width, s->height);
2258 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2260 mb_block_width, mb_block_height,
2261 mb_x * mb_block_width, mb_y * mb_block_height,
2263 ptr_cb = ebuf + 16 * wrap_y;
2264 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2266 mb_block_width, mb_block_height,
2267 mb_x * mb_block_width, mb_y * mb_block_height,
2269 ptr_cr = ebuf + 16 * wrap_y + 16;
2273 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2274 int progressive_score, interlaced_score;
2276 s->interlaced_dct = 0;
2277 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2278 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2279 NULL, wrap_y, 8) - 400;
2281 if (progressive_score > 0) {
2282 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2283 NULL, wrap_y * 2, 8) +
2284 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2285 NULL, wrap_y * 2, 8);
2286 if (progressive_score > interlaced_score) {
2287 s->interlaced_dct = 1;
2289 dct_offset = wrap_y;
2290 uv_dct_offset = wrap_c;
2292 if (s->chroma_format == CHROMA_422 ||
2293 s->chroma_format == CHROMA_444)
2299 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2300 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2301 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2302 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2304 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2308 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2309 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2310 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2311 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2312 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2313 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2314 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2315 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2316 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2317 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2318 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2319 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2323 op_pixels_func (*op_pix)[4];
2324 qpel_mc_func (*op_qpix)[16];
2325 uint8_t *dest_y, *dest_cb, *dest_cr;
2327 dest_y = s->dest[0];
2328 dest_cb = s->dest[1];
2329 dest_cr = s->dest[2];
2331 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2332 op_pix = s->hdsp.put_pixels_tab;
2333 op_qpix = s->qdsp.put_qpel_pixels_tab;
2335 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2336 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2339 if (s->mv_dir & MV_DIR_FORWARD) {
2340 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2341 s->last_picture.f->data,
2343 op_pix = s->hdsp.avg_pixels_tab;
2344 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2346 if (s->mv_dir & MV_DIR_BACKWARD) {
2347 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2348 s->next_picture.f->data,
2352 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2353 int progressive_score, interlaced_score;
2355 s->interlaced_dct = 0;
2356 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2357 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2361 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2362 progressive_score -= 400;
2364 if (progressive_score > 0) {
2365 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2367 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2371 if (progressive_score > interlaced_score) {
2372 s->interlaced_dct = 1;
2374 dct_offset = wrap_y;
2375 uv_dct_offset = wrap_c;
2377 if (s->chroma_format == CHROMA_422)
2383 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2384 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2385 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2386 dest_y + dct_offset, wrap_y);
2387 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2388 dest_y + dct_offset + 8, wrap_y);
2390 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2394 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2395 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2396 if (!s->chroma_y_shift) { /* 422 */
2397 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2398 dest_cb + uv_dct_offset, wrap_c);
2399 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2400 dest_cr + uv_dct_offset, wrap_c);
2403 /* pre quantization */
2404 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2405 2 * s->qscale * s->qscale) {
2407 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2409 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2411 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2412 wrap_y, 8) < 20 * s->qscale)
2414 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2415 wrap_y, 8) < 20 * s->qscale)
2417 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2419 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2421 if (!s->chroma_y_shift) { /* 422 */
2422 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2423 dest_cb + uv_dct_offset,
2424 wrap_c, 8) < 20 * s->qscale)
2426 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2427 dest_cr + uv_dct_offset,
2428 wrap_c, 8) < 20 * s->qscale)
2434 if (s->quantizer_noise_shaping) {
2436 get_visual_weight(weight[0], ptr_y , wrap_y);
2438 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2440 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2442 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2444 get_visual_weight(weight[4], ptr_cb , wrap_c);
2446 get_visual_weight(weight[5], ptr_cr , wrap_c);
2447 if (!s->chroma_y_shift) { /* 422 */
2449 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2452 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2455 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2458 /* DCT & quantize */
2459 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2461 for (i = 0; i < mb_block_count; i++) {
2464 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2465 // FIXME we could decide to change to quantizer instead of
2467 // JS: I don't think that would be a good idea it could lower
2468 // quality instead of improve it. Just INTRADC clipping
2469 // deserves changes in quantizer
2471 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2473 s->block_last_index[i] = -1;
2475 if (s->quantizer_noise_shaping) {
2476 for (i = 0; i < mb_block_count; i++) {
2478 s->block_last_index[i] =
2479 dct_quantize_refine(s, s->block[i], weight[i],
2480 orig[i], i, s->qscale);
2485 if (s->luma_elim_threshold && !s->mb_intra)
2486 for (i = 0; i < 4; i++)
2487 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2488 if (s->chroma_elim_threshold && !s->mb_intra)
2489 for (i = 4; i < mb_block_count; i++)
2490 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2492 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2493 for (i = 0; i < mb_block_count; i++) {
2494 if (s->block_last_index[i] == -1)
2495 s->coded_score[i] = INT_MAX / 256;
2500 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2501 s->block_last_index[4] =
2502 s->block_last_index[5] = 0;
2504 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2505 if (!s->chroma_y_shift) { /* 422 / 444 */
2506 for (i=6; i<12; i++) {
2507 s->block_last_index[i] = 0;
2508 s->block[i][0] = s->block[4][0];
2513 // non c quantize code returns incorrect block_last_index FIXME
2514 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2515 for (i = 0; i < mb_block_count; i++) {
2517 if (s->block_last_index[i] > 0) {
2518 for (j = 63; j > 0; j--) {
2519 if (s->block[i][s->intra_scantable.permutated[j]])
2522 s->block_last_index[i] = j;
2527 /* huffman encode */
2528 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2529 case AV_CODEC_ID_MPEG1VIDEO:
2530 case AV_CODEC_ID_MPEG2VIDEO:
2531 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2532 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2534 case AV_CODEC_ID_MPEG4:
2535 if (CONFIG_MPEG4_ENCODER)
2536 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2538 case AV_CODEC_ID_MSMPEG4V2:
2539 case AV_CODEC_ID_MSMPEG4V3:
2540 case AV_CODEC_ID_WMV1:
2541 if (CONFIG_MSMPEG4_ENCODER)
2542 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2544 case AV_CODEC_ID_WMV2:
2545 if (CONFIG_WMV2_ENCODER)
2546 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2548 case AV_CODEC_ID_H261:
2549 if (CONFIG_H261_ENCODER)
2550 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2552 case AV_CODEC_ID_H263:
2553 case AV_CODEC_ID_H263P:
2554 case AV_CODEC_ID_FLV1:
2555 case AV_CODEC_ID_RV10:
2556 case AV_CODEC_ID_RV20:
2557 if (CONFIG_H263_ENCODER)
2558 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2560 case AV_CODEC_ID_MJPEG:
2561 case AV_CODEC_ID_AMV:
2562 if (CONFIG_MJPEG_ENCODER)
2563 ff_mjpeg_encode_mb(s, s->block);
2570 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2572 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2573 else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2574 else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/* Snapshot the encoder-state fields that a trial macroblock encode may
 * modify, copying from s into d, so the caller (encode_mb_hq()) can later
 * restore them.  `type` selects which candidate MB type is about to be
 * tried (-1 is used for the full backup before all candidates).
 * NOTE(review): gaps in the embedded line numbering show lines missing
 * from this listing (e.g. the loop header for last_dc[]), so the copy
 * set visible here is incomplete. */
2577 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
     /* motion-vector prediction state */
2580     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
     /* run length of consecutively skipped macroblocks */
2583     d->mb_skip_run= s->mb_skip_run;
     /* DC predictors (body of a for loop whose header is missing here) */
2585         d->last_dc[i] = s->last_dc[i];
     /* per-category bit counters, used for statistics / rate control */
2588     d->mv_bits= s->mv_bits;
2589     d->i_tex_bits= s->i_tex_bits;
2590     d->p_tex_bits= s->p_tex_bits;
2591     d->i_count= s->i_count;
2592     d->f_count= s->f_count;
2593     d->b_count= s->b_count;
2594     d->skip_count= s->skip_count;
2595     d->misc_bits= s->misc_bits;
     /* quantizer state */
2599     d->qscale= s->qscale;
2600     d->dquant= s->dquant;
     /* escape-code state (presumably MSMPEG4-specific — confirm) */
2602     d->esc3_level_length= s->esc3_level_length;
/* Counterpart of copy_context_before_encode(): after a trial encode,
 * copy the (possibly updated) encoder state from s back into d.  Used by
 * encode_mb_hq() to capture the best candidate's state.  Compared with
 * the "before" variant this also copies mv[], mb_intra/mb_skipped,
 * mv_type/mv_dir, the bitstream contexts and block_last_index[].
 * NOTE(review): numbering gaps show lines missing from this listing
 * (loop headers, d->pb assignment, etc.); the visible set is partial. */
2605 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
     /* motion vectors chosen by the trial encode */
2608     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2609     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2612     d->mb_skip_run= s->mb_skip_run;
     /* DC predictors (loop header missing from this listing) */
2614         d->last_dc[i] = s->last_dc[i];
     /* per-category bit counters */
2617     d->mv_bits= s->mv_bits;
2618     d->i_tex_bits= s->i_tex_bits;
2619     d->p_tex_bits= s->p_tex_bits;
2620     d->i_count= s->i_count;
2621     d->f_count= s->f_count;
2622     d->b_count= s->b_count;
2623     d->skip_count= s->skip_count;
2624     d->misc_bits= s->misc_bits;
     /* macroblock decision results */
2626     d->mb_intra= s->mb_intra;
2627     d->mb_skipped= s->mb_skipped;
2628     d->mv_type= s->mv_type;
2629     d->mv_dir= s->mv_dir;
     /* with data partitioning, the extra bitstream contexts as well */
2631     if(s->data_partitioning){
2633         d->tex_pb= s->tex_pb;
     /* per-block last nonzero coefficient index (loop header missing) */
2637         d->block_last_index[i]= s->block_last_index[i];
2638     d->interlaced_dct= s->interlaced_dct;
2639     d->qscale= s->qscale;
     /* escape-code state (presumably MSMPEG4-specific — confirm) */
2641     d->esc3_level_length= s->esc3_level_length;
/* Trial-encode the current macroblock as candidate `type` into the
 * double-buffered block/bitstream set selected by *next_block, compute a
 * cost (bit count, or full rate-distortion cost when
 * mb_decision == FF_MB_DECISION_RD), and record the winning state into
 * *best via copy_context_after_encode().  `backup` holds the pre-trial
 * context restored before each candidate.
 * NOTE(review): numbering gaps show lines missing from this listing —
 * including the declarations and the *dmin comparison / *next_block flip
 * that accepts an improved candidate. */
2644 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2645 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2646 int *dmin, int *next_block, int motion_x, int motion_y)
2649 uint8_t *dest_backup[3];
     /* restore pre-trial state, then select the trial buffers */
2651 copy_context_before_encode(s, backup, type);
2653 s->block= s->blocks[*next_block];
2654 s->pb= pb[*next_block];
2655 if(s->data_partitioning){
2656 s->pb2 = pb2 [*next_block];
2657 s->tex_pb= tex_pb[*next_block];
     /* redirect reconstruction into the scratchpad so the real frame is
      * untouched during the trial; restored below from dest_backup */
2661 memcpy(dest_backup, s->dest, sizeof(s->dest));
2662 s->dest[0] = s->sc.rd_scratchpad;
2663 s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2664 s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
     /* the scratchpad layout above assumes at least 32 bytes per line */
2665 av_assert0(s->linesize >= 32); //FIXME
2668 encode_mb(s, motion_x, motion_y);
     /* cost starts as the number of bits produced by the trial encode */
2670 score= put_bits_count(&s->pb);
2671 if(s->data_partitioning){
2672 score+= put_bits_count(&s->pb2);
2673 score+= put_bits_count(&s->tex_pb);
     /* full RD mode: reconstruct and use lambda2*bits + SSE as the cost */
2676 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2677 ff_mpv_reconstruct_mb(s, s->block);
2679 score *= s->lambda2;
2680 score += sse_mb(s) << FF_LAMBDA_SHIFT;
     /* restore the real reconstruction destinations */
2684 memcpy(s->dest, dest_backup, sizeof(s->dest));
2691 copy_context_after_encode(best, s, type);
/* Sum of squared errors between two w x h pixel blocks with the given
 * stride.  The common 16x16 and 8x8 cases use the optimized mecc.sse
 * functions; other sizes (frame-edge macroblocks) fall back to a generic
 * per-pixel loop.  ff_square_tab is biased by +256 so that a negative
 * pixel difference still indexes its square correctly.
 * NOTE(review): numbering gaps show missing lines here (the accumulator
 * declarations, the w==16 && h==16 check, loop headers and the return). */
2695 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2696     const uint32_t *sq = ff_square_tab + 256;
2701         return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2702     else if(w==8 && h==8)
2703         return s->mecc.sse[1](NULL, src1, src2, stride, 8);
     /* generic fallback: accumulate the squared difference per pixel */
2707             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/* Distortion of the current macroblock: SSE (or NSSE when
 * avctx->mb_cmp == FF_CMP_NSSE) between the source MB in new_picture and
 * the reconstruction in s->dest[].  w/h are clipped at the right/bottom
 * frame edges; full-size MBs use the optimized 16x16/8x8 comparators,
 * edge MBs fall through to the generic sse() helper.  The fixed *8
 * chroma offsets suggest 4:2:0 layout here — TODO confirm.
 * NOTE(review): numbering gaps show missing lines (w/h initialization,
 * the full-MB check and the closing brace of that branch). */
2716 static int sse_mb(MpegEncContext *s){
     /* clip comparison size at the frame border */
2720     if(s->mb_x*16 + 16 > s->width ) w= s->width  - s->mb_x*16;
2721     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
     /* full macroblock: use the optimized (N)SSE comparators */
2724         if(s->avctx->mb_cmp == FF_CMP_NSSE){
2725             return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2726                    s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2727                    s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2729             return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2730                    s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2731                    s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
     /* partial (edge) macroblock: generic path, chroma at half size */
2734         return  sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2735                +sse(s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2736                +sse(s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/* Slice-thread worker: pre-pass P-frame motion estimation over this
 * slice context's macroblock rows.  Iterates bottom-up and right-to-left
 * and uses the dedicated pre-pass diamond size (avctx->pre_dia_size).
 * NOTE(review): numbering gaps show missing lines here (declarations,
 * closing braces and the return statement). */
2739 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2740     MpegEncContext *s= *(void**)arg;
     /* pre-pass uses its own diamond size setting */
2744     s->me.dia_size= s->avctx->pre_dia_size;
2745     s->first_slice_line=1;
     /* scan MBs in reverse order: bottom row first, right to left */
2746     for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2747         for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2748             ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2750         s->first_slice_line=0;
/* Slice-thread worker: full motion estimation for this slice context's
 * macroblock rows.  B pictures use ff_estimate_b_frame_motion(),
 * everything else uses ff_estimate_p_frame_motion(); results are stored
 * in the context by those helpers.
 * NOTE(review): numbering gaps show missing lines (closing braces and
 * the return statement). */
2758 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2759     MpegEncContext *s= *(void**)arg;
2761     ff_check_alignment();
2763     s->me.dia_size= s->avctx->dia_size;
2764     s->first_slice_line=1;
2765     for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2766         s->mb_x=0; //for block init below
2767         ff_init_block_index(s);
2768         for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
             /* advance the four luma block indices by one MB (2 blocks) */
2769             s->block_index[0]+=2;
2770             s->block_index[1]+=2;
2771             s->block_index[2]+=2;
2772             s->block_index[3]+=2;
2774             /* compute motion vector & mb_type and store in context */
2775             if(s->pict_type==AV_PICTURE_TYPE_B)
2776                 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2778                 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2780         s->first_slice_line=0;
/* Slice-thread worker: per-macroblock luma statistics of the source
 * frame.  For each 16x16 MB it computes the pixel sum and sum of squares
 * (pix_sum / pix_norm1) and derives a variance estimate
 *   varc = (sum_sq - sum^2/256 + bias) >> 8
 * plus the mean ((sum+128)>>8); both are stored per MB and the variance
 * is accumulated into me.mb_var_sum_temp.
 * NOTE(review): numbering gaps show missing lines (declarations of
 * mb_x/mb_y/xx/yy/varc, closing braces and the return). */
2785 static int mb_var_thread(AVCodecContext *c, void *arg){
2786     MpegEncContext *s= *(void**)arg;
2789     ff_check_alignment();
2791     for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2792         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
             /* top-left corner of this MB in the source luma plane */
2795             uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2797             int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
             /* variance over the 256 luma pixels, with rounding bias */
2799             varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2800                     (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2802             s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2803             s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2804             s->me.mb_var_sum_temp += varc;
/* Terminate the current slice in the output bitstream: MPEG-4 merges the
 * data partitions (if any) and writes stuffing; MJPEG writes its own
 * stuffing; then the PutBitContext is byte-aligned and flushed.  In
 * pass-1 mode the alignment/stuffing bits are accounted as misc_bits.
 * NOTE(review): numbering gaps show missing lines (closing braces). */
2810 static void write_slice_end(MpegEncContext *s){
2811     if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2812         if(s->partitioned_frame){
2813             ff_mpeg4_merge_partitions(s);
2816         ff_mpeg4_stuffing(&s->pb);
2817     }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2818         ff_mjpeg_encode_stuffing(s);
     /* byte-align and flush whatever remains in the bit buffer */
2821     avpriv_align_put_bits(&s->pb);
2822     flush_put_bits(&s->pb);
     /* pass-1 statistics: count the alignment bits as misc */
2824     if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2825         s->misc_bits+= get_bits_diff(s);
/* Fill in the most recently reserved 12-byte macroblock-info record
 * (at mb_info_ptr + mb_info_size - 12): bit offset of the MB in the
 * bitstream, quantizer, GOB number, MB address within the GOB and the
 * predicted motion vector — presumably for H.263 MB-info side data,
 * TODO confirm against the consumer.  The second MV pair is always 0
 * since 4MV is not implemented.
 * NOTE(review): the pred_x/pred_y declarations are missing from this
 * listing (numbering gap at 2834). */
2828 static void write_mb_info(MpegEncContext *s)
2830     uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2831     int offset = put_bits_count(&s->pb);
     /* MB address within the current GOB, and the GOB number */
2832     int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2833     int gobn = s->mb_y / s->gob_index;
2835     if (CONFIG_H263_ENCODER)
2836         ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
     /* 12-byte record: le32 offset, qscale, gobn, le16 mba, 4 MV bytes */
2837     bytestream_put_le32(&ptr, offset);
2838     bytestream_put_byte(&ptr, s->qscale);
2839     bytestream_put_byte(&ptr, gobn);
2840     bytestream_put_le16(&ptr, mba);
2841     bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2842     bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2843     /* 4MV not implemented */
2844     bytestream_put_byte(&ptr, 0); /* hmv2 */
2845     bytestream_put_byte(&ptr, 0); /* vmv2 */
/* Bookkeeping for the macroblock-info records: when enough bytes have
 * been written since the last recorded position, reserve a new 12-byte
 * info slot (filled later by write_mb_info()).  With `startcode` set the
 * current byte position is recorded as the start-code position instead.
 * NOTE(review): numbering gaps show missing lines here (an early-out
 * guard, the write_mb_info() call and branch structure); hedge any
 * conclusions about the exact control flow. */
2848 static void update_mb_info(MpegEncContext *s, int startcode)
     /* enough bytes since the previous record? reserve another slot */
2852     if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2853         s->mb_info_size += 12;
2854         s->prev_mb_info = s->last_mb_info;
2857         s->prev_mb_info = put_bits_count(&s->pb)/8;
2858         /* This might have incremented mb_info_size above, and we return without
2859          * actually writing any info into that slot yet. But in that case,
2860          * this will be called again at the start of the after writing the
2861          * start code, actually writing the mb info. */
2865     s->last_mb_info = put_bits_count(&s->pb)/8;
2866     if (!s->mb_info_size)
2867         s->mb_info_size += 12;
/* Grow the bitstream output buffer when fewer than `threshold` bytes
 * remain, adding at least `size_increase` bytes.  Only applies when
 * there is a single slice context and the PutBitContext writes into the
 * codec's internal byte_buffer.  On success the PutBitContext is rebased
 * onto the new buffer and the ptr_lastgob / vbv_delay_ptr pointers are
 * fixed up.  Returns AVERROR(ENOMEM) when the new size would overflow
 * (or, per the missing branch, when allocation fails) and
 * AVERROR(EINVAL) if space is still below the threshold afterwards.
 * NOTE(review): numbering gaps show missing lines (the allocation-
 * failure check around 2891-2893 and the final return). */
2871 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
     /* only grow our own internal buffer, and only with one slice ctx */
2873     if (   s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2874         && s->slice_context_count == 1
2875         && s->pb.buf == s->avctx->internal->byte_buffer) {
         /* remember pointer positions relative to the old buffer */
2876         int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2877         int vbv_pos     = s->vbv_delay_ptr - s->pb.buf;
2879         uint8_t *new_buffer = NULL;
2880         int new_buffer_size = 0;
         /* reject sizes that could overflow the bit-count arithmetic */
2882         if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2883             av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2884             return AVERROR(ENOMEM);
2889         av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2890                               s->avctx->internal->byte_buffer_size + size_increase);
2892             return AVERROR(ENOMEM);
         /* copy the already-written data and swap in the new buffer */
2894         memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2895         av_free(s->avctx->internal->byte_buffer);
2896         s->avctx->internal->byte_buffer      = new_buffer;
2897         s->avctx->internal->byte_buffer_size = new_buffer_size;
2898         rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
         /* re-anchor saved pointers onto the new buffer */
2899         s->ptr_lastgob   = s->pb.buf + lastgob_pos;
2900         s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2902     if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2903         return AVERROR(EINVAL);
2907 static int encode_thread(AVCodecContext *c, void *arg){
2908 MpegEncContext *s= *(void**)arg;
2910 int chr_h= 16>>s->chroma_y_shift;
2912 MpegEncContext best_s = { 0 }, backup_s;
2913 uint8_t bit_buf[2][MAX_MB_BYTES];
2914 uint8_t bit_buf2[2][MAX_MB_BYTES];
2915 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2916 PutBitContext pb[2], pb2[2], tex_pb[2];
2918 ff_check_alignment();
2921 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2922 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2923 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2926 s->last_bits= put_bits_count(&s->pb);
2937 /* init last dc values */
2938 /* note: quant matrix value (8) is implied here */
2939 s->last_dc[i] = 128 << s->intra_dc_precision;
2941 s->current_picture.encoding_error[i] = 0;
2943 if(s->codec_id==AV_CODEC_ID_AMV){
2944 s->last_dc[0] = 128*8/13;
2945 s->last_dc[1] = 128*8/14;
2946 s->last_dc[2] = 128*8/14;
2949 memset(s->last_mv, 0, sizeof(s->last_mv));
2953 switch(s->codec_id){
2954 case AV_CODEC_ID_H263:
2955 case AV_CODEC_ID_H263P:
2956 case AV_CODEC_ID_FLV1:
2957 if (CONFIG_H263_ENCODER)
2958 s->gob_index = H263_GOB_HEIGHT(s->height);
2960 case AV_CODEC_ID_MPEG4:
2961 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2962 ff_mpeg4_init_partitions(s);
2968 s->first_slice_line = 1;
2969 s->ptr_lastgob = s->pb.buf;
2970 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2974 ff_set_qscale(s, s->qscale);
2975 ff_init_block_index(s);
2977 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2978 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2979 int mb_type= s->mb_type[xy];
2983 int size_increase = s->avctx->internal->byte_buffer_size/4
2984 + s->mb_width*MAX_MB_BYTES;
2986 ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
2987 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2988 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2991 if(s->data_partitioning){
2992 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
2993 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2994 av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3000 s->mb_y = mb_y; // moved into loop, can get changed by H.261
3001 ff_update_block_index(s);
3003 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3004 ff_h261_reorder_mb_index(s);
3005 xy= s->mb_y*s->mb_stride + s->mb_x;
3006 mb_type= s->mb_type[xy];
3009 /* write gob / video packet header */
3011 int current_packet_size, is_gob_start;
3013 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
3015 is_gob_start = s->rtp_payload_size &&
3016 current_packet_size >= s->rtp_payload_size &&
3019 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3021 switch(s->codec_id){
3022 case AV_CODEC_ID_H263:
3023 case AV_CODEC_ID_H263P:
3024 if(!s->h263_slice_structured)
3025 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3027 case AV_CODEC_ID_MPEG2VIDEO:
3028 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3029 case AV_CODEC_ID_MPEG1VIDEO:
3030 if(s->mb_skip_run) is_gob_start=0;
3032 case AV_CODEC_ID_MJPEG:
3033 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3038 if(s->start_mb_y != mb_y || mb_x!=0){
3041 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3042 ff_mpeg4_init_partitions(s);
3046 av_assert2((put_bits_count(&s->pb)&7) == 0);
3047 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3049 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3050 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3051 int d = 100 / s->error_rate;
3053 current_packet_size=0;
3054 s->pb.buf_ptr= s->ptr_lastgob;
3055 av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3059 #if FF_API_RTP_CALLBACK
3060 FF_DISABLE_DEPRECATION_WARNINGS
3061 if (s->avctx->rtp_callback){
3062 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3063 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3065 FF_ENABLE_DEPRECATION_WARNINGS
3067 update_mb_info(s, 1);
3069 switch(s->codec_id){
3070 case AV_CODEC_ID_MPEG4:
3071 if (CONFIG_MPEG4_ENCODER) {
3072 ff_mpeg4_encode_video_packet_header(s);
3073 ff_mpeg4_clean_buffers(s);
3076 case AV_CODEC_ID_MPEG1VIDEO:
3077 case AV_CODEC_ID_MPEG2VIDEO:
3078 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3079 ff_mpeg1_encode_slice_header(s);
3080 ff_mpeg1_clean_buffers(s);
3083 case AV_CODEC_ID_H263:
3084 case AV_CODEC_ID_H263P:
3085 if (CONFIG_H263_ENCODER)
3086 ff_h263_encode_gob_header(s, mb_y);
3090 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3091 int bits= put_bits_count(&s->pb);
3092 s->misc_bits+= bits - s->last_bits;
3096 s->ptr_lastgob += current_packet_size;
3097 s->first_slice_line=1;
3098 s->resync_mb_x=mb_x;
3099 s->resync_mb_y=mb_y;
3103 if( (s->resync_mb_x == s->mb_x)
3104 && s->resync_mb_y+1 == s->mb_y){
3105 s->first_slice_line=0;
3109 s->dquant=0; //only for QP_RD
3111 update_mb_info(s, 0);
3113 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3115 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3117 copy_context_before_encode(&backup_s, s, -1);
3119 best_s.data_partitioning= s->data_partitioning;
3120 best_s.partitioned_frame= s->partitioned_frame;
3121 if(s->data_partitioning){
3122 backup_s.pb2= s->pb2;
3123 backup_s.tex_pb= s->tex_pb;
3126 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3127 s->mv_dir = MV_DIR_FORWARD;
3128 s->mv_type = MV_TYPE_16X16;
3130 s->mv[0][0][0] = s->p_mv_table[xy][0];
3131 s->mv[0][0][1] = s->p_mv_table[xy][1];
3132 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3133 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3135 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3136 s->mv_dir = MV_DIR_FORWARD;
3137 s->mv_type = MV_TYPE_FIELD;
3140 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3141 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3142 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3144 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3145 &dmin, &next_block, 0, 0);
3147 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3148 s->mv_dir = MV_DIR_FORWARD;
3149 s->mv_type = MV_TYPE_16X16;
3153 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3154 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3156 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3157 s->mv_dir = MV_DIR_FORWARD;
3158 s->mv_type = MV_TYPE_8X8;
3161 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3162 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3164 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3165 &dmin, &next_block, 0, 0);
3167 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3168 s->mv_dir = MV_DIR_FORWARD;
3169 s->mv_type = MV_TYPE_16X16;
3171 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3172 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3173 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3174 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3176 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3177 s->mv_dir = MV_DIR_BACKWARD;
3178 s->mv_type = MV_TYPE_16X16;
3180 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3181 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3182 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3183 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3185 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3186 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3187 s->mv_type = MV_TYPE_16X16;
3189 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3190 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3191 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3192 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3193 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3194 &dmin, &next_block, 0, 0);
3196 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3197 s->mv_dir = MV_DIR_FORWARD;
3198 s->mv_type = MV_TYPE_FIELD;
3201 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3202 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3203 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3205 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3206 &dmin, &next_block, 0, 0);
3208 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3209 s->mv_dir = MV_DIR_BACKWARD;
3210 s->mv_type = MV_TYPE_FIELD;
3213 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3214 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3215 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3217 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3218 &dmin, &next_block, 0, 0);
3220 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3221 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3222 s->mv_type = MV_TYPE_FIELD;
3224 for(dir=0; dir<2; dir++){
3226 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3227 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3228 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3231 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3232 &dmin, &next_block, 0, 0);
3234 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3236 s->mv_type = MV_TYPE_16X16;
3240 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3241 &dmin, &next_block, 0, 0);
3242 if(s->h263_pred || s->h263_aic){
3244 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3246 ff_clean_intra_table_entries(s); //old mode?
3250 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3251 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3252 const int last_qp= backup_s.qscale;
3255 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3256 static const int dquant_tab[4]={-1,1,-2,2};
3257 int storecoefs = s->mb_intra && s->dc_val[0];
3259 av_assert2(backup_s.dquant == 0);
3262 s->mv_dir= best_s.mv_dir;
3263 s->mv_type = MV_TYPE_16X16;
3264 s->mb_intra= best_s.mb_intra;
3265 s->mv[0][0][0] = best_s.mv[0][0][0];
3266 s->mv[0][0][1] = best_s.mv[0][0][1];
3267 s->mv[1][0][0] = best_s.mv[1][0][0];
3268 s->mv[1][0][1] = best_s.mv[1][0][1];
3270 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3271 for(; qpi<4; qpi++){
3272 int dquant= dquant_tab[qpi];
3273 qp= last_qp + dquant;
3274 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3276 backup_s.dquant= dquant;
3279 dc[i]= s->dc_val[0][ s->block_index[i] ];
3280 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3284 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3285 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3286 if(best_s.qscale != qp){
3289 s->dc_val[0][ s->block_index[i] ]= dc[i];
3290 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3297 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3298 int mx= s->b_direct_mv_table[xy][0];
3299 int my= s->b_direct_mv_table[xy][1];
3301 backup_s.dquant = 0;
3302 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3304 ff_mpeg4_set_direct_mv(s, mx, my);
3305 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3306 &dmin, &next_block, mx, my);
3308 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3309 backup_s.dquant = 0;
3310 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3312 ff_mpeg4_set_direct_mv(s, 0, 0);
3313 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3314 &dmin, &next_block, 0, 0);
3316 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3319 coded |= s->block_last_index[i];
3322 memcpy(s->mv, best_s.mv, sizeof(s->mv));
3323 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3324 mx=my=0; //FIXME find the one we actually used
3325 ff_mpeg4_set_direct_mv(s, mx, my);
3326 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3334 s->mv_dir= best_s.mv_dir;
3335 s->mv_type = best_s.mv_type;
3337 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3338 s->mv[0][0][1] = best_s.mv[0][0][1];
3339 s->mv[1][0][0] = best_s.mv[1][0][0];
3340 s->mv[1][0][1] = best_s.mv[1][0][1];*/
3343 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3344 &dmin, &next_block, mx, my);
3349 s->current_picture.qscale_table[xy] = best_s.qscale;
3351 copy_context_after_encode(s, &best_s, -1);
3353 pb_bits_count= put_bits_count(&s->pb);
3354 flush_put_bits(&s->pb);
3355 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3358 if(s->data_partitioning){
3359 pb2_bits_count= put_bits_count(&s->pb2);
3360 flush_put_bits(&s->pb2);
3361 avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3362 s->pb2= backup_s.pb2;
3364 tex_pb_bits_count= put_bits_count(&s->tex_pb);
3365 flush_put_bits(&s->tex_pb);
3366 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3367 s->tex_pb= backup_s.tex_pb;
3369 s->last_bits= put_bits_count(&s->pb);
3371 if (CONFIG_H263_ENCODER &&
3372 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3373 ff_h263_update_motion_val(s);
3375 if(next_block==0){ //FIXME 16 vs linesize16
3376 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3377 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3378 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3381 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3382 ff_mpv_reconstruct_mb(s, s->block);
3384 int motion_x = 0, motion_y = 0;
3385 s->mv_type=MV_TYPE_16X16;
3386 // only one MB-Type possible
3389 case CANDIDATE_MB_TYPE_INTRA:
3392 motion_x= s->mv[0][0][0] = 0;
3393 motion_y= s->mv[0][0][1] = 0;
3395 case CANDIDATE_MB_TYPE_INTER:
3396 s->mv_dir = MV_DIR_FORWARD;
3398 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3399 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3401 case CANDIDATE_MB_TYPE_INTER_I:
3402 s->mv_dir = MV_DIR_FORWARD;
3403 s->mv_type = MV_TYPE_FIELD;
3406 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3407 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3408 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3411 case CANDIDATE_MB_TYPE_INTER4V:
3412 s->mv_dir = MV_DIR_FORWARD;
3413 s->mv_type = MV_TYPE_8X8;
3416 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3417 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3420 case CANDIDATE_MB_TYPE_DIRECT:
3421 if (CONFIG_MPEG4_ENCODER) {
3422 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3424 motion_x=s->b_direct_mv_table[xy][0];
3425 motion_y=s->b_direct_mv_table[xy][1];
3426 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3429 case CANDIDATE_MB_TYPE_DIRECT0:
3430 if (CONFIG_MPEG4_ENCODER) {
3431 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3433 ff_mpeg4_set_direct_mv(s, 0, 0);
3436 case CANDIDATE_MB_TYPE_BIDIR:
3437 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3439 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3440 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3441 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3442 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3444 case CANDIDATE_MB_TYPE_BACKWARD:
3445 s->mv_dir = MV_DIR_BACKWARD;
3447 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3448 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3450 case CANDIDATE_MB_TYPE_FORWARD:
3451 s->mv_dir = MV_DIR_FORWARD;
3453 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3454 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3456 case CANDIDATE_MB_TYPE_FORWARD_I:
3457 s->mv_dir = MV_DIR_FORWARD;
3458 s->mv_type = MV_TYPE_FIELD;
3461 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3462 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3463 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3466 case CANDIDATE_MB_TYPE_BACKWARD_I:
3467 s->mv_dir = MV_DIR_BACKWARD;
3468 s->mv_type = MV_TYPE_FIELD;
3471 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3472 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3473 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3476 case CANDIDATE_MB_TYPE_BIDIR_I:
3477 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3478 s->mv_type = MV_TYPE_FIELD;
3480 for(dir=0; dir<2; dir++){
3482 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3483 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3484 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3489 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3492 encode_mb(s, motion_x, motion_y);
3494 // RAL: Update last macroblock type
3495 s->last_mv_dir = s->mv_dir;
3497 if (CONFIG_H263_ENCODER &&
3498 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3499 ff_h263_update_motion_val(s);
3501 ff_mpv_reconstruct_mb(s, s->block);
3504 /* clean the MV table in IPS frames for direct mode in B-frames */
3505 if(s->mb_intra /* && I,P,S_TYPE */){
3506 s->p_mv_table[xy][0]=0;
3507 s->p_mv_table[xy][1]=0;
3510 if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3514 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3515 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3517 s->current_picture.encoding_error[0] += sse(
3518 s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3519 s->dest[0], w, h, s->linesize);
3520 s->current_picture.encoding_error[1] += sse(
3521 s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3522 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3523 s->current_picture.encoding_error[2] += sse(
3524 s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3525 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3528 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3529 ff_h263_loop_filter(s);
3531 ff_dlog(s->avctx, "MB %d %d bits\n",
3532 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3536 //not beautiful here but we must write it before flushing so it has to be here
3537 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3538 ff_msmpeg4_encode_ext_header(s);
3542 #if FF_API_RTP_CALLBACK
3543 FF_DISABLE_DEPRECATION_WARNINGS
3544 /* Send the last GOB if RTP */
3545 if (s->avctx->rtp_callback) {
3546 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3547 int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3548 /* Call the RTP callback to send the last GOB */
3550 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3552 FF_ENABLE_DEPRECATION_WARNINGS
/* Move-accumulate a field from a slice-thread context into the main one:
 * add src's value into dst and zero src so a later merge cannot double count. */
3558 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold the motion-estimation statistics gathered by a slice thread (src)
 * into the main context (dst). Called once per extra slice context after
 * the ME pass, before rate control uses the totals. */
3559 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3560 MERGE(me.scene_change_score);
3561 MERGE(me.mc_mb_var_sum_temp);
3562 MERGE(me.mb_var_sum_temp);
/* Fold the per-slice encoding statistics and the bitstream written by a
 * slice thread (src) into the main context (dst). The bit buffers must be
 * byte aligned (asserted below) so they can be concatenated with
 * avpriv_copy_bits(). */
3565 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3568 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3569 MERGE(dct_count[1]);
3578 MERGE(er.error_count);
3579 MERGE(padding_bug_score);
3580 MERGE(current_picture.encoding_error[0]);
3581 MERGE(current_picture.encoding_error[1]);
3582 MERGE(current_picture.encoding_error[2]);
/* dct_error_sum feeds the DCT noise-reduction filter; only merged when
 * noise reduction is enabled on the destination context. */
3584 if (dst->noise_reduction){
3585 for(i=0; i<64; i++){
3586 MERGE(dct_error_sum[0][i]);
3587 MERGE(dct_error_sum[1][i]);
/* Concatenate the slice's bitstream onto the main PutBitContext. */
3591 av_assert1(put_bits_count(&src->pb) % 8 ==0);
3592 av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3593 avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3594 flush_put_bits(&dst->pb);
/* Pick the quantizer / lambda for the current picture.
 * Priority: an explicit next_lambda override, then the rate controller
 * (unless fixed qscale), then whatever quality is already set.
 * dry_run != 0 means "estimate only": state such as next_lambda is kept.
 * Returns < 0 on rate-control failure (path partly outside this excerpt). */
3597 static int estimate_qp(MpegEncContext *s, int dry_run){
3598 if (s->next_lambda){
3599 s->current_picture_ptr->f->quality =
3600 s->current_picture.f->quality = s->next_lambda;
3601 if(!dry_run) s->next_lambda= 0;
3602 } else if (!s->fixed_qscale) {
3603 int quality = ff_rate_estimate_qscale(s, dry_run);
3604 s->current_picture_ptr->f->quality =
3605 s->current_picture.f->quality = quality;
3606 if (s->current_picture.f->quality < 0)
/* With adaptive quant the per-MB qscale table must obey codec-specific
 * constraints (e.g. limited qscale deltas) before it can be used. */
3610 if(s->adaptive_quant){
3611 switch(s->codec_id){
3612 case AV_CODEC_ID_MPEG4:
3613 if (CONFIG_MPEG4_ENCODER)
3614 ff_clean_mpeg4_qscales(s);
3616 case AV_CODEC_ID_H263:
3617 case AV_CODEC_ID_H263P:
3618 case AV_CODEC_ID_FLV1:
3619 if (CONFIG_H263_ENCODER)
3620 ff_clean_h263_qscales(s);
3623 ff_init_qscale_tab(s);
3626 s->lambda= s->lambda_table[0];
3629 s->lambda = s->current_picture.f->quality;
3634 /* must be called before writing the header */
/* Derive the temporal distances used for B-frame prediction:
 *   pp_time = distance between the two reference (non-B) frames,
 *   pb_time = distance from the previous reference to this B frame.
 * Times are in time_base.num units taken from the frame pts. */
3635 static void set_frame_distances(MpegEncContext * s){
3636 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3637 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3639 if(s->pict_type==AV_PICTURE_TYPE_B){
3640 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3641 av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
/* Non-B frame: it becomes the new reference, update the running times. */
3643 s->pp_time= s->time - s->last_non_b_time;
3644 s->last_non_b_time= s->time;
3645 av_assert1(s->picture_number==0 || s->pp_time > 0);
/* Encode one whole picture: set up timing and rounding state, run motion
 * estimation across all slice threads, choose f_code/b_code, decide the
 * quantizer, build format-specific quant matrices, write the picture
 * header and finally run encode_thread() per slice and merge the results.
 * Returns 0 on success, negative AVERROR on failure (error paths partly
 * outside this excerpt). */
3649 static int encode_picture(MpegEncContext *s, int picture_number)
3653 int context_count = s->slice_context_count;
3655 s->picture_number = picture_number;
3657 /* Reset the average MB variance */
3658 s->me.mb_var_sum_temp =
3659 s->me.mc_mb_var_sum_temp = 0;
3661 /* we need to initialize some time vars before we can encode B-frames */
3662 // RAL: Condition added for MPEG1VIDEO
3663 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3664 set_frame_distances(s);
3665 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3666 ff_set_mpeg4_time(s);
3668 s->me.scene_change_score=0;
3670 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* no_rounding alternates on reference frames for codecs with flip-flop
 * rounding so rounding errors do not accumulate across P-frames. */
3672 if(s->pict_type==AV_PICTURE_TYPE_I){
3673 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3674 else s->no_rounding=0;
3675 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3676 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3677 s->no_rounding ^= 1;
/* Pass-2 rate control gets its qp/fcode from the stats file; otherwise
 * seed lambda from the last frame of the same type for the ME pass. */
3680 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3681 if (estimate_qp(s,1) < 0)
3683 ff_get_2pass_fcode(s);
3684 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3685 if(s->pict_type==AV_PICTURE_TYPE_B)
3686 s->lambda= s->last_lambda_for[s->pict_type];
3688 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* Except for (A)MJPEG, chroma shares the luma intra quant matrices;
 * free any separate chroma copies and alias them to the luma ones. */
3692 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3693 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3694 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3695 s->q_chroma_intra_matrix = s->q_intra_matrix;
3696 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3699 s->mb_intra=0; //for the rate distortion & bit compare functions
3700 for(i=1; i<context_count; i++){
3701 ret = ff_update_duplicate_context(s->thread_context[i], s);
3709 /* Estimate motion for every MB */
3710 if(s->pict_type != AV_PICTURE_TYPE_I){
3711 s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3712 s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3713 if (s->pict_type != AV_PICTURE_TYPE_B) {
3714 if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3716 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3720 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3721 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3723 for(i=0; i<s->mb_stride*s->mb_height; i++)
3724 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3726 if(!s->fixed_qscale){
3727 /* finding spatial complexity for I-frame rate control */
3728 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3731 for(i=1; i<context_count; i++){
3732 merge_context_after_me(s, s->thread_context[i]);
3734 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3735 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* Scene-change: a P-frame that would cost more than an I-frame is
 * re-typed as I and every MB forced to intra. */
3738 if (s->me.scene_change_score > s->scenechange_threshold &&
3739 s->pict_type == AV_PICTURE_TYPE_P) {
3740 s->pict_type= AV_PICTURE_TYPE_I;
3741 for(i=0; i<s->mb_stride*s->mb_height; i++)
3742 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3743 if(s->msmpeg4_version >= 3)
3745 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3746 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* Choose f_code (forward MV range) for P/S frames, then clip MVs that do
 * not fit the chosen range. */
3750 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3751 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3753 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3755 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3756 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3757 s->f_code= FFMAX3(s->f_code, a, b);
3760 ff_fix_long_p_mvs(s, s->intra_penalty ? CANDIDATE_MB_TYPE_INTER : CANDIDATE_MB_TYPE_INTRA);
3761 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3762 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3766 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3767 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
/* B-frames: pick f_code/b_code covering both the one-directional and the
 * bidirectional MV tables, then clip out-of-range vectors everywhere. */
3772 if(s->pict_type==AV_PICTURE_TYPE_B){
3775 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3776 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3777 s->f_code = FFMAX(a, b);
3779 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3780 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3781 s->b_code = FFMAX(a, b);
3783 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3784 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3785 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3786 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3787 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3789 for(dir=0; dir<2; dir++){
3792 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3793 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3794 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3795 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* Final (non-dry-run) quantizer decision for the actual encode. */
3803 if (estimate_qp(s, 0) < 0)
3806 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3807 s->pict_type == AV_PICTURE_TYPE_I &&
3808 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3809 s->qscale= 3; //reduce clipping problems
/* MJPEG bakes qscale directly into the quant matrices (one matrix per
 * picture, no per-MB qscale in the bitstream). */
3811 if (s->out_format == FMT_MJPEG) {
3812 const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3813 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3815 if (s->avctx->intra_matrix) {
3817 luma_matrix = s->avctx->intra_matrix;
3819 if (s->avctx->chroma_intra_matrix)
3820 chroma_matrix = s->avctx->chroma_intra_matrix;
3822 /* for mjpeg, we do include qscale in the matrix */
3824 int j = s->idsp.idct_permutation[i];
3826 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3827 s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3829 s->y_dc_scale_table=
3830 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3831 s->chroma_intra_matrix[0] =
3832 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3833 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3834 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3835 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3836 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV uses fixed SP5X quant tables and fixed DC scales (13 luma / 14
 * chroma). */
3839 if(s->codec_id == AV_CODEC_ID_AMV){
3840 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3841 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3843 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3845 s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3846 s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3848 s->y_dc_scale_table= y;
3849 s->c_dc_scale_table= c;
3850 s->intra_matrix[0] = 13;
3851 s->chroma_intra_matrix[0] = 14;
3852 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3853 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3854 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3855 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3859 //FIXME var duplication
3860 s->current_picture_ptr->f->key_frame =
3861 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3862 s->current_picture_ptr->f->pict_type =
3863 s->current_picture.f->pict_type = s->pict_type;
3865 if (s->current_picture.f->key_frame)
3866 s->picture_in_gop_number=0;
/* Write the format-specific picture header. */
3868 s->mb_x = s->mb_y = 0;
3869 s->last_bits= put_bits_count(&s->pb);
3870 switch(s->out_format) {
3872 if (CONFIG_MJPEG_ENCODER && s->huffman != HUFFMAN_TABLE_OPTIMAL)
3873 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3874 s->pred, s->intra_matrix, s->chroma_intra_matrix);
3877 if (CONFIG_H261_ENCODER)
3878 ff_h261_encode_picture_header(s, picture_number);
3881 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3882 ff_wmv2_encode_picture_header(s, picture_number);
3883 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3884 ff_msmpeg4_encode_picture_header(s, picture_number);
3885 else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3886 ret = ff_mpeg4_encode_picture_header(s, picture_number);
3889 } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3890 ret = ff_rv10_encode_picture_header(s, picture_number);
3894 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3895 ff_rv20_encode_picture_header(s, picture_number);
3896 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3897 ff_flv_encode_picture_header(s, picture_number);
3898 else if (CONFIG_H263_ENCODER)
3899 ff_h263_encode_picture_header(s, picture_number);
3902 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3903 ff_mpeg1_encode_picture_header(s, picture_number);
3908 bits= put_bits_count(&s->pb);
3909 s->header_bits= bits - s->last_bits;
/* Run the per-slice encode threads, then merge their bitstreams and
 * statistics back into the main context. */
3911 for(i=1; i<context_count; i++){
3912 update_duplicate_context_after_me(s->thread_context[i], s);
3914 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3915 for(i=1; i<context_count; i++){
3916 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3917 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3918 merge_context_after_encode(s, s->thread_context[i]);
/* DCT-domain noise reduction (C reference): for each coefficient,
 * accumulate its magnitude into dct_error_sum and shrink it toward zero
 * by dct_offset, clamping at zero. Separate statistics are kept for
 * intra and inter blocks (indexed by s->mb_intra). */
3924 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3925 const int intra= s->mb_intra;
3928 s->dct_count[intra]++;
3930 for(i=0; i<64; i++){
3931 int level= block[i];
/* Positive branch: shrink toward zero, never crossing it. */
3935 s->dct_error_sum[intra][i] += level;
3936 level -= s->dct_offset[intra][i];
3937 if(level<0) level=0;
/* Negative branch: mirror of the above. */
3939 s->dct_error_sum[intra][i] -= level;
3940 level += s->dct_offset[intra][i];
3941 if(level>0) level=0;
/* Rate-distortion optimal (trellis) quantization of one 8x8 block.
 * Forward-DCTs the block, builds up to two candidate quantized levels per
 * coefficient, then runs a Viterbi-style search over (run, level) paths
 * minimizing distortion + lambda * bits. Writes the chosen levels back
 * into block[] in permuted order and returns the last nonzero index
 * (scantable order), or a value < start_i if the block is empty.
 * *overflow is set when a level exceeded s->max_qcoeff. */
3948 static int dct_quantize_trellis_c(MpegEncContext *s,
3949 int16_t *block, int n,
3950 int qscale, int *overflow){
3952 const uint16_t *matrix;
3953 const uint8_t *scantable;
3954 const uint8_t *perm_scantable;
3956 unsigned int threshold1, threshold2;
3968 int coeff_count[64];
3969 int qmul, qadd, start_i, last_non_zero, i, dc;
3970 const int esc_length= s->ac_esc_length;
3972 uint8_t * last_length;
3973 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3976 s->fdsp.fdct(block);
3978 if(s->dct_error_sum)
3979 s->denoise_dct(s, block);
3981 qadd= ((qscale-1)|1)*8;
/* MPEG-2 can use a nonlinear qscale mapping. */
3983 if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3984 else mpeg2_qscale = qscale << 1;
/* Intra path: DC handled separately, AC tables selected per component. */
3988 scantable= s->intra_scantable.scantable;
3989 perm_scantable= s->intra_scantable.permutated;
3997 /* For AIC we skip quant/dequant of INTRADC */
4002 /* note: block[0] is assumed to be positive */
4003 block[0] = (block[0] + (q >> 1)) / q;
4006 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4007 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4008 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4009 bias= 1<<(QMAT_SHIFT-1);
4011 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4012 length = s->intra_chroma_ac_vlc_length;
4013 last_length= s->intra_chroma_ac_vlc_last_length;
4015 length = s->intra_ac_vlc_length;
4016 last_length= s->intra_ac_vlc_last_length;
/* Inter path. */
4019 scantable= s->inter_scantable.scantable;
4020 perm_scantable= s->inter_scantable.permutated;
4023 qmat = s->q_inter_matrix[qscale];
4024 matrix = s->inter_matrix;
4025 length = s->inter_ac_vlc_length;
4026 last_length= s->inter_ac_vlc_last_length;
4030 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4031 threshold2= (threshold1<<1);
/* Find the last coefficient that survives quantization. */
4033 for(i=63; i>=start_i; i--) {
4034 const int j = scantable[i];
4035 int level = block[j] * qmat[j];
4037 if(((unsigned)(level+threshold1))>threshold2){
/* Build candidate levels: coeff[0][i] is the rounded level, coeff[1][i]
 * the next smaller magnitude; coeff_count[i] is how many are viable. */
4043 for(i=start_i; i<=last_non_zero; i++) {
4044 const int j = scantable[i];
4045 int level = block[j] * qmat[j];
4047 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4048 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4049 if(((unsigned)(level+threshold1))>threshold2){
4051 level= (bias + level)>>QMAT_SHIFT;
4053 coeff[1][i]= level-1;
4054 // coeff[2][k]= level-2;
4056 level= (bias - level)>>QMAT_SHIFT;
4057 coeff[0][i]= -level;
4058 coeff[1][i]= -level+1;
4059 // coeff[2][k]= -level+2;
4061 coeff_count[i]= FFMIN(level, 2);
4062 av_assert2(coeff_count[i]);
4065 coeff[0][i]= (level>>31)|1;
4070 *overflow= s->max_qcoeff < max; //overflow might have happened
4072 if(last_non_zero < start_i){
4073 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4074 return last_non_zero;
/* Dynamic-programming pass: score_tab[i] is the best cost up to coeff i,
 * survivor[] keeps the candidate predecessors worth extending. */
4077 score_tab[start_i]= 0;
4078 survivor[0]= start_i;
4081 for(i=start_i; i<=last_non_zero; i++){
4082 int level_index, j, zero_distortion;
4083 int dct_coeff= FFABS(block[ scantable[i] ]);
4084 int best_score=256*256*256*120;
/* ifast fdct output is scaled; undo the scale for distortion math. */
4086 if (s->fdsp.fdct == ff_fdct_ifast)
4087 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4088 zero_distortion= dct_coeff*dct_coeff;
4090 for(level_index=0; level_index < coeff_count[i]; level_index++){
4092 int level= coeff[level_index][i];
4093 const int alevel= FFABS(level);
/* Reconstruct the dequantized value the decoder would see, which
 * differs per output format. */
4098 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4099 unquant_coeff= alevel*qmul + qadd;
4100 } else if(s->out_format == FMT_MJPEG) {
4101 j = s->idsp.idct_permutation[scantable[i]];
4102 unquant_coeff = alevel * matrix[j] * 8;
4104 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4106 unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4107 unquant_coeff = (unquant_coeff - 1) | 1;
4109 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4110 unquant_coeff = (unquant_coeff - 1) | 1;
4115 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* Levels that fit the VLC tables: try extending each survivor. */
4117 if((level&(~127)) == 0){
4118 for(j=survivor_count-1; j>=0; j--){
4119 int run= i - survivor[j];
4120 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4121 score += score_tab[i-run];
4123 if(score < best_score){
4126 level_tab[i+1]= level-64;
4130 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4131 for(j=survivor_count-1; j>=0; j--){
4132 int run= i - survivor[j];
4133 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4134 score += score_tab[i-run];
4135 if(score < last_score){
4138 last_level= level-64;
/* Level outside the VLC tables: cost it with the escape code. */
4144 distortion += esc_length*lambda;
4145 for(j=survivor_count-1; j>=0; j--){
4146 int run= i - survivor[j];
4147 int score= distortion + score_tab[i-run];
4149 if(score < best_score){
4152 level_tab[i+1]= level-64;
4156 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4157 for(j=survivor_count-1; j>=0; j--){
4158 int run= i - survivor[j];
4159 int score= distortion + score_tab[i-run];
4160 if(score < last_score){
4163 last_level= level-64;
4171 score_tab[i+1]= best_score;
4173 // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4174 if(last_non_zero <= 27){
4175 for(; survivor_count; survivor_count--){
4176 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4180 for(; survivor_count; survivor_count--){
4181 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4186 survivor[ survivor_count++ ]= i+1;
/* Non-H.263 formats have no per-coefficient "last" flag cost; choose the
 * end position from the DP table directly. */
4189 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4190 last_score= 256*256*256*120;
4191 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4192 int score= score_tab[i];
4194 score += lambda * 2; // FIXME more exact?
4196 if(score < last_score){
4199 last_level= level_tab[i];
4200 last_run= run_tab[i];
4205 s->coded_score[n] = last_score;
4207 dc= FFABS(block[0]);
4208 last_non_zero= last_i - 1;
4209 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4211 if(last_non_zero < start_i)
4212 return last_non_zero;
/* Special case: only the first coefficient survived — decide whether
 * keeping it at all beats coding an empty block. */
4214 if(last_non_zero == 0 && start_i == 0){
4216 int best_score= dc * dc;
4218 for(i=0; i<coeff_count[0]; i++){
4219 int level= coeff[i][0];
4220 int alevel= FFABS(level);
4221 int unquant_coeff, score, distortion;
4223 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4224 unquant_coeff= (alevel*qmul + qadd)>>3;
4226 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4227 unquant_coeff = (unquant_coeff - 1) | 1;
4229 unquant_coeff = (unquant_coeff + 4) >> 3;
4230 unquant_coeff<<= 3 + 3;
4232 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4234 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4235 else score= distortion + esc_length*lambda;
4237 if(score < best_score){
4239 best_level= level - 64;
4242 block[0]= best_level;
4243 s->coded_score[n] = best_score - dc*dc;
4244 if(best_level == 0) return -1;
4245 else return last_non_zero;
/* Backtrack the winning path, writing levels in IDCT-permuted order. */
4249 av_assert2(last_level);
4251 block[ perm_scantable[last_non_zero] ]= last_level;
4254 for(; i>start_i; i -= run_tab[i] + 1){
4255 block[ perm_scantable[i-1] ]= level_tab[i];
4258 return last_non_zero;
/* Per-coefficient 8x8 DCT basis images (permuted order), lazily built by
 * build_basis() and used by dct_quantize_refine(). */
4261 static int16_t basis[64][64];
/* Fill basis[] with the 2-D DCT basis functions, scaled by BASIS_SHIFT
 * and indexed through the IDCT coefficient permutation `perm`. */
4263 static void build_basis(uint8_t *perm){
4270 double s= 0.25*(1<<BASIS_SHIFT);
4272 int perm_index= perm[index];
/* DC rows/columns carry the usual 1/sqrt(2) normalization. */
4273 if(i==0) s*= sqrt(0.5);
4274 if(j==0) s*= sqrt(0.5);
4275 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Noise-shaping refinement of an already-quantized 8x8 block: greedily
 * tries +/-1 changes on coefficients, accepting the change that most
 * reduces weighted reconstruction error + lambda * bit-cost, iterating
 * until no change helps. rem[] holds the current residual in the pixel
 * domain (RECON_SHIFT fixed point); basis[] maps coefficient deltas to
 * pixel deltas. Returns the updated last nonzero index. */
4282 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4283 int16_t *block, int16_t *weight, int16_t *orig,
4286 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4287 const uint8_t *scantable;
4288 const uint8_t *perm_scantable;
4289 // unsigned int threshold1, threshold2;
4294 int qmul, qadd, start_i, last_non_zero, i, dc;
4296 uint8_t * last_length;
4298 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* Lazily build the DCT basis tables on first use. */
4300 if(basis[0][0] == 0)
4301 build_basis(s->idsp.idct_permutation);
4306 scantable= s->intra_scantable.scantable;
4307 perm_scantable= s->intra_scantable.permutated;
4314 /* For AIC we skip quant/dequant of INTRADC */
4318 q <<= RECON_SHIFT-3;
4319 /* note: block[0] is assumed to be positive */
4321 // block[0] = (block[0] + (q >> 1)) / q;
4323 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4324 // bias= 1<<(QMAT_SHIFT-1);
4325 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4326 length = s->intra_chroma_ac_vlc_length;
4327 last_length= s->intra_chroma_ac_vlc_last_length;
4329 length = s->intra_ac_vlc_length;
4330 last_length= s->intra_ac_vlc_last_length;
4333 scantable= s->inter_scantable.scantable;
4334 perm_scantable= s->inter_scantable.permutated;
4337 length = s->inter_ac_vlc_length;
4338 last_length= s->inter_ac_vlc_last_length;
4340 last_non_zero = s->block_last_index[n];
/* Residual = reconstruction so far minus the original pixels. */
4342 dc += (1<<(RECON_SHIFT-1));
4343 for(i=0; i<64; i++){
4344 rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
/* Build the perceptual weights (range 16..63) and the lambda scale. */
4348 for(i=0; i<64; i++){
4353 w= FFABS(weight[i]) + qns*one;
4354 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4357 // w=weight[i] = (63*qns + (w/2)) / w;
4360 av_assert2(w<(1<<6));
4363 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* Add the dequantized contribution of each current coefficient into
 * rem[] and record the run-length structure. */
4367 for(i=start_i; i<=last_non_zero; i++){
4368 int j= perm_scantable[i];
4369 const int level= block[j];
4373 if(level<0) coeff= qmul*level - qadd;
4374 else coeff= qmul*level + qadd;
4375 run_tab[rle_index++]=run;
4378 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
/* Main refinement loop: find the single +/-1 change with best score. */
4385 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4388 int run2, best_unquant_change=0, analyze_gradient;
4389 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
/* Gradient of the weighted error w.r.t. each coefficient; used to
 * prune changes that would move against the error direction. */
4391 if(analyze_gradient){
4392 for(i=0; i<64; i++){
4395 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
/* Intra DC gets its own +/-1 trial (no VLC bit-cost modeling). */
4401 const int level= block[0];
4402 int change, old_coeff;
4404 av_assert2(s->mb_intra);
4408 for(change=-1; change<=1; change+=2){
4409 int new_level= level + change;
4410 int score, new_coeff;
4412 new_coeff= q*new_level;
4413 if(new_coeff >= 2048 || new_coeff < 0)
4416 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4417 new_coeff - old_coeff);
4418 if(score<best_score){
4421 best_change= change;
4422 best_unquant_change= new_coeff - old_coeff;
4429 run2= run_tab[rle_index++];
/* AC coefficients: try +/-1 on each, accounting for the VLC bit-cost
 * delta of the changed (run, level) pairs. */
4433 for(i=start_i; i<64; i++){
4434 int j= perm_scantable[i];
4435 const int level= block[j];
4436 int change, old_coeff;
4438 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4442 if(level<0) old_coeff= qmul*level - qadd;
4443 else old_coeff= qmul*level + qadd;
4444 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4448 av_assert2(run2>=0 || i >= last_non_zero );
4451 for(change=-1; change<=1; change+=2){
4452 int new_level= level + change;
4453 int score, new_coeff, unquant_change;
4456 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4460 if(new_level<0) new_coeff= qmul*new_level - qadd;
4461 else new_coeff= qmul*new_level + qadd;
4462 if(new_coeff >= 2048 || new_coeff <= -2048)
4464 //FIXME check for overflow
/* Nonzero -> nonzero change: plain VLC length delta. */
4467 if(level < 63 && level > -63){
4468 if(i < last_non_zero)
4469 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4470 - length[UNI_AC_ENC_INDEX(run, level+64)];
4472 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4473 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* Zero -> +/-1: a coefficient appears, splitting a run in two. */
4476 av_assert2(FFABS(new_level)==1);
4478 if(analyze_gradient){
4479 int g= d1[ scantable[i] ];
4480 if(g && (g^new_level) >= 0)
4484 if(i < last_non_zero){
4485 int next_i= i + run2 + 1;
4486 int next_level= block[ perm_scantable[next_i] ] + 64;
4488 if(next_level&(~127))
4491 if(next_i < last_non_zero)
4492 score += length[UNI_AC_ENC_INDEX(run, 65)]
4493 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4494 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4496 score += length[UNI_AC_ENC_INDEX(run, 65)]
4497 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4498 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4500 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4502 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4503 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* +/-1 -> zero: a coefficient disappears, merging two runs. */
4509 av_assert2(FFABS(level)==1);
4511 if(i < last_non_zero){
4512 int next_i= i + run2 + 1;
4513 int next_level= block[ perm_scantable[next_i] ] + 64;
4515 if(next_level&(~127))
4518 if(next_i < last_non_zero)
4519 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4520 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4521 - length[UNI_AC_ENC_INDEX(run, 65)];
4523 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4524 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4525 - length[UNI_AC_ENC_INDEX(run, 65)];
4527 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4529 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4530 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4537 unquant_change= new_coeff - old_coeff;
4538 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4540 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4542 if(score<best_score){
4545 best_change= change;
4546 best_unquant_change= unquant_change;
4550 prev_level= level + 64;
4551 if(prev_level&(~127))
/* Apply the winning change and keep last_non_zero / run_tab in sync. */
4561 int j= perm_scantable[ best_coeff ];
4563 block[j] += best_change;
4565 if(best_coeff > last_non_zero){
4566 last_non_zero= best_coeff;
4567 av_assert2(block[j]);
4569 for(; last_non_zero>=start_i; last_non_zero--){
4570 if(block[perm_scantable[last_non_zero]])
4577 for(i=start_i; i<=last_non_zero; i++){
4578 int j= perm_scantable[i];
4579 const int level= block[j];
4582 run_tab[rle_index++]=run;
4589 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4595 return last_non_zero;
4599 * Permute an 8x8 block according to permutation.
4600 * @param block the block which will be permuted according to
4601 * the given permutation vector
4602 * @param permutation the permutation vector
4603 * @param last the last non zero coefficient in scantable order, used to
4604 * speed the permutation up
4605 * @param scantable the used scantable, this is only used to speed the
4606 * permutation up, the block is not (inverse) permutated
4607 * to scantable order!
4609 void ff_block_permute(int16_t *block, uint8_t *permutation,
4610 const uint8_t *scantable, int last)
4617 //FIXME it is ok but not clean and might fail for some permutations
4618 // if (permutation[1] == 1)
/* Copy only the coefficients up to `last` (all others are zero), then
 * scatter them back through the permutation vector. */
4621 for (i = 0; i <= last; i++) {
4622 const int j = scantable[i];
4627 for (i = 0; i <= last; i++) {
4628 const int j = scantable[i];
4629 const int perm_j = permutation[j];
4630 block[perm_j] = temp[j];
/* Plain (non-trellis) C quantizer for one 8x8 block: forward DCT,
 * optional denoise, separate intra DC handling, then bias-and-shift
 * quantization of the remaining coefficients in scan order. Writes the
 * levels back into block[] and returns the last nonzero index; sets
 * *overflow when a level exceeded s->max_qcoeff. */
4634 int ff_dct_quantize_c(MpegEncContext *s,
4635 int16_t *block, int n,
4636 int qscale, int *overflow)
4638 int i, j, level, last_non_zero, q, start_i;
4640 const uint8_t *scantable;
4643 unsigned int threshold1, threshold2;
4645 s->fdsp.fdct(block);
4647 if(s->dct_error_sum)
4648 s->denoise_dct(s, block);
4651 scantable= s->intra_scantable.scantable;
4659 /* For AIC we skip quant/dequant of INTRADC */
4662 /* note: block[0] is assumed to be positive */
4663 block[0] = (block[0] + (q >> 1)) / q;
4666 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4667 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4669 scantable= s->inter_scantable.scantable;
4672 qmat = s->q_inter_matrix[qscale];
4673 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4675 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4676 threshold2= (threshold1<<1);
/* Scan backwards to find the last coefficient that quantizes nonzero. */
4677 for(i=63;i>=start_i;i--) {
4679 level = block[j] * qmat[j];
4681 if(((unsigned)(level+threshold1))>threshold2){
/* Quantize everything up to that point; small values become zero. */
4688 for(i=start_i; i<=last_non_zero; i++) {
4690 level = block[j] * qmat[j];
4692 // if( bias+level >= (1<<QMAT_SHIFT)
4693 // || bias-level >= (1<<QMAT_SHIFT)){
4694 if(((unsigned)(level+threshold1))>threshold2){
4696 level= (bias + level)>>QMAT_SHIFT;
4699 level= (bias - level)>>QMAT_SHIFT;
4707 *overflow= s->max_qcoeff < max; //overflow might have happened
4709 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4710 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4711 ff_block_permute(block, s->idsp.idct_permutation,
4712 scantable, last_non_zero);
4714 return last_non_zero;
/* AVOption plumbing: OFFSET maps an option to its MpegEncContext field,
 * VE marks options as video + encoding parameters. */
4717 #define OFFSET(x) offsetof(MpegEncContext, x)
4718 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options of the H.263 encoder. */
4719 static const AVOption h263_options[] = {
4720 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4721 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass exposing h263_options on the H.263 encoder's private context. */
4726 static const AVClass h263_class = {
4727 .class_name = "H.263 encoder",
4728 .item_name = av_default_item_name,
4729 .option = h263_options,
4730 .version = LIBAVUTIL_VERSION_INT,
/* H.263 encoder registration; uses the shared mpegvideo encode entry
 * points (ff_mpv_encode_*). Only yuv420p input is supported. */
4733 AVCodec ff_h263_encoder = {
4735 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4736 .type = AVMEDIA_TYPE_VIDEO,
4737 .id = AV_CODEC_ID_H263,
4738 .priv_data_size = sizeof(MpegEncContext),
4739 .init = ff_mpv_encode_init,
4740 .encode2 = ff_mpv_encode_picture,
4741 .close = ff_mpv_encode_end,
4742 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4743 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4744 .priv_class = &h263_class,
4747 static const AVOption h263p_options[] = {
4748 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4749 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4750 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4751 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
4755 static const AVClass h263p_class = {
4756 .class_name = "H.263p encoder",
4757 .item_name = av_default_item_name,
4758 .option = h263p_options,
4759 .version = LIBAVUTIL_VERSION_INT,
4762 AVCodec ff_h263p_encoder = {
4764 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4765 .type = AVMEDIA_TYPE_VIDEO,
4766 .id = AV_CODEC_ID_H263P,
4767 .priv_data_size = sizeof(MpegEncContext),
4768 .init = ff_mpv_encode_init,
4769 .encode2 = ff_mpv_encode_picture,
4770 .close = ff_mpv_encode_end,
4771 .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4772 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4773 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4774 .priv_class = &h263p_class,
4777 static const AVClass msmpeg4v2_class = {
4778 .class_name = "msmpeg4v2 encoder",
4779 .item_name = av_default_item_name,
4780 .option = ff_mpv_generic_options,
4781 .version = LIBAVUTIL_VERSION_INT,
4784 AVCodec ff_msmpeg4v2_encoder = {
4785 .name = "msmpeg4v2",
4786 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4787 .type = AVMEDIA_TYPE_VIDEO,
4788 .id = AV_CODEC_ID_MSMPEG4V2,
4789 .priv_data_size = sizeof(MpegEncContext),
4790 .init = ff_mpv_encode_init,
4791 .encode2 = ff_mpv_encode_picture,
4792 .close = ff_mpv_encode_end,
4793 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4794 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4795 .priv_class = &msmpeg4v2_class,
4798 static const AVClass msmpeg4v3_class = {
4799 .class_name = "msmpeg4v3 encoder",
4800 .item_name = av_default_item_name,
4801 .option = ff_mpv_generic_options,
4802 .version = LIBAVUTIL_VERSION_INT,
4805 AVCodec ff_msmpeg4v3_encoder = {
4807 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4808 .type = AVMEDIA_TYPE_VIDEO,
4809 .id = AV_CODEC_ID_MSMPEG4V3,
4810 .priv_data_size = sizeof(MpegEncContext),
4811 .init = ff_mpv_encode_init,
4812 .encode2 = ff_mpv_encode_picture,
4813 .close = ff_mpv_encode_end,
4814 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4815 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4816 .priv_class = &msmpeg4v3_class,
4819 static const AVClass wmv1_class = {
4820 .class_name = "wmv1 encoder",
4821 .item_name = av_default_item_name,
4822 .option = ff_mpv_generic_options,
4823 .version = LIBAVUTIL_VERSION_INT,
4826 AVCodec ff_wmv1_encoder = {
4828 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4829 .type = AVMEDIA_TYPE_VIDEO,
4830 .id = AV_CODEC_ID_WMV1,
4831 .priv_data_size = sizeof(MpegEncContext),
4832 .init = ff_mpv_encode_init,
4833 .encode2 = ff_mpv_encode_picture,
4834 .close = ff_mpv_encode_end,
4835 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4836 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4837 .priv_class = &wmv1_class,