2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/mem_internal.h"
40 #include "libavutil/pixdesc.h"
41 #include "libavutil/opt.h"
42 #include "libavutil/thread.h"
47 #include "mpegvideo.h"
48 #include "mpegvideodata.h"
52 #include "mjpegenc_common.h"
54 #include "mpegutils.h"
56 #include "speedhqenc.h"
58 #include "pixblockdsp.h"
62 #include "aandcttab.h"
64 #include "mpeg4video.h"
66 #include "bytestream.h"
69 #include "packet_internal.h"
73 #define QUANT_BIAS_SHIFT 8
75 #define QMAT_SHIFT_MMX 16
78 static int encode_picture(MpegEncContext *s, int picture_number);
79 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
80 static int sse_mb(MpegEncContext *s);
81 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
82 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
84 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
85 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
87 const AVOption ff_mpv_generic_options[] = {
/*
 * Build per-qscale quantization multiplier tables from a quant matrix.
 * For each qscale in [qmin, qmax] it fills qmat (32-bit reciprocal table used
 * by the C/trellis quantizers) and qmat16 (16-bit multiplier + bias pair used
 * by SIMD quantizers), choosing the scaling that matches the active forward
 * DCT implementation (accurate islow/faan path vs. the AAN "ifast" path,
 * whose output is pre-scaled by ff_aanscales).
 * NOTE(review): this excerpt is elided (embedded original line numbers show
 * gaps), so declarations such as qscale/qscale2/i and several closing braces
 * are not visible here.
 */
92 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
93 uint16_t (*qmat16)[2][64],
94 const uint16_t *quant_matrix,
95 int bias, int qmin, int qmax, int intra)
97 FDCTDSPContext *fdsp = &s->fdsp;
101 for (qscale = qmin; qscale <= qmax; qscale++) {
/* qscale2 is the effective doubled scale; MPEG-2 non-linear q_scale_type
 * maps through a lookup table instead of a plain left shift. */
105 if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
106 else qscale2 = qscale << 1;
/* Accurate DCTs (islow 8/10-bit, faandct when configured): plain reciprocal. */
108 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
110 fdsp->fdct == ff_faandct ||
111 #endif /* CONFIG_FAANDCT */
112 fdsp->fdct == ff_jpeg_fdct_islow_10) {
113 for (i = 0; i < 64; i++) {
/* j: index after the IDCT permutation, so tables line up with coefficients. */
114 const int j = s->idsp.idct_permutation[i];
115 int64_t den = (int64_t) qscale2 * quant_matrix[j];
116 /* 16 <= qscale * quant_matrix[i] <= 7905
117 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
118 * 19952 <= x <= 249205026
119 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
120 * 3444240 >= (1 << 36) / (x) >= 275 */
122 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
/* AAN fast DCT: fold the per-coefficient AAN scale factors into the divisor. */
124 } else if (fdsp->fdct == ff_fdct_ifast) {
125 for (i = 0; i < 64; i++) {
126 const int j = s->idsp.idct_permutation[i];
127 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
128 /* 16 <= qscale * quant_matrix[i] <= 7905
129 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
130 * 19952 <= x <= 249205026
131 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
132 * 3444240 >= (1 << 36) / (x) >= 275 */
134 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
/* Remaining DCTs: also build the 16-bit SIMD tables (multiplier + bias). */
137 for (i = 0; i < 64; i++) {
138 const int j = s->idsp.idct_permutation[i];
139 int64_t den = (int64_t) qscale2 * quant_matrix[j];
140 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
141 * Assume x = qscale * quant_matrix[i]
143 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
144 * so 32768 >= (1 << 19) / (x) >= 67 */
145 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
146 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
147 // (qscale * quant_matrix[i]);
148 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
/* Clamp: 0 would drop the coefficient entirely, 128*256 overflows int16. */
150 if (qmat16[qscale][0][i] == 0 ||
151 qmat16[qscale][0][i] == 128 * 256)
152 qmat16[qscale][0][i] = 128 * 256 - 1;
153 qmat16[qscale][1][i] =
154 ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
155 qmat16[qscale][0][i]);
/* Shrink 'shift' while the worst-case product would overflow int; for intra
 * the DC coefficient (i == 0) is skipped via the loop start index. */
159 for (i = intra; i < 64; i++) {
161 if (fdsp->fdct == ff_fdct_ifast) {
162 max = (8191LL * ff_aanscales[i]) >> 14;
164 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
170 av_log(s->avctx, AV_LOG_INFO,
171 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/*
 * Derive s->qscale (and s->lambda2) from the rate-control lambda.
 * The first branch — a table search over the MPEG-2 non-linear qscale values —
 * is deliberately disabled by the "&& 0"; the live path is the linear
 * lambda->qscale mapping below, clipped to [qmin, qmax] (qmax relaxed to 31
 * when vbv_ignore_qmax is set).
 * NOTE(review): elided excerpt — loop variable declarations, the best-index
 * bookkeeping and closing braces are not visible here.
 */
176 static inline void update_qscale(MpegEncContext *s)
178 if (s->q_scale_type == 1 && 0) {
180 int bestdiff=INT_MAX;
183 for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
184 int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
/* Skip table entries outside the configured qmin/qmax window. */
185 if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
186 (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
188 if (diff < bestdiff) {
/* Linear mapping: qscale ~= lambda * 139 / 2^(FF_LAMBDA_SHIFT+7), rounded. */
195 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
196 (FF_LAMBDA_SHIFT + 7);
197 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
/* lambda2 = lambda^2 rescaled, kept in sync for RD cost computations. */
200 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/*
 * Write a 64-entry quantization matrix to the bitstream, 8 bits per entry,
 * in zigzag scan order (as required by the MPEG/JPEG headers).
 * NOTE(review): elided excerpt — the loop variable declaration and closing
 * braces are not visible here.
 */
204 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
210 for (i = 0; i < 64; i++) {
211 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
218 * init s->current_picture.qscale_table from s->lambda_table
/*
 * Convert the per-macroblock lambda table into per-macroblock qscale values,
 * using the same linear lambda->qscale mapping as update_qscale() and
 * clipping at least to avctx->qmin (upper clip bound on an elided line).
 * mb_index2xy maps the sequential MB index to its position in the table.
 */
220 void ff_init_qscale_tab(MpegEncContext *s)
222 int8_t * const qscale_table = s->current_picture.qscale_table;
225 for (i = 0; i < s->mb_num; i++) {
226 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
227 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
228 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/*
 * Copy the fields that motion estimation may have changed from the master
 * context (src) into a slice-thread duplicate context (dst), so the
 * per-slice contexts stay consistent after ME.
 * NOTE(review): elided excerpt — additional COPY() lines and the #undef are
 * not visible here.
 */
233 static void update_duplicate_context_after_me(MpegEncContext *dst,
236 #define COPY(a) dst->a= src->a
238 COPY(current_picture);
244 COPY(picture_in_gop_number);
245 COPY(gop_picture_number);
246 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
247 COPY(progressive_frame); // FIXME don't set in encode_header
248 COPY(partitioned_frame); // FIXME don't set in encode_header
/*
 * One-time (ff_thread_once) initialization of the static encoder tables.
 * The visible part marks f_codes for small motion vectors (|mv| < 16) as 1
 * in default_fcode_tab; initialization of default_mv_penalty is on elided
 * lines.
 */
252 static void mpv_encode_init_static(void)
254 for (int i = -16; i < 16; i++)
255 default_fcode_tab[i + MAX_MV] = 1;
259 * Set the given MpegEncContext to defaults for encoding.
260 * the changed fields will not depend upon the prior state of the MpegEncContext.
/* Resets common defaults, runs the one-time static table init, and points the
 * context at the shared default MV-penalty / f_code tables. */
262 static void mpv_encode_defaults(MpegEncContext *s)
264 static AVOnce init_static_once = AV_ONCE_INIT;
266 ff_mpv_common_defaults(s);
/* Thread-safe: the static tables above are filled exactly once. */
268 ff_thread_once(&init_static_once, mpv_encode_init_static);
270 s->me.mv_penalty = default_mv_penalty;
271 s->fcode_tab = default_fcode_tab;
273 s->input_picture_number = 0;
274 s->picture_in_gop_number = 0;
/*
 * Install the DCT quantizer function pointers: arch-specific setup first
 * (x86 may pre-fill dct_quantize), then the C fallbacks. When trellis
 * quantization is requested, dct_quantize is swapped for the trellis
 * variant while fast_dct_quantize keeps the plain version.
 * NOTE(review): elided excerpt — the surrounding #if/#endif for the x86 call
 * and the return statement are not visible here.
 */
277 av_cold int ff_dct_encode_init(MpegEncContext *s)
280 ff_dct_encode_init_x86(s);
282 if (CONFIG_H263_ENCODER)
283 ff_h263dsp_init(&s->h263dsp);
284 if (!s->dct_quantize)
285 s->dct_quantize = ff_dct_quantize_c;
287 s->denoise_dct = denoise_dct_c;
288 s->fast_dct_quantize = s->dct_quantize;
289 if (s->avctx->trellis)
290 s->dct_quantize = dct_quantize_trellis_c;
295 /* init video encoder */
/*
 * Main encoder init for all mpegvideo-based encoders (MPEG-1/2/4, H.261/263,
 * MJPEG/AMV, MSMPEG4/WMV, RV10/20, FLV1, SpeedHQ).
 * Validates user options against the selected codec's constraints, fills the
 * MpegEncContext from AVCodecContext, dispatches per-codec setup, allocates
 * the quantization/picture tables, and initializes DSP + rate control.
 * Returns 0 on success, a negative AVERROR on invalid/unsupported settings.
 * NOTE(review): elided excerpt — many original lines (breaks, closing braces,
 * some assignments) are missing between the visible ones.
 */
296 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
298 MpegEncContext *s = avctx->priv_data;
299 AVCPBProperties *cpb_props;
300 int i, ret, format_supported;
302 mpv_encode_defaults(s);
/* --- pixel-format validation (MJPEG needs full-range or unofficial mode) --- */
304 switch (avctx->codec_id) {
305 case AV_CODEC_ID_MJPEG:
306 format_supported = 0;
307 /* JPEG color space */
308 if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
309 avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
310 avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
311 (avctx->color_range == AVCOL_RANGE_JPEG &&
312 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
313 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
314 avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
315 format_supported = 1;
316 /* MPEG color space */
317 else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
318 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
319 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
320 avctx->pix_fmt == AV_PIX_FMT_YUV444P))
321 format_supported = 1;
323 if (!format_supported) {
324 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
325 return AVERROR(EINVAL);
/* Chroma subsampling class derived from the pixel format. */
330 switch (avctx->pix_fmt) {
331 case AV_PIX_FMT_YUVJ444P:
332 case AV_PIX_FMT_YUV444P:
333 s->chroma_format = CHROMA_444;
335 case AV_PIX_FMT_YUVJ422P:
336 case AV_PIX_FMT_YUV422P:
337 s->chroma_format = CHROMA_422;
339 case AV_PIX_FMT_YUVJ420P:
340 case AV_PIX_FMT_YUV420P:
342 s->chroma_format = CHROMA_420;
346 avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
/* --- import deprecated public options into private fields --- */
348 #if FF_API_PRIVATE_OPT
349 FF_DISABLE_DEPRECATION_WARNINGS
350 if (avctx->rtp_payload_size)
351 s->rtp_payload_size = avctx->rtp_payload_size;
352 if (avctx->me_penalty_compensation)
353 s->me_penalty_compensation = avctx->me_penalty_compensation;
355 s->me_pre = avctx->pre_me;
356 FF_ENABLE_DEPRECATION_WARNINGS
/* --- copy basic parameters from AVCodecContext --- */
359 s->bit_rate = avctx->bit_rate;
360 s->width = avctx->width;
361 s->height = avctx->height;
362 if (avctx->gop_size > 600 &&
363 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
364 av_log(avctx, AV_LOG_WARNING,
365 "keyframe interval too large!, reducing it from %d to %d\n",
366 avctx->gop_size, 600);
367 avctx->gop_size = 600;
369 s->gop_size = avctx->gop_size;
371 if (avctx->max_b_frames > MAX_B_FRAMES) {
372 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
373 "is %d.\n", MAX_B_FRAMES);
374 avctx->max_b_frames = MAX_B_FRAMES;
376 s->max_b_frames = avctx->max_b_frames;
377 s->codec_id = avctx->codec->id;
378 s->strict_std_compliance = avctx->strict_std_compliance;
379 s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
380 s->rtp_mode = !!s->rtp_payload_size;
381 s->intra_dc_precision = avctx->intra_dc_precision;
383 // workaround some differences between how applications specify dc precision
384 if (s->intra_dc_precision < 0) {
385 s->intra_dc_precision += 8;
386 } else if (s->intra_dc_precision >= 8)
387 s->intra_dc_precision -= 8;
389 if (s->intra_dc_precision < 0) {
390 av_log(avctx, AV_LOG_ERROR,
391 "intra dc precision must be positive, note some applications use"
392 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
393 return AVERROR(EINVAL);
396 if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))
/* Only MPEG-2 supports DC precision above 8 bits (up to 11, i.e. value 3). */
399 if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
400 av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
401 return AVERROR(EINVAL);
403 s->user_specified_pts = AV_NOPTS_VALUE;
405 if (s->gop_size <= 1) {
413 s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
/* Adaptive quantization is enabled when any masking/RD option needs it. */
415 s->adaptive_quant = (avctx->lumi_masking ||
416 avctx->dark_masking ||
417 avctx->temporal_cplx_masking ||
418 avctx->spatial_cplx_masking ||
421 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
424 s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
/* --- rate-control / VBV consistency checks --- */
426 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
427 switch(avctx->codec_id) {
428 case AV_CODEC_ID_MPEG1VIDEO:
429 case AV_CODEC_ID_MPEG2VIDEO:
430 avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
432 case AV_CODEC_ID_MPEG4:
433 case AV_CODEC_ID_MSMPEG4V1:
434 case AV_CODEC_ID_MSMPEG4V2:
435 case AV_CODEC_ID_MSMPEG4V3:
/* Piecewise-linear VBV size heuristic in units of 16384 bits. */
436 if (avctx->rc_max_rate >= 15000000) {
437 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
438 } else if(avctx->rc_max_rate >= 2000000) {
439 avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
440 } else if(avctx->rc_max_rate >= 384000) {
441 avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
443 avctx->rc_buffer_size = 40;
444 avctx->rc_buffer_size *= 16384;
447 if (avctx->rc_buffer_size) {
448 av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
452 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
453 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
454 return AVERROR(EINVAL);
457 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
458 av_log(avctx, AV_LOG_INFO,
459 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
462 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
463 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
464 return AVERROR(EINVAL);
467 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
468 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
469 return AVERROR(EINVAL);
472 if (avctx->rc_max_rate &&
473 avctx->rc_max_rate == avctx->bit_rate &&
474 avctx->rc_max_rate != avctx->rc_min_rate) {
475 av_log(avctx, AV_LOG_INFO,
476 "impossible bitrate constraints, this will fail\n");
/* Buffer must hold at least one frame's worth of bits at the target rate. */
479 if (avctx->rc_buffer_size &&
480 avctx->bit_rate * (int64_t)avctx->time_base.num >
481 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
482 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
483 return AVERROR(EINVAL);
486 if (!s->fixed_qscale &&
487 avctx->bit_rate * av_q2d(avctx->time_base) >
488 avctx->bit_rate_tolerance) {
489 av_log(avctx, AV_LOG_WARNING,
490 "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
491 avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
/* MPEG-1/2 vbv_delay is a 16-bit 90kHz field; warn when it would saturate. */
494 if (avctx->rc_max_rate &&
495 avctx->rc_min_rate == avctx->rc_max_rate &&
496 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
497 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
498 90000LL * (avctx->rc_buffer_size - 1) >
499 avctx->rc_max_rate * 0xFFFFLL) {
500 av_log(avctx, AV_LOG_INFO,
501 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
502 "specified vbv buffer is too large for the given bitrate!\n");
/* --- feature/codec compatibility checks --- */
505 if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
506 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
507 s->codec_id != AV_CODEC_ID_FLV1) {
508 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
509 return AVERROR(EINVAL);
512 if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
513 av_log(avctx, AV_LOG_ERROR,
514 "OBMC is only supported with simple mb decision\n");
515 return AVERROR(EINVAL);
518 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
519 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
520 return AVERROR(EINVAL);
523 if (s->max_b_frames &&
524 s->codec_id != AV_CODEC_ID_MPEG4 &&
525 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
526 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
527 av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
528 return AVERROR(EINVAL);
530 if (s->max_b_frames < 0) {
531 av_log(avctx, AV_LOG_ERROR,
532 "max b frames must be 0 or positive for mpegvideo based encoders\n");
533 return AVERROR(EINVAL);
/* --- resolution / aspect-ratio limits per codec --- */
536 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
537 s->codec_id == AV_CODEC_ID_H263 ||
538 s->codec_id == AV_CODEC_ID_H263P) &&
539 (avctx->sample_aspect_ratio.num > 255 ||
540 avctx->sample_aspect_ratio.den > 255)) {
541 av_log(avctx, AV_LOG_WARNING,
542 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
543 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
544 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
545 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
548 if ((s->codec_id == AV_CODEC_ID_H263 ||
549 s->codec_id == AV_CODEC_ID_H263P) &&
550 (avctx->width > 2048 ||
551 avctx->height > 1152 )) {
552 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
553 return AVERROR(EINVAL);
555 if ((s->codec_id == AV_CODEC_ID_H263 ||
556 s->codec_id == AV_CODEC_ID_H263P) &&
557 ((avctx->width &3) ||
558 (avctx->height&3) )) {
559 av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
560 return AVERROR(EINVAL);
563 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
564 (avctx->width > 4095 ||
565 avctx->height > 4095 )) {
566 av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
567 return AVERROR(EINVAL);
570 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
571 (avctx->width > 16383 ||
572 avctx->height > 16383 )) {
573 av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
574 return AVERROR(EINVAL);
577 if (s->codec_id == AV_CODEC_ID_RV10 &&
579 avctx->height&15 )) {
580 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
581 return AVERROR(EINVAL);
584 if (s->codec_id == AV_CODEC_ID_RV20 &&
587 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
588 return AVERROR(EINVAL);
591 if ((s->codec_id == AV_CODEC_ID_WMV1 ||
592 s->codec_id == AV_CODEC_ID_WMV2) &&
594 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
595 return AVERROR(EINVAL);
598 if ((avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
599 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
600 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
601 return AVERROR(EINVAL);
604 #if FF_API_PRIVATE_OPT
605 FF_DISABLE_DEPRECATION_WARNINGS
606 if (avctx->mpeg_quant)
608 FF_ENABLE_DEPRECATION_WARNINGS
611 // FIXME mpeg2 uses that too
612 if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
613 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
614 av_log(avctx, AV_LOG_ERROR,
615 "mpeg2 style quantization not supported by codec\n");
616 return AVERROR(EINVAL);
619 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
620 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
621 return AVERROR(EINVAL);
624 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
625 avctx->mb_decision != FF_MB_DECISION_RD) {
626 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
627 return AVERROR(EINVAL);
630 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
631 (s->codec_id == AV_CODEC_ID_AMV ||
632 s->codec_id == AV_CODEC_ID_MJPEG)) {
633 // Used to produce garbage with MJPEG.
634 av_log(avctx, AV_LOG_ERROR,
635 "QP RD is no longer compatible with MJPEG or AMV\n");
636 return AVERROR(EINVAL);
639 #if FF_API_PRIVATE_OPT
640 FF_DISABLE_DEPRECATION_WARNINGS
641 if (avctx->scenechange_threshold)
642 s->scenechange_threshold = avctx->scenechange_threshold;
643 FF_ENABLE_DEPRECATION_WARNINGS
646 if (s->scenechange_threshold < 1000000000 &&
647 (avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
648 av_log(avctx, AV_LOG_ERROR,
649 "closed gop with scene change detection are not supported yet, "
650 "set threshold to 1000000000\n");
651 return AVERROR_PATCHWELCOME;
654 if (avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
655 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
656 s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
657 av_log(avctx, AV_LOG_ERROR,
658 "low delay forcing is only available for mpeg2, "
659 "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
660 return AVERROR(EINVAL);
662 if (s->max_b_frames != 0) {
663 av_log(avctx, AV_LOG_ERROR,
664 "B-frames cannot be used with low delay\n");
665 return AVERROR(EINVAL);
669 if (s->q_scale_type == 1) {
670 if (avctx->qmax > 28) {
671 av_log(avctx, AV_LOG_ERROR,
672 "non linear quant only supports qmax <= 28 currently\n");
673 return AVERROR_PATCHWELCOME;
677 if (avctx->slices > 1 &&
678 (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
679 av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
680 return AVERROR(EINVAL);
683 if (avctx->thread_count > 1 &&
684 s->codec_id != AV_CODEC_ID_MPEG4 &&
685 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
686 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
687 s->codec_id != AV_CODEC_ID_MJPEG &&
688 (s->codec_id != AV_CODEC_ID_H263P)) {
689 av_log(avctx, AV_LOG_ERROR,
690 "multi threaded encoding not supported by codec\n");
691 return AVERROR_PATCHWELCOME;
694 if (avctx->thread_count < 1) {
695 av_log(avctx, AV_LOG_ERROR,
696 "automatic thread number detection not supported by codec, "
698 return AVERROR_PATCHWELCOME;
701 if (!avctx->time_base.den || !avctx->time_base.num) {
702 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
703 return AVERROR(EINVAL);
706 #if FF_API_PRIVATE_OPT
707 FF_DISABLE_DEPRECATION_WARNINGS
708 if (avctx->b_frame_strategy)
709 s->b_frame_strategy = avctx->b_frame_strategy;
710 if (avctx->b_sensitivity != 40)
711 s->b_sensitivity = avctx->b_sensitivity;
712 FF_ENABLE_DEPRECATION_WARNINGS
715 if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
716 av_log(avctx, AV_LOG_INFO,
717 "notice: b_frame_strategy only affects the first pass\n");
718 s->b_frame_strategy = 0;
/* Reduce the timebase to lowest terms before it goes into headers. */
721 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
723 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
724 avctx->time_base.den /= i;
725 avctx->time_base.num /= i;
/* --- default quantizer rounding biases (codec-family dependent) --- */
729 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
730 // (a + x * 3 / 8) / x
731 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
732 s->inter_quant_bias = 0;
734 s->intra_quant_bias = 0;
736 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
739 if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
740 av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
741 return AVERROR(EINVAL);
744 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
/* MPEG-4 stores the timebase denominator in a 16-bit field. */
746 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
747 avctx->time_base.den > (1 << 16) - 1) {
748 av_log(avctx, AV_LOG_ERROR,
749 "timebase %d/%d not supported by MPEG 4 standard, "
750 "the maximum admitted value for the timebase denominator "
751 "is %d\n", avctx->time_base.num, avctx->time_base.den,
753 return AVERROR(EINVAL);
755 s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
/* --- per-codec output format and feature setup --- */
757 switch (avctx->codec->id) {
758 case AV_CODEC_ID_MPEG1VIDEO:
759 s->out_format = FMT_MPEG1;
760 s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
761 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
763 case AV_CODEC_ID_MPEG2VIDEO:
764 s->out_format = FMT_MPEG1;
765 s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
766 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
769 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
770 case AV_CODEC_ID_MJPEG:
771 case AV_CODEC_ID_AMV:
772 s->out_format = FMT_MJPEG;
773 s->intra_only = 1; /* force intra only for jpeg */
774 if ((ret = ff_mjpeg_encode_init(s)) < 0)
780 case AV_CODEC_ID_SPEEDHQ:
781 s->out_format = FMT_SPEEDHQ;
782 s->intra_only = 1; /* force intra only for SHQ */
783 if (!CONFIG_SPEEDHQ_ENCODER)
784 return AVERROR_ENCODER_NOT_FOUND;
785 if ((ret = ff_speedhq_encode_init(s)) < 0)
790 case AV_CODEC_ID_H261:
791 if (!CONFIG_H261_ENCODER)
792 return AVERROR_ENCODER_NOT_FOUND;
793 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
794 av_log(avctx, AV_LOG_ERROR,
795 "The specified picture size of %dx%d is not valid for the "
796 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
797 s->width, s->height);
798 return AVERROR(EINVAL);
800 s->out_format = FMT_H261;
803 s->rtp_mode = 0; /* Sliced encoding not supported */
805 case AV_CODEC_ID_H263:
806 if (!CONFIG_H263_ENCODER)
807 return AVERROR_ENCODER_NOT_FOUND;
808 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
809 s->width, s->height) == 8) {
810 av_log(avctx, AV_LOG_ERROR,
811 "The specified picture size of %dx%d is not valid for "
812 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
813 "352x288, 704x576, and 1408x1152. "
814 "Try H.263+.\n", s->width, s->height);
815 return AVERROR(EINVAL);
817 s->out_format = FMT_H263;
821 case AV_CODEC_ID_H263P:
822 s->out_format = FMT_H263;
825 s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
826 s->modified_quant = s->h263_aic;
827 s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
828 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
831 /* These are just to be sure */
835 case AV_CODEC_ID_FLV1:
836 s->out_format = FMT_H263;
837 s->h263_flv = 2; /* format = 1; 11-bit codes */
838 s->unrestricted_mv = 1;
839 s->rtp_mode = 0; /* don't allow GOB */
843 case AV_CODEC_ID_RV10:
844 s->out_format = FMT_H263;
848 case AV_CODEC_ID_RV20:
849 s->out_format = FMT_H263;
852 s->modified_quant = 1;
856 s->unrestricted_mv = 0;
858 case AV_CODEC_ID_MPEG4:
859 s->out_format = FMT_H263;
861 s->unrestricted_mv = 1;
862 s->low_delay = s->max_b_frames ? 0 : 1;
863 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
865 case AV_CODEC_ID_MSMPEG4V2:
866 s->out_format = FMT_H263;
868 s->unrestricted_mv = 1;
869 s->msmpeg4_version = 2;
873 case AV_CODEC_ID_MSMPEG4V3:
874 s->out_format = FMT_H263;
876 s->unrestricted_mv = 1;
877 s->msmpeg4_version = 3;
878 s->flipflop_rounding = 1;
882 case AV_CODEC_ID_WMV1:
883 s->out_format = FMT_H263;
885 s->unrestricted_mv = 1;
886 s->msmpeg4_version = 4;
887 s->flipflop_rounding = 1;
891 case AV_CODEC_ID_WMV2:
892 s->out_format = FMT_H263;
894 s->unrestricted_mv = 1;
895 s->msmpeg4_version = 5;
896 s->flipflop_rounding = 1;
901 return AVERROR(EINVAL);
904 #if FF_API_PRIVATE_OPT
905 FF_DISABLE_DEPRECATION_WARNINGS
906 if (avctx->noise_reduction)
907 s->noise_reduction = avctx->noise_reduction;
908 FF_ENABLE_DEPRECATION_WARNINGS
911 avctx->has_b_frames = !s->low_delay;
915 s->progressive_frame =
916 s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
917 AV_CODEC_FLAG_INTERLACED_ME) ||
/* --- allocate common state and initialize DSP contexts --- */
922 if ((ret = ff_mpv_common_init(s)) < 0)
925 ff_fdctdsp_init(&s->fdsp, avctx);
926 ff_me_cmp_init(&s->mecc, avctx);
927 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
928 ff_pixblockdsp_init(&s->pdsp, avctx);
929 ff_qpeldsp_init(&s->qdsp);
931 if (s->msmpeg4_version) {
932 int ac_stats_size = 2 * 2 * (MAX_LEVEL + 1) * (MAX_RUN + 1) * 2 * sizeof(int);
933 if (!(s->ac_stats = av_mallocz(ac_stats_size)))
934 return AVERROR(ENOMEM);
937 if (!(avctx->stats_out = av_mallocz(256)) ||
938 !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix, 32) ||
939 !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix, 32) ||
940 !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix, 32) ||
941 !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16, 32) ||
942 !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
943 !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16, 32) ||
944 !FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_PICTURE_COUNT) ||
945 !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_PICTURE_COUNT))
946 return AVERROR(ENOMEM);
948 if (s->noise_reduction) {
949 if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
950 return AVERROR(ENOMEM);
953 ff_dct_encode_init(s);
955 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
956 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
958 if (s->slice_context_count > 1) {
961 if (avctx->codec_id == AV_CODEC_ID_H263P)
962 s->h263_slice_structured = 1;
965 s->quant_precision = 5;
967 #if FF_API_PRIVATE_OPT
968 FF_DISABLE_DEPRECATION_WARNINGS
969 if (avctx->frame_skip_threshold)
970 s->frame_skip_threshold = avctx->frame_skip_threshold;
971 if (avctx->frame_skip_factor)
972 s->frame_skip_factor = avctx->frame_skip_factor;
973 if (avctx->frame_skip_exp)
974 s->frame_skip_exp = avctx->frame_skip_exp;
975 if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
976 s->frame_skip_cmp = avctx->frame_skip_cmp;
977 FF_ENABLE_DEPRECATION_WARNINGS
980 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, avctx->ildct_cmp);
981 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
/* Per-format sub-encoder initialization (compiled-in encoders only). */
983 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
984 ff_h261_encode_init(s);
985 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
986 ff_h263_encode_init(s);
987 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
988 ff_msmpeg4_encode_init(s);
989 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
990 && s->out_format == FMT_MPEG1)
991 ff_mpeg1_encode_init(s);
/* --- select default quant matrices, permuted for the IDCT in use --- */
994 for (i = 0; i < 64; i++) {
995 int j = s->idsp.idct_permutation[i];
996 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
998 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
999 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
1000 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1001 s->intra_matrix[j] =
1002 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1003 } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
1004 s->intra_matrix[j] =
1005 s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1008 s->chroma_intra_matrix[j] =
1009 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1010 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
/* User-supplied matrices override the defaults. */
1012 if (avctx->intra_matrix)
1013 s->intra_matrix[j] = avctx->intra_matrix[i];
1014 if (avctx->inter_matrix)
1015 s->inter_matrix[j] = avctx->inter_matrix[i];
1018 /* precompute matrix */
1019 /* for mjpeg, we do include qscale in the matrix */
1020 if (s->out_format != FMT_MJPEG) {
1021 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
1022 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1024 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
1025 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1029 if ((ret = ff_rate_control_init(s)) < 0)
1032 #if FF_API_PRIVATE_OPT
1033 FF_DISABLE_DEPRECATION_WARNINGS
1034 if (avctx->brd_scale)
1035 s->brd_scale = avctx->brd_scale;
1037 if (avctx->prediction_method)
1038 s->pred = avctx->prediction_method + 1;
1039 FF_ENABLE_DEPRECATION_WARNINGS
/* b_frame_strategy 2 needs downscaled scratch frames for lookahead. */
1042 if (s->b_frame_strategy == 2) {
1043 for (i = 0; i < s->max_b_frames + 2; i++) {
1044 s->tmp_frames[i] = av_frame_alloc();
1045 if (!s->tmp_frames[i])
1046 return AVERROR(ENOMEM);
1048 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1049 s->tmp_frames[i]->width = s->width >> s->brd_scale;
1050 s->tmp_frames[i]->height = s->height >> s->brd_scale;
1052 ret = av_frame_get_buffer(s->tmp_frames[i], 0);
/* Export CPB properties as packet side data for muxers. */
1058 cpb_props = ff_add_cpb_side_data(avctx);
1060 return AVERROR(ENOMEM);
1061 cpb_props->max_bitrate = avctx->rc_max_rate;
1062 cpb_props->min_bitrate = avctx->rc_min_rate;
1063 cpb_props->avg_bitrate = avctx->bit_rate;
1064 cpb_props->buffer_size = avctx->rc_buffer_size;
/*
 * Encoder teardown: releases rate control, common mpegvideo state, MJPEG
 * tables when applicable, and every buffer allocated by ff_mpv_encode_init.
 * The chroma matrix pointers may alias the luma ones, hence the identity
 * checks before freeing them (freeing once, then NULLing both views).
 */
1069 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1071 MpegEncContext *s = avctx->priv_data;
1074 ff_rate_control_uninit(s);
1076 ff_mpv_common_end(s);
1077 if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
1078 s->out_format == FMT_MJPEG)
1079 ff_mjpeg_encode_close(s);
1081 av_freep(&avctx->extradata);
1083 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1084 av_frame_free(&s->tmp_frames[i]);
1086 ff_free_picture_tables(&s->new_picture);
1087 ff_mpeg_unref_picture(avctx, &s->new_picture);
1089 av_freep(&avctx->stats_out);
1090 av_freep(&s->ac_stats);
/* Only free the chroma tables when they are distinct allocations. */
1092 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1093 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1094 s->q_chroma_intra_matrix= NULL;
1095 s->q_chroma_intra_matrix16= NULL;
1096 av_freep(&s->q_intra_matrix);
1097 av_freep(&s->q_inter_matrix);
1098 av_freep(&s->q_intra_matrix16);
1099 av_freep(&s->q_inter_matrix16);
1100 av_freep(&s->input_picture);
1101 av_freep(&s->reordered_input_picture);
1102 av_freep(&s->dct_offset);
/*
 * Sum of absolute errors of a 16x16 block against the constant value 'ref'
 * (typically the block mean) — a cheap flatness/activity measure.
 * NOTE(review): elided excerpt — the accumulator declaration/initialization
 * and the return are not visible here.
 */
1107 static int get_sae(uint8_t *src, int ref, int stride)
1112 for (y = 0; y < 16; y++) {
1113 for (x = 0; x < 16; x++) {
1114 acc += FFABS(src[x + y * stride] - ref);
/*
 * Count how many 16x16 blocks of the frame look better coded as intra:
 * for each macroblock, compare the SAD against the reference frame with the
 * self-similarity measure (SAE around the block mean); the "+ 500" margin
 * biases the decision toward inter coding.
 * NOTE(review): elided excerpt — w initialization and the return statement
 * are not visible here.
 */
1121 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1122 uint8_t *ref, int stride)
/* Only full macroblocks are considered (dimensions rounded down to x16). */
1128 h = s->height & ~15;
1130 for (y = 0; y < h; y += 16) {
1131 for (x = 0; x < w; x += 16) {
1132 int offset = x + y * stride;
1133 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
1135 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1136 int sae = get_sae(src + offset, mean, stride);
1138 acc += sae + 500 < sad;
/* Thin wrapper: allocate (or wrap, when 'shared') the buffers of 'pic'
 * using the encoder's geometry; forwards to the generic ff_alloc_picture(). */
1144 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1146 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1147 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1148 s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1149 &s->linesize, &s->uvlinesize);
/* Take one user-supplied frame (or NULL to flush) into the encoder's
 * input_picture[] queue: validate/derive its pts, either reference the
 * frame directly ("direct" path) or copy it into an internal Picture,
 * then shift the queue so s->input_picture[0] holds the oldest frame.
 * NOTE(review): this numbered listing appears abridged — the flush/NULL
 * handling, several error checks, braces and the return are elided. */
1152 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1154 Picture *pic = NULL;
1156 int i, display_picture_number = 0, ret;
/* With B-frames the reorder delay is max_b_frames; otherwise one frame
 * unless low_delay is set. */
1157 int encoding_delay = s->max_b_frames ? s->max_b_frames
1158 : (s->low_delay ? 0 : 1);
1159 int flush_offset = 1;
1164 display_picture_number = s->input_picture_number++;
/* --- pts validation / guessing ------------------------------------- */
1166 if (pts != AV_NOPTS_VALUE) {
1167 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1168 int64_t last = s->user_specified_pts;
/* Reject non-monotonic timestamps. */
1171 av_log(s->avctx, AV_LOG_ERROR,
1172 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1174 return AVERROR(EINVAL);
1177 if (!s->low_delay && display_picture_number == 1)
1178 s->dts_delta = pts - last;
1180 s->user_specified_pts = pts;
/* No pts given: extrapolate from the previous one, or fall back to the
 * display picture number. */
1182 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1183 s->user_specified_pts =
1184 pts = s->user_specified_pts + 1;
1185 av_log(s->avctx, AV_LOG_INFO,
1186 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1189 pts = display_picture_number;
/* --- decide direct (zero-copy) vs. copy path ----------------------- */
/* Direct use of the user buffer requires matching linesizes, dimensions
 * that are multiples of 16, and sufficiently aligned data pointers. */
1193 if (!pic_arg->buf[0] ||
1194 pic_arg->linesize[0] != s->linesize ||
1195 pic_arg->linesize[1] != s->uvlinesize ||
1196 pic_arg->linesize[2] != s->uvlinesize)
1198 if ((s->width & 15) || (s->height & 15))
1200 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1202 if (s->linesize & (STRIDE_ALIGN-1))
1205 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1206 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1208 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1212 pic = &s->picture[i];
1216 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1219 ret = alloc_picture(s, pic, direct);
/* If the user data already sits exactly at our INPLACE_OFFSET we can
 * skip the copy entirely. */
1224 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1225 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1226 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1229 int h_chroma_shift, v_chroma_shift;
1230 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
/* --- plane-by-plane copy into the internal picture ----------------- */
1234 for (i = 0; i < 3; i++) {
1235 int src_stride = pic_arg->linesize[i];
1236 int dst_stride = i ? s->uvlinesize : s->linesize;
1237 int h_shift = i ? h_chroma_shift : 0;
1238 int v_shift = i ? v_chroma_shift : 0;
1239 int w = s->width >> h_shift;
1240 int h = s->height >> v_shift;
1241 uint8_t *src = pic_arg->data[i];
1242 uint8_t *dst = pic->f->data[i];
/* Interlaced MPEG-2 with large 32-row alignment needs extra bottom
 * padding handling (elided in this listing). */
1245 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1246 && !s->progressive_sequence
1247 && FFALIGN(s->height, 32) - s->height > 16)
1250 if (!s->avctx->rc_buffer_size)
1251 dst += INPLACE_OFFSET;
/* Fast path: identical strides — one bulk memcpy per plane. */
1253 if (src_stride == dst_stride)
1254 memcpy(dst, src, src_stride * h);
1257 uint8_t *dst2 = dst;
1259 memcpy(dst2, src, w);
/* Replicate edge pixels when dimensions are not MB-aligned. */
1264 if ((s->width & 15) || (s->height & (vpad-1))) {
1265 s->mpvencdsp.draw_edges(dst, dst_stride,
1275 ret = av_frame_copy_props(pic->f, pic_arg);
1279 pic->f->display_picture_number = display_picture_number;
1280 pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1282 /* Flushing: When we have not received enough input frames,
1283 * ensure s->input_picture[0] contains the first picture */
1284 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1285 if (s->input_picture[flush_offset])
1288 if (flush_offset <= 1)
1291 encoding_delay = encoding_delay - flush_offset + 1;
1294 /* shift buffer entries */
1295 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1296 s->input_picture[i - flush_offset] = s->input_picture[i];
1298 s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether picture 'p' is similar enough to reference 'ref' to be
 * skipped entirely (frame-skip feature): accumulate a per-8x8-block
 * comparison score over all three planes using the metric selected by
 * s->frame_skip_exp, then test it against threshold and factor*lambda.
 * NOTE(review): listing abridged — declarations, braces and the two
 * return statements are elided. */
1303 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1307 int64_t score64 = 0;
1309 for (plane = 0; plane < 3; plane++) {
1310 const int stride = p->f->linesize[plane];
/* Luma has twice as many 8x8 blocks per MB row/col as each chroma plane. */
1311 const int bw = plane ? 1 : 2;
1312 for (y = 0; y < s->mb_height * bw; y++) {
1313 for (x = 0; x < s->mb_width * bw; x++) {
/* Shared (user-owned) pictures have no INPLACE offset applied. */
1314 int off = p->shared ? 0 : 16;
1315 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1316 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1317 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
/* Accumulate with the norm chosen by |frame_skip_exp|:
 * 0 = max, 1 = L1, 2 = L2, 3 = L3, 4 = L4. */
1319 switch (FFABS(s->frame_skip_exp)) {
1320 case 0: score = FFMAX(score, v); break;
1321 case 1: score += FFABS(v); break;
1322 case 2: score64 += v * (int64_t)v; break;
1323 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1324 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
/* A negative exponent requests normalization: take the per-MB mean and
 * the corresponding root. */
1333 if (s->frame_skip_exp < 0)
1334 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1335 -1.0/s->frame_skip_exp);
1337 if (score64 < s->frame_skip_threshold)
1339 if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
/* Helper for estimate_best_b_count(): push one frame (or NULL to flush)
 * through the scratch encoder 'c' and drain a packet into 'pkt'.
 * NOTE(review): listing abridged — the size accumulation and return of
 * the encoded size / error code are elided. */
1344 static int encode_frame(AVCodecContext *c, AVFrame *frame, AVPacket *pkt)
1349 ret = avcodec_send_frame(c, frame);
1354 ret = avcodec_receive_packet(c, pkt);
/* The packet contents are not needed, only its size — unref right away. */
1357 av_packet_unref(pkt);
/* EAGAIN/EOF are normal drain conditions, not errors. */
1358 } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
/* b_frame_strategy == 2: brute-force search for the best number of
 * consecutive B-frames. Downscale the queued input pictures by
 * 2^brd_scale into s->tmp_frames[], then for every candidate count j
 * re-encode the mini-GOP with a scratch encoder and pick the j with the
 * lowest rate-distortion cost.
 * NOTE(review): listing abridged — error-check branches, the rd
 * bookkeeping between the visible lines and the best_rd/best_b_count
 * update are elided. */
1365 static int estimate_best_b_count(MpegEncContext *s)
1367 const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1369 const int scale = s->brd_scale;
1370 int width = s->width >> scale;
1371 int height = s->height >> scale;
1372 int i, j, out_size, p_lambda, b_lambda, lambda2;
1373 int64_t best_rd = INT64_MAX;
1374 int best_b_count = -1;
1377 av_assert0(scale >= 0 && scale <= 3);
1379 pkt = av_packet_alloc();
1381 return AVERROR(ENOMEM);
/* Use the last lambdas as quality estimates for P and B pictures. */
1384 //s->next_picture_ptr->quality;
1385 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1386 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1387 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1388 if (!b_lambda) // FIXME we should do this somewhere else
1389 b_lambda = p_lambda;
1390 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* --- downscale the candidate pictures into tmp_frames[] ------------- */
/* Slot 0 is the previous reference (next_picture_ptr); slots 1..N are
 * the queued input pictures. */
1393 for (i = 0; i < s->max_b_frames + 2; i++) {
1394 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1395 s->next_picture_ptr;
1398 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1399 pre_input = *pre_input_ptr;
1400 memcpy(data, pre_input_ptr->f->data, sizeof(data));
/* Internal (non-shared) pictures carry the INPLACE offset. */
1402 if (!pre_input.shared && i) {
1403 data[0] += INPLACE_OFFSET;
1404 data[1] += INPLACE_OFFSET;
1405 data[2] += INPLACE_OFFSET;
1408 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1409 s->tmp_frames[i]->linesize[0],
1411 pre_input.f->linesize[0],
1413 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1414 s->tmp_frames[i]->linesize[1],
1416 pre_input.f->linesize[1],
1417 width >> 1, height >> 1);
1418 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1419 s->tmp_frames[i]->linesize[2],
1421 pre_input.f->linesize[2],
1422 width >> 1, height >> 1);
/* --- try every candidate B-frame count j ---------------------------- */
1426 for (j = 0; j < s->max_b_frames + 1; j++) {
1430 if (!s->input_picture[j])
/* Fresh scratch encoder per candidate, mirroring the key settings. */
1433 c = avcodec_alloc_context3(NULL);
1435 ret = AVERROR(ENOMEM);
1441 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1442 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1443 c->mb_decision = s->avctx->mb_decision;
1444 c->me_cmp = s->avctx->me_cmp;
1445 c->mb_cmp = s->avctx->mb_cmp;
1446 c->me_sub_cmp = s->avctx->me_sub_cmp;
1447 c->pix_fmt = AV_PIX_FMT_YUV420P;
1448 c->time_base = s->avctx->time_base;
1449 c->max_b_frames = s->max_b_frames;
1451 ret = avcodec_open2(c, codec, NULL);
/* First picture is always the I reference; its cost is not counted. */
1456 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1457 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1459 out_size = encode_frame(c, s->tmp_frames[0], pkt);
1465 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1467 for (i = 0; i < s->max_b_frames + 1; i++) {
/* Every (j+1)-th picture — and the final one — is a P, the rest are B. */
1468 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1470 s->tmp_frames[i + 1]->pict_type = is_p ?
1471 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1472 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1474 out_size = encode_frame(c, s->tmp_frames[i + 1], pkt);
1480 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1483 /* get the delayed frames */
1484 out_size = encode_frame(c, NULL, pkt);
1489 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* Add the distortion (PSNR error) of the reconstructed pictures. */
1491 rd += c->error[0] + c->error[1] + c->error[2];
1499 avcodec_free_context(&c);
1500 av_packet_unref(pkt);
1507 av_packet_free(&pkt);
1509 return best_b_count;
/* Choose the next picture to encode: apply frame skipping, decide the
 * picture type (I/P/B), determine how many B-frames precede the next
 * reference according to b_frame_strategy, reorder input_picture[] into
 * reordered_input_picture[] (coded order), and set up s->new_picture /
 * s->current_picture for the actual encode.
 * NOTE(review): this numbered listing appears abridged — error paths,
 * braces and several statements between the visible lines are elided. */
1512 static int select_input_picture(MpegEncContext *s)
/* Advance the coded-order queue by one. */
1516 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1517 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1518 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1520 /* set next picture type & ordering */
1521 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
/* --- optional whole-frame skipping -------------------------------- */
1522 if (s->frame_skip_threshold || s->frame_skip_factor) {
1523 if (s->picture_in_gop_number < s->gop_size &&
1524 s->next_picture_ptr &&
1525 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1526 // FIXME check that the gop check above is +-1 correct
1527 av_frame_unref(s->input_picture[0]->f);
1529 ff_vbv_update(s, 0);
/* --- forced I-frame: no reference yet, or intra-only codec --------- */
1535 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1536 !s->next_picture_ptr || s->intra_only) {
1537 s->reordered_input_picture[0] = s->input_picture[0];
1538 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1539 s->reordered_input_picture[0]->f->coded_picture_number =
1540 s->coded_picture_number++;
/* --- two-pass: take picture types from the pass-1 stats ------------ */
1544 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1545 for (i = 0; i < s->max_b_frames + 1; i++) {
1546 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1548 if (pict_num >= s->rc_context.num_entries)
1550 if (!s->input_picture[i]) {
1551 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1555 s->input_picture[i]->f->pict_type =
1556 s->rc_context.entry[pict_num].new_pict_type;
/* --- pick the B-frame run length per strategy ---------------------- */
1560 if (s->b_frame_strategy == 0) {
/* Strategy 0: always use max_b_frames (trimmed at end of stream). */
1561 b_frames = s->max_b_frames;
1562 while (b_frames && !s->input_picture[b_frames])
1564 } else if (s->b_frame_strategy == 1) {
/* Strategy 1: heuristic based on the intra-count between neighbors. */
1565 for (i = 1; i < s->max_b_frames + 1; i++) {
1566 if (s->input_picture[i] &&
1567 s->input_picture[i]->b_frame_score == 0) {
1568 s->input_picture[i]->b_frame_score =
1570 s->input_picture[i ]->f->data[0],
1571 s->input_picture[i - 1]->f->data[0],
1575 for (i = 0; i < s->max_b_frames + 1; i++) {
1576 if (!s->input_picture[i] ||
1577 s->input_picture[i]->b_frame_score - 1 >
1578 s->mb_num / s->b_sensitivity)
1582 b_frames = FFMAX(0, i - 1);
/* Reset the cached scores of the pictures that will be consumed. */
1585 for (i = 0; i < b_frames + 1; i++) {
1586 s->input_picture[i]->b_frame_score = 0;
1588 } else if (s->b_frame_strategy == 2) {
/* Strategy 2: exhaustive RD search (see estimate_best_b_count). */
1589 b_frames = estimate_best_b_count(s);
/* Respect user-forced picture types inside the run. */
1596 for (i = b_frames - 1; i >= 0; i--) {
1597 int type = s->input_picture[i]->f->pict_type;
1598 if (type && type != AV_PICTURE_TYPE_B)
1601 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1602 b_frames == s->max_b_frames) {
1603 av_log(s->avctx, AV_LOG_ERROR,
1604 "warning, too many B-frames in a row\n");
/* GOP boundary handling: shorten or convert per strict_gop/closed_gop. */
1607 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1608 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1609 s->gop_size > s->picture_in_gop_number) {
1610 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1612 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1614 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1618 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1619 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
/* --- emit the reference first, then the B-frames, in coded order --- */
1622 s->reordered_input_picture[0] = s->input_picture[b_frames];
1623 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1624 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1625 s->reordered_input_picture[0]->f->coded_picture_number =
1626 s->coded_picture_number++;
1627 for (i = 0; i < b_frames; i++) {
1628 s->reordered_input_picture[i + 1] = s->input_picture[i];
1629 s->reordered_input_picture[i + 1]->f->pict_type =
1631 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1632 s->coded_picture_number++;
/* --- install the selected picture as new_picture/current_picture --- */
1637 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1639 if (s->reordered_input_picture[0]) {
/* Reference flag: 3 for I/P (both fields), 0 for B. */
1640 s->reordered_input_picture[0]->reference =
1641 s->reordered_input_picture[0]->f->pict_type !=
1642 AV_PICTURE_TYPE_B ? 3 : 0;
1644 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1647 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1648 // input is a shared pix, so we can't modify it -> allocate a new
1649 // one & ensure that the shared one is reuseable
1652 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1655 pic = &s->picture[i];
1657 pic->reference = s->reordered_input_picture[0]->reference;
1658 if (alloc_picture(s, pic, 0) < 0) {
1662 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1666 /* mark us unused / free shared pic */
1667 av_frame_unref(s->reordered_input_picture[0]->f);
1668 s->reordered_input_picture[0]->shared = 0;
1670 s->current_picture_ptr = pic;
1672 // input is not a shared pix -> reuse buffer for current_pix
1673 s->current_picture_ptr = s->reordered_input_picture[0];
1674 for (i = 0; i < 4; i++) {
1675 if (s->new_picture.f->data[i])
1676 s->new_picture.f->data[i] += INPLACE_OFFSET;
1679 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1680 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1681 s->current_picture_ptr)) < 0)
1684 s->picture_number = s->new_picture.f->display_picture_number;
/* Per-frame post-encode bookkeeping: pad the reconstructed reference
 * picture's edges for unrestricted motion vectors, remember the last
 * picture type / lambda, and (behind deprecation guards) mirror data
 * into the deprecated coded_frame / error fields.
 * NOTE(review): listing abridged — some condition lines and braces are
 * elided. */
1689 static void frame_end(MpegEncContext *s)
/* Edge padding is only needed for reference pictures when unrestricted
 * MVs may point outside the frame. */
1691 if (s->unrestricted_mv &&
1692 s->current_picture.reference &&
1694 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1695 int hshift = desc->log2_chroma_w;
1696 int vshift = desc->log2_chroma_h;
/* Luma plane: full-resolution edges. */
1697 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1698 s->current_picture.f->linesize[0],
1699 s->h_edge_pos, s->v_edge_pos,
1700 EDGE_WIDTH, EDGE_WIDTH,
1701 EDGE_TOP | EDGE_BOTTOM);
/* Chroma planes: scaled by the pixel format's subsampling shifts. */
1702 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1703 s->current_picture.f->linesize[1],
1704 s->h_edge_pos >> hshift,
1705 s->v_edge_pos >> vshift,
1706 EDGE_WIDTH >> hshift,
1707 EDGE_WIDTH >> vshift,
1708 EDGE_TOP | EDGE_BOTTOM);
1709 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1710 s->current_picture.f->linesize[2],
1711 s->h_edge_pos >> hshift,
1712 s->v_edge_pos >> vshift,
1713 EDGE_WIDTH >> hshift,
1714 EDGE_WIDTH >> vshift,
1715 EDGE_TOP | EDGE_BOTTOM);
/* Remember type/lambda for the next frame's decisions. */
1720 s->last_pict_type = s->pict_type;
1721 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1722 if (s->pict_type!= AV_PICTURE_TYPE_B)
1723 s->last_non_b_pict_type = s->pict_type;
/* Deprecated public-API mirrors, kept while the FF_API_* guards exist. */
1725 #if FF_API_CODED_FRAME
1726 FF_DISABLE_DEPRECATION_WARNINGS
1727 av_frame_unref(s->avctx->coded_frame);
1728 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1729 FF_ENABLE_DEPRECATION_WARNINGS
1731 #if FF_API_ERROR_FRAME
1732 FF_DISABLE_DEPRECATION_WARNINGS
1733 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1734 sizeof(s->current_picture.encoding_error));
1735 FF_ENABLE_DEPRECATION_WARNINGS
/* Refresh the per-coefficient DCT noise-reduction offsets from the
 * accumulated error statistics, separately for intra and inter blocks.
 * NOTE(review): listing abridged — loop variable declarations and braces
 * are elided. */
1739 static void update_noise_reduction(MpegEncContext *s)
1743 for (intra = 0; intra < 2; intra++) {
/* Halve the accumulated stats periodically so they track recent frames
 * instead of growing without bound. */
1744 if (s->dct_count[intra] > (1 << 16)) {
1745 for (i = 0; i < 64; i++) {
1746 s->dct_error_sum[intra][i] >>= 1;
1748 s->dct_count[intra] >>= 1;
/* offset[i] ~= noise_reduction * count / error_sum[i], rounded;
 * the +1 in the divisor avoids division by zero. */
1751 for (i = 0; i < 64; i++) {
1752 s->dct_offset[intra][i] = (s->noise_reduction *
1753 s->dct_count[intra] +
1754 s->dct_error_sum[intra][i] / 2) /
1755 (s->dct_error_sum[intra][i] + 1);
/* Per-frame pre-encode setup: rotate the last/next/current reference
 * picture pointers, adjust plane pointers/linesizes for field pictures,
 * select the dequantizer functions for the target format, and refresh
 * noise-reduction tables. Returns 0 on success (error paths elided).
 * NOTE(review): listing abridged — braces, some declarations and returns
 * are elided in this numbered listing. */
1760 static int frame_start(MpegEncContext *s)
1764 /* mark & release old frames */
1765 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1766 s->last_picture_ptr != s->next_picture_ptr &&
1767 s->last_picture_ptr->f->buf[0]) {
1768 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1771 s->current_picture_ptr->f->pict_type = s->pict_type;
1772 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1774 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1775 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1776 s->current_picture_ptr)) < 0)
/* Non-B pictures become the new forward reference. */
1779 if (s->pict_type != AV_PICTURE_TYPE_B) {
1780 s->last_picture_ptr = s->next_picture_ptr;
1782 s->next_picture_ptr = s->current_picture_ptr;
1785 if (s->last_picture_ptr) {
1786 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1787 if (s->last_picture_ptr->f->buf[0] &&
1788 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1789 s->last_picture_ptr)) < 0)
1792 if (s->next_picture_ptr) {
1793 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1794 if (s->next_picture_ptr->f->buf[0] &&
1795 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1796 s->next_picture_ptr)) < 0)
/* Field pictures: point at the right field and double the strides. */
1800 if (s->picture_structure!= PICT_FRAME) {
1802 for (i = 0; i < 4; i++) {
1803 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1804 s->current_picture.f->data[i] +=
1805 s->current_picture.f->linesize[i];
1807 s->current_picture.f->linesize[i] *= 2;
1808 s->last_picture.f->linesize[i] *= 2;
1809 s->next_picture.f->linesize[i] *= 2;
/* Pick the dequantizer matching the bitstream format. */
1813 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1814 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1815 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1816 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1817 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1818 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1820 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1821 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1824 if (s->dct_error_sum) {
1825 av_assert2(s->noise_reduction && s->encoding);
1826 update_noise_reduction(s);
/* Top-level encode entry point: queue the input frame, select the next
 * picture to code, allocate the output packet, run encode_picture()
 * (re-running it with higher lambda if the VBV would overflow), write
 * stuffing bits, update MPEG-1/2 vbv_delay for CBR, and fill in packet
 * pts/dts/flags. *got_packet is set when a packet was produced.
 * NOTE(review): this numbered listing appears abridged — error paths,
 * braces, some conditions and returns are elided between the visible
 * lines. */
1832 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1833 const AVFrame *pic_arg, int *got_packet)
1835 MpegEncContext *s = avctx->priv_data;
1836 int i, stuffing_count, ret;
1837 int context_count = s->slice_context_count;
1839 s->vbv_ignore_qmax = 0;
1841 s->picture_in_gop_number++;
1843 if (load_input_picture(s, pic_arg) < 0)
1846 if (select_input_picture(s) < 0) {
/* --- output packet allocation -------------------------------------- */
1851 if (s->new_picture.f->data[0]) {
/* Single slice context with no preallocated data can grow the internal
 * byte buffer; otherwise allocate the worst-case packet size upfront. */
1852 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1853 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1855 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1856 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1859 s->mb_info_ptr = av_packet_new_side_data(pkt,
1860 AV_PKT_DATA_H263_MB_INFO,
1861 s->mb_width*s->mb_height*12);
1862 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* Partition the packet among the slice threads proportionally to their
 * macroblock-row ranges. */
1865 for (i = 0; i < context_count; i++) {
1866 int start_y = s->thread_context[i]->start_mb_y;
1867 int end_y = s->thread_context[i]-> end_mb_y;
1868 int h = s->mb_height;
1869 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1870 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1872 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1875 s->pict_type = s->new_picture.f->pict_type;
1877 ret = frame_start(s);
1881 ret = encode_picture(s, s->picture_number);
1882 if (growing_buffer) {
1883 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1884 pkt->data = s->pb.buf;
1885 pkt->size = avctx->internal->byte_buffer_size;
/* Deprecated per-frame statistics mirrored into the public context. */
1890 #if FF_API_STAT_BITS
1891 FF_DISABLE_DEPRECATION_WARNINGS
1892 avctx->header_bits = s->header_bits;
1893 avctx->mv_bits = s->mv_bits;
1894 avctx->misc_bits = s->misc_bits;
1895 avctx->i_tex_bits = s->i_tex_bits;
1896 avctx->p_tex_bits = s->p_tex_bits;
1897 avctx->i_count = s->i_count;
1898 // FIXME f/b_count in avctx
1899 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1900 avctx->skip_count = s->skip_count;
1901 FF_ENABLE_DEPRECATION_WARNINGS
1906 if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->out_format == FMT_MJPEG)
1907 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
/* --- VBV overflow check: re-encode with a larger lambda ------------ */
1909 if (avctx->rc_buffer_size) {
1910 RateControlContext *rcc = &s->rc_context;
1911 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1912 int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
1913 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1915 if (put_bits_count(&s->pb) > max_size &&
1916 s->lambda < s->lmax) {
/* Bump lambda proportionally to (qscale+1)/qscale, at least min_step. */
1917 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1918 (s->qscale + 1) / s->qscale);
1919 if (s->adaptive_quant) {
1921 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1922 s->lambda_table[i] =
1923 FFMAX(s->lambda_table[i] + min_step,
1924 s->lambda_table[i] * (s->qscale + 1) /
1927 s->mb_skipped = 0; // done in frame_start()
1928 // done in encode_picture() so we must undo it
1929 if (s->pict_type == AV_PICTURE_TYPE_P) {
1930 if (s->flipflop_rounding ||
1931 s->codec_id == AV_CODEC_ID_H263P ||
1932 s->codec_id == AV_CODEC_ID_MPEG4)
1933 s->no_rounding ^= 1;
1935 if (s->pict_type != AV_PICTURE_TYPE_B) {
1936 s->time_base = s->last_time_base;
1937 s->last_non_b_time = s->time - s->pp_time;
/* Reset all slice bitstream writers and loop back to re-encode. */
1939 for (i = 0; i < context_count; i++) {
1940 PutBitContext *pb = &s->thread_context[i]->pb;
1941 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1943 s->vbv_ignore_qmax = 1;
1944 av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1948 av_assert0(avctx->rc_max_rate);
1951 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1952 ff_write_pass1_stats(s);
/* Accumulate per-plane encoding error (for PSNR reporting). */
1954 for (i = 0; i < 4; i++) {
1955 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1956 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1958 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1959 s->current_picture_ptr->encoding_error,
1960 (avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1963 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1964 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1965 s->misc_bits + s->i_tex_bits +
1967 flush_put_bits(&s->pb);
1968 s->frame_bits = put_bits_count(&s->pb);
/* --- VBV stuffing bits --------------------------------------------- */
1970 stuffing_count = ff_vbv_update(s, s->frame_bits);
1971 s->stuffing_bits = 8*stuffing_count;
1972 if (stuffing_count) {
1973 if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
1974 av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
1978 switch (s->codec_id) {
1979 case AV_CODEC_ID_MPEG1VIDEO:
1980 case AV_CODEC_ID_MPEG2VIDEO:
/* MPEG-1/2 stuffing: zero bytes. */
1981 while (stuffing_count--) {
1982 put_bits(&s->pb, 8, 0);
1985 case AV_CODEC_ID_MPEG4:
/* MPEG-4 stuffing: a filler-data startcode, then 0xFF bytes. */
1986 put_bits(&s->pb, 16, 0);
1987 put_bits(&s->pb, 16, 0x1C3);
1988 stuffing_count -= 4;
1989 while (stuffing_count--) {
1990 put_bits(&s->pb, 8, 0xFF);
1994 av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1996 flush_put_bits(&s->pb);
1997 s->frame_bits = put_bits_count(&s->pb);
2000 /* update MPEG-1/2 vbv_delay for CBR */
2001 if (avctx->rc_max_rate &&
2002 avctx->rc_min_rate == avctx->rc_max_rate &&
2003 s->out_format == FMT_MPEG1 &&
2004 90000LL * (avctx->rc_buffer_size - 1) <=
2005 avctx->rc_max_rate * 0xFFFFLL) {
2006 AVCPBProperties *props;
2009 int vbv_delay, min_delay;
2010 double inbits = avctx->rc_max_rate *
2011 av_q2d(avctx->time_base);
2012 int minbits = s->frame_bits - 8 *
2013 (s->vbv_delay_ptr - s->pb.buf - 1);
2014 double bits = s->rc_context.buffer_index + minbits - inbits;
2017 av_log(avctx, AV_LOG_ERROR,
2018 "Internal error, negative bits\n");
2020 av_assert1(s->repeat_first_field == 0);
/* vbv_delay is expressed in 90 kHz clock ticks. */
2022 vbv_delay = bits * 90000 / avctx->rc_max_rate;
2023 min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
2026 vbv_delay = FFMAX(vbv_delay, min_delay);
2028 av_assert0(vbv_delay < 0xFFFF);
/* Patch the 16-bit vbv_delay field in the already-written picture
 * header (it straddles byte boundaries, hence the masking). */
2030 s->vbv_delay_ptr[0] &= 0xF8;
2031 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2032 s->vbv_delay_ptr[1] = vbv_delay >> 5;
2033 s->vbv_delay_ptr[2] &= 0x07;
2034 s->vbv_delay_ptr[2] |= vbv_delay << 3;
2036 props = av_cpb_properties_alloc(&props_size);
2038 return AVERROR(ENOMEM);
/* CPB side data uses the 27 MHz clock: 90 kHz * 300. */
2039 props->vbv_delay = vbv_delay * 300;
2041 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
2042 (uint8_t*)props, props_size);
2048 #if FF_API_VBV_DELAY
2049 FF_DISABLE_DEPRECATION_WARNINGS
2050 avctx->vbv_delay = vbv_delay * 300;
2051 FF_ENABLE_DEPRECATION_WARNINGS
2054 s->total_bits += s->frame_bits;
2055 #if FF_API_STAT_BITS
2056 FF_DISABLE_DEPRECATION_WARNINGS
2057 avctx->frame_bits = s->frame_bits;
2058 FF_ENABLE_DEPRECATION_WARNINGS
/* --- packet timestamps and flags ----------------------------------- */
2062 pkt->pts = s->current_picture.f->pts;
2063 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
/* First coded picture: derive dts from the initial dts_delta; later
 * ones reuse the previous reference's pts as dts (B-frame reorder). */
2064 if (!s->current_picture.f->coded_picture_number)
2065 pkt->dts = pkt->pts - s->dts_delta;
2067 pkt->dts = s->reordered_pts;
2068 s->reordered_pts = pkt->pts;
2070 pkt->dts = pkt->pts;
2071 if (s->current_picture.f->key_frame)
2072 pkt->flags |= AV_PKT_FLAG_KEY;
2074 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2079 /* release non-reference frames */
2080 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2081 if (!s->picture[i].reference)
2082 ff_mpeg_unref_picture(avctx, &s->picture[i]);
2085 av_assert1((s->frame_bits & 7) == 0);
2087 pkt->size = s->frame_bits / 8;
2088 *got_packet = !!pkt->size;
/* Heuristic: if block n contains only a few small coefficients whose
 * weighted "importance" (tab[] by scan position, higher weight near DC)
 * stays below 'threshold', zero the whole block — the bits saved outweigh
 * the quality loss. A negative threshold additionally protects the DC
 * coefficient (skip_dc). NOTE(review): listing abridged — the score
 * accumulation lines, skip_dc setup and braces are elided. */
2092 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2093 int n, int threshold)
/* Weight by zigzag position: coefficients near DC count more. */
2095 static const char tab[64] = {
2096 3, 2, 2, 1, 1, 1, 1, 1,
2097 1, 1, 1, 1, 1, 1, 1, 1,
2098 1, 1, 1, 1, 1, 1, 1, 1,
2099 0, 0, 0, 0, 0, 0, 0, 0,
2100 0, 0, 0, 0, 0, 0, 0, 0,
2101 0, 0, 0, 0, 0, 0, 0, 0,
2102 0, 0, 0, 0, 0, 0, 0, 0,
2103 0, 0, 0, 0, 0, 0, 0, 0
2108 int16_t *block = s->block[n];
2109 const int last_index = s->block_last_index[n];
2112 if (threshold < 0) {
2114 threshold = -threshold;
2118 /* Are all we could set to zero already zero? */
2119 if (last_index <= skip_dc - 1)
/* Score every nonzero coefficient; any |level| > 1 disqualifies the
 * block from elimination (handled in the elided branch). */
2122 for (i = 0; i <= last_index; i++) {
2123 const int j = s->intra_scantable.permutated[i];
2124 const int level = FFABS(block[j]);
2126 if (skip_dc && i == 0)
2130 } else if (level > 1) {
2136 if (score >= threshold)
/* Zero everything after the (possibly kept) DC coefficient. */
2138 for (i = skip_dc; i <= last_index; i++) {
2139 const int j = s->intra_scantable.permutated[i];
2143 s->block_last_index[n] = 0;
2145 s->block_last_index[n] = -1;
/* Clamp all quantized coefficients of 'block' into the codec's
 * representable range [min_qcoeff, max_qcoeff], counting how many were
 * clipped; warn once per block in simple-MB-decision mode since clipping
 * there directly degrades quality. NOTE(review): listing abridged — the
 * clamping assignments, overflow increments and braces are elided. */
2148 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2152 const int maxlevel = s->max_qcoeff;
2153 const int minlevel = s->min_qcoeff;
2157 i = 1; // skip clipping of intra dc
2161 for (; i <= last_index; i++) {
2162 const int j = s->intra_scantable.permutated[i];
2163 int level = block[j];
2165 if (level > maxlevel) {
2168 } else if (level < minlevel) {
2176 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2177 av_log(s->avctx, AV_LOG_INFO,
2178 "warning, clipping %d dct coefficients to %d..%d\n",
2179 overflow, minlevel, maxlevel);
/* Build an 8x8 perceptual weight table from local pixel variance: for
 * each pixel, measure the variance of its 3x3 neighborhood (clipped at
 * the block border) and store a scaled standard deviation. Used by the
 * trellis refinement to weight coefficient errors. NOTE(review): listing
 * abridged — the count/sum/sqr accumulation lines are elided. */
2182 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2186 for (y = 0; y < 8; y++) {
2187 for (x = 0; x < 8; x++) {
/* 3x3 neighborhood, clamped to the 8x8 block. */
2193 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2194 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2195 int v = ptr[x2 + y2 * stride];
/* weight = 36 * stddev of the neighborhood (count*sqr - sum^2 is
 * count^2 * variance). */
2201 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2206 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2207 int motion_x, int motion_y,
2208 int mb_block_height,
2212 int16_t weight[12][64];
2213 int16_t orig[12][64];
2214 const int mb_x = s->mb_x;
2215 const int mb_y = s->mb_y;
2218 int dct_offset = s->linesize * 8; // default for progressive frames
2219 int uv_dct_offset = s->uvlinesize * 8;
2220 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2221 ptrdiff_t wrap_y, wrap_c;
2223 for (i = 0; i < mb_block_count; i++)
2224 skip_dct[i] = s->skipdct;
2226 if (s->adaptive_quant) {
2227 const int last_qp = s->qscale;
2228 const int mb_xy = mb_x + mb_y * s->mb_stride;
2230 s->lambda = s->lambda_table[mb_xy];
2233 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2234 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2235 s->dquant = s->qscale - last_qp;
2237 if (s->out_format == FMT_H263) {
2238 s->dquant = av_clip(s->dquant, -2, 2);
2240 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2242 if (s->pict_type == AV_PICTURE_TYPE_B) {
2243 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2246 if (s->mv_type == MV_TYPE_8X8)
2252 ff_set_qscale(s, last_qp + s->dquant);
2253 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2254 ff_set_qscale(s, s->qscale + s->dquant);
2256 wrap_y = s->linesize;
2257 wrap_c = s->uvlinesize;
2258 ptr_y = s->new_picture.f->data[0] +
2259 (mb_y * 16 * wrap_y) + mb_x * 16;
2260 ptr_cb = s->new_picture.f->data[1] +
2261 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2262 ptr_cr = s->new_picture.f->data[2] +
2263 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2265 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2266 uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2267 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2268 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2269 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2271 16, 16, mb_x * 16, mb_y * 16,
2272 s->width, s->height);
2274 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2276 mb_block_width, mb_block_height,
2277 mb_x * mb_block_width, mb_y * mb_block_height,
2279 ptr_cb = ebuf + 16 * wrap_y;
2280 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2282 mb_block_width, mb_block_height,
2283 mb_x * mb_block_width, mb_y * mb_block_height,
2285 ptr_cr = ebuf + 16 * wrap_y + 16;
2289 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2290 int progressive_score, interlaced_score;
2292 s->interlaced_dct = 0;
2293 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2294 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2295 NULL, wrap_y, 8) - 400;
2297 if (progressive_score > 0) {
2298 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2299 NULL, wrap_y * 2, 8) +
2300 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2301 NULL, wrap_y * 2, 8);
2302 if (progressive_score > interlaced_score) {
2303 s->interlaced_dct = 1;
2305 dct_offset = wrap_y;
2306 uv_dct_offset = wrap_c;
2308 if (s->chroma_format == CHROMA_422 ||
2309 s->chroma_format == CHROMA_444)
2315 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2316 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2317 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2318 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2320 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2324 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2325 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2326 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2327 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2328 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2329 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2330 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2331 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2332 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2333 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2334 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2335 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2339 op_pixels_func (*op_pix)[4];
2340 qpel_mc_func (*op_qpix)[16];
2341 uint8_t *dest_y, *dest_cb, *dest_cr;
2343 dest_y = s->dest[0];
2344 dest_cb = s->dest[1];
2345 dest_cr = s->dest[2];
2347 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2348 op_pix = s->hdsp.put_pixels_tab;
2349 op_qpix = s->qdsp.put_qpel_pixels_tab;
2351 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2352 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2355 if (s->mv_dir & MV_DIR_FORWARD) {
2356 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2357 s->last_picture.f->data,
2359 op_pix = s->hdsp.avg_pixels_tab;
2360 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2362 if (s->mv_dir & MV_DIR_BACKWARD) {
2363 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2364 s->next_picture.f->data,
2368 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2369 int progressive_score, interlaced_score;
2371 s->interlaced_dct = 0;
2372 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2373 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2377 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2378 progressive_score -= 400;
2380 if (progressive_score > 0) {
2381 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2383 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2387 if (progressive_score > interlaced_score) {
2388 s->interlaced_dct = 1;
2390 dct_offset = wrap_y;
2391 uv_dct_offset = wrap_c;
2393 if (s->chroma_format == CHROMA_422)
2399 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2400 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2401 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2402 dest_y + dct_offset, wrap_y);
2403 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2404 dest_y + dct_offset + 8, wrap_y);
2406 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2410 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2411 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2412 if (!s->chroma_y_shift) { /* 422 */
2413 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2414 dest_cb + uv_dct_offset, wrap_c);
2415 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2416 dest_cr + uv_dct_offset, wrap_c);
2419 /* pre quantization */
2420 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2421 2 * s->qscale * s->qscale) {
2423 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2425 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2427 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2428 wrap_y, 8) < 20 * s->qscale)
2430 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2431 wrap_y, 8) < 20 * s->qscale)
2433 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2435 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2437 if (!s->chroma_y_shift) { /* 422 */
2438 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2439 dest_cb + uv_dct_offset,
2440 wrap_c, 8) < 20 * s->qscale)
2442 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2443 dest_cr + uv_dct_offset,
2444 wrap_c, 8) < 20 * s->qscale)
2450 if (s->quantizer_noise_shaping) {
2452 get_visual_weight(weight[0], ptr_y , wrap_y);
2454 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2456 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2458 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2460 get_visual_weight(weight[4], ptr_cb , wrap_c);
2462 get_visual_weight(weight[5], ptr_cr , wrap_c);
2463 if (!s->chroma_y_shift) { /* 422 */
2465 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2468 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2471 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2474 /* DCT & quantize */
2475 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2477 for (i = 0; i < mb_block_count; i++) {
2480 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2481 // FIXME we could decide to change to quantizer instead of
2483 // JS: I don't think that would be a good idea it could lower
2484 // quality instead of improve it. Just INTRADC clipping
2485 // deserves changes in quantizer
2487 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2489 s->block_last_index[i] = -1;
2491 if (s->quantizer_noise_shaping) {
2492 for (i = 0; i < mb_block_count; i++) {
2494 s->block_last_index[i] =
2495 dct_quantize_refine(s, s->block[i], weight[i],
2496 orig[i], i, s->qscale);
2501 if (s->luma_elim_threshold && !s->mb_intra)
2502 for (i = 0; i < 4; i++)
2503 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2504 if (s->chroma_elim_threshold && !s->mb_intra)
2505 for (i = 4; i < mb_block_count; i++)
2506 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2508 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2509 for (i = 0; i < mb_block_count; i++) {
2510 if (s->block_last_index[i] == -1)
2511 s->coded_score[i] = INT_MAX / 256;
2516 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2517 s->block_last_index[4] =
2518 s->block_last_index[5] = 0;
2520 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2521 if (!s->chroma_y_shift) { /* 422 / 444 */
2522 for (i=6; i<12; i++) {
2523 s->block_last_index[i] = 0;
2524 s->block[i][0] = s->block[4][0];
2529 // non c quantize code returns incorrect block_last_index FIXME
2530 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2531 for (i = 0; i < mb_block_count; i++) {
2533 if (s->block_last_index[i] > 0) {
2534 for (j = 63; j > 0; j--) {
2535 if (s->block[i][s->intra_scantable.permutated[j]])
2538 s->block_last_index[i] = j;
2543 /* huffman encode */
2544 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2545 case AV_CODEC_ID_MPEG1VIDEO:
2546 case AV_CODEC_ID_MPEG2VIDEO:
2547 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2548 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2550 case AV_CODEC_ID_MPEG4:
2551 if (CONFIG_MPEG4_ENCODER)
2552 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2554 case AV_CODEC_ID_MSMPEG4V2:
2555 case AV_CODEC_ID_MSMPEG4V3:
2556 case AV_CODEC_ID_WMV1:
2557 if (CONFIG_MSMPEG4_ENCODER)
2558 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2560 case AV_CODEC_ID_WMV2:
2561 if (CONFIG_WMV2_ENCODER)
2562 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2564 case AV_CODEC_ID_H261:
2565 if (CONFIG_H261_ENCODER)
2566 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2568 case AV_CODEC_ID_H263:
2569 case AV_CODEC_ID_H263P:
2570 case AV_CODEC_ID_FLV1:
2571 case AV_CODEC_ID_RV10:
2572 case AV_CODEC_ID_RV20:
2573 if (CONFIG_H263_ENCODER)
2574 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2576 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
2577 case AV_CODEC_ID_MJPEG:
2578 case AV_CODEC_ID_AMV:
2579 ff_mjpeg_encode_mb(s, s->block);
2582 case AV_CODEC_ID_SPEEDHQ:
2583 if (CONFIG_SPEEDHQ_ENCODER)
2584 ff_speedhq_encode_mb(s, s->block);
2591 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2593 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2594 else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2595 else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/*
 * Snapshot the encoder state that encoding one macroblock can modify,
 * so a candidate coding mode can be tried and later undone.
 * Counterpart of copy_context_after_encode(); used by encode_mb_hq().
 */
2598 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
     /* motion-vector prediction state */
2601 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
     /* MPEG-1/2 macroblock skip-run state */
2604 d->mb_skip_run= s->mb_skip_run;
     /* per-component DC predictors */
2606 d->last_dc[i] = s->last_dc[i];
     /* bit-usage statistics (for rate control / pass-1 logs) */
2609 d->mv_bits= s->mv_bits;
2610 d->i_tex_bits= s->i_tex_bits;
2611 d->p_tex_bits= s->p_tex_bits;
2612 d->i_count= s->i_count;
2613 d->f_count= s->f_count;
2614 d->b_count= s->b_count;
2615 d->skip_count= s->skip_count;
2616 d->misc_bits= s->misc_bits;
     /* quantizer state */
2620 d->qscale= s->qscale;
2621 d->dquant= s->dquant;
     /* MPEG-4 escape-3 coding state */
2623 d->esc3_level_length= s->esc3_level_length;
/*
 * Record the encoder state left behind after encoding a candidate
 * macroblock (MVs, statistics, coded-block info, quantizer), so the best
 * candidate found by encode_mb_hq() can later be re-applied.
 * Mirror of copy_context_before_encode() plus the mode/result fields.
 */
2626 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
     /* motion vectors chosen by the candidate, plus prediction state */
2629 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2630 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2633 d->mb_skip_run= s->mb_skip_run;
     /* per-component DC predictors */
2635 d->last_dc[i] = s->last_dc[i];
     /* bit-usage statistics */
2638 d->mv_bits= s->mv_bits;
2639 d->i_tex_bits= s->i_tex_bits;
2640 d->p_tex_bits= s->p_tex_bits;
2641 d->i_count= s->i_count;
2642 d->f_count= s->f_count;
2643 d->b_count= s->b_count;
2644 d->skip_count= s->skip_count;
2645 d->misc_bits= s->misc_bits;
     /* chosen coding mode */
2647 d->mb_intra= s->mb_intra;
2648 d->mb_skipped= s->mb_skipped;
2649 d->mv_type= s->mv_type;
2650 d->mv_dir= s->mv_dir;
     /* data-partitioned streams carry extra PutBitContexts */
2652 if(s->data_partitioning){
2654 d->tex_pb= s->tex_pb;
     /* last nonzero coefficient index per block */
2658 d->block_last_index[i]= s->block_last_index[i];
2659 d->interlaced_dct= s->interlaced_dct;
2660 d->qscale= s->qscale;
     /* MPEG-4 escape-3 coding state */
2662 d->esc3_level_length= s->esc3_level_length;
/*
 * Try one candidate coding mode ('type') for the current macroblock.
 * The MB is encoded into double-buffered scratch bit buffers (selected by
 * *next_block) and, for full RD decision, reconstructed into scratch
 * pixel planes so the real picture is untouched. If the candidate's cost
 * (bits, or bits*lambda2 + SSE under FF_MB_DECISION_RD) beats *dmin, its
 * state is remembered in *best via copy_context_after_encode().
 */
2665 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2666 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2667 int *dmin, int *next_block, int motion_x, int motion_y)
2670 uint8_t *dest_backup[3];
     /* start from the pristine pre-MB state */
2672 copy_context_before_encode(s, backup, type);
     /* select the scratch block array / bit buffers for this attempt */
2674 s->block= s->blocks[*next_block];
2675 s->pb= pb[*next_block];
2676 if(s->data_partitioning){
2677 s->pb2 = pb2 [*next_block];
2678 s->tex_pb= tex_pb[*next_block];
     /* redirect reconstruction into the RD scratchpad */
2682 memcpy(dest_backup, s->dest, sizeof(s->dest));
2683 s->dest[0] = s->sc.rd_scratchpad;
2684 s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2685 s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2686 av_assert0(s->linesize >= 32); //FIXME
2689 encode_mb(s, motion_x, motion_y);
     /* bit cost of this candidate (all partitions) */
2691 score= put_bits_count(&s->pb);
2692 if(s->data_partitioning){
2693 score+= put_bits_count(&s->pb2);
2694 score+= put_bits_count(&s->tex_pb);
     /* full RD: reconstruct and add the distortion term */
2697 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2698 ff_mpv_reconstruct_mb(s, s->block);
2700 score *= s->lambda2;
2701 score += sse_mb(s) << FF_LAMBDA_SHIFT;
     /* restore the real reconstruction targets */
2705 memcpy(s->dest, dest_backup, sizeof(s->dest));
     /* this candidate wins so far: remember its resulting state */
2712 copy_context_after_encode(best, s, type);
/*
 * Sum of squared differences between two w x h pixel regions.
 * The common 16x16 and 8x8 cases use the optimized mecc.sse kernels;
 * other sizes fall back to a scalar loop using the ff_square_tab LUT
 * (biased by 256 so negative differences index correctly).
 */
2716 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2717 const uint32_t *sq = ff_square_tab + 256;
2722 return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2723 else if(w==8 && h==8)
2724 return s->mecc.sse[1](NULL, src1, src2, stride, 8);
     /* generic fallback: accumulate squared per-pixel differences */
2728 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/*
 * Distortion of the current macroblock: compares the source picture
 * (new_picture) against the reconstruction in s->dest for luma and both
 * chroma planes. Full 16x16 MBs use the fast mecc.sse / mecc.nsse
 * kernels (NSSE when mb_cmp selects it); MBs clipped at the right or
 * bottom picture border fall back to the generic sse() on the clipped
 * w x h region.
 */
2737 static int sse_mb(MpegEncContext *s){
     /* clip the MB extent at the picture border */
2741 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2742 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2745 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2746 return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2747 s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2748 s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2750 return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2751 s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2752 s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
     /* border MB: clipped-size fallback (chroma halved, 4:2:0 layout) */
2755 return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2756 +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2757 +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/*
 * Slice-thread worker: motion-estimation pre-pass for P-frames.
 * Uses the pre_dia_size diamond and scans the slice's macroblocks in
 * reverse order (bottom-up, right-to-left).
 */
2760 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2761 MpegEncContext *s= *(void**)arg;
2765 s->me.dia_size= s->avctx->pre_dia_size;
2766 s->first_slice_line=1;
2767 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2768 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2769 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
     /* after the first processed row, neighbours above are available */
2771 s->first_slice_line=0;
/*
 * Slice-thread worker: full motion estimation for one slice.
 * Picks the B- or P-frame estimator per picture type and maintains
 * block_index[] (advanced by 2 per MB) for spatial MV prediction.
 */
2779 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2780 MpegEncContext *s= *(void**)arg;
2782 s->me.dia_size= s->avctx->dia_size;
2783 s->first_slice_line=1;
2784 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2785 s->mb_x=0; //for block init below
2786 ff_init_block_index(s);
2787 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
     /* step the per-block indexes one MB to the right */
2788 s->block_index[0]+=2;
2789 s->block_index[1]+=2;
2790 s->block_index[2]+=2;
2791 s->block_index[3]+=2;
2793 /* compute motion vector & mb_type and store in context */
2794 if(s->pict_type==AV_PICTURE_TYPE_B)
2795 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2797 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2799 s->first_slice_line=0;
/*
 * Slice-thread worker: per-macroblock luma variance and mean of the
 * source picture, stored into current_picture.mb_var / mb_mean and
 * summed into me.mb_var_sum_temp (consumed by rate control).
 */
2804 static int mb_var_thread(AVCodecContext *c, void *arg){
2805 MpegEncContext *s= *(void**)arg;
2808 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2809 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2812 uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2814 int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
     /* var = E[x^2] - E[x]^2, with rounding; 16x16 block => >>8 */
2816 varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2817 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2819 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2820 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2821 s->me.mb_var_sum_temp += varc;
/*
 * Terminate the current slice: codec-specific partition merging and
 * stuffing (MPEG-4), stuffing (MJPEG/AMV), or slice end (SpeedHQ),
 * then byte-align the bitstream. Under 2-pass (PASS1), the alignment
 * bits are accounted as misc_bits.
 */
2827 static void write_slice_end(MpegEncContext *s){
2828 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2829 if(s->partitioned_frame){
2830 ff_mpeg4_merge_partitions(s);
2833 ff_mpeg4_stuffing(&s->pb);
2834 } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
2835 s->out_format == FMT_MJPEG) {
2836 ff_mjpeg_encode_stuffing(s);
2837 } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
2838 ff_speedhq_end_slice(s);
     /* byte-align before the next startcode/slice */
2841 flush_put_bits(&s->pb);
2843 if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2844 s->misc_bits+= get_bits_diff(s);
/*
 * Back-fill the most recently reserved 12-byte mb_info record (see
 * update_mb_info()) with the current position: bit offset in the slice,
 * quantizer, GOB number, macroblock address and the H.263 motion-vector
 * predictors (used for RFC 4629-style packetization side data).
 */
2847 static void write_mb_info(MpegEncContext *s)
     /* point at the start of the last reserved 12-byte record */
2849 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2850 int offset = put_bits_count(&s->pb);
2851 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2852 int gobn = s->mb_y / s->gob_index;
2854 if (CONFIG_H263_ENCODER)
2855 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2856 bytestream_put_le32(&ptr, offset);
2857 bytestream_put_byte(&ptr, s->qscale);
2858 bytestream_put_byte(&ptr, gobn);
2859 bytestream_put_le16(&ptr, mba);
2860 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2861 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2862 /* 4MV not implemented */
2863 bytestream_put_byte(&ptr, 0); /* hmv2 */
2864 bytestream_put_byte(&ptr, 0); /* vmv2 */
/*
 * Bookkeeping for mb_info side data: reserve a new 12-byte record once
 * at least s->mb_info bytes of bitstream have been produced since the
 * previous record. Called with startcode=1 right after a resync marker
 * so the record snaps to the startcode position; a reserved-but-unfilled
 * record is completed on the next call (see comment below).
 */
2867 static void update_mb_info(MpegEncContext *s, int startcode)
2871 if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
2872 s->mb_info_size += 12;
2873 s->prev_mb_info = s->last_mb_info;
2876 s->prev_mb_info = put_bytes_count(&s->pb, 0);
2877 /* This might have incremented mb_info_size above, and we return without
2878 * actually writing any info into that slot yet. But in that case,
2879 * this will be called again at the start of the after writing the
2880 * start code, actually writing the mb info. */
     /* remember where this MB starts; ensure at least one record exists */
2884 s->last_mb_info = put_bytes_count(&s->pb, 0);
2885 if (!s->mb_info_size)
2886 s->mb_info_size += 12;
/*
 * Grow the shared output bit buffer when fewer than 'threshold' bytes
 * remain in s->pb. Only possible with a single slice context and when
 * pb writes into avctx->internal->byte_buffer; the PutBitContext and the
 * ptr_lastgob / vbv_delay_ptr pointers are rebased into the new buffer.
 *
 * Returns 0 on success, AVERROR(ENOMEM) if no larger buffer could be
 * obtained, AVERROR(EINVAL) if the space is still below 'threshold'
 * (e.g. growth was not permitted for this context).
 */
2890 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2892 if (put_bytes_left(&s->pb, 0) < threshold
2893 && s->slice_context_count == 1
2894 && s->pb.buf == s->avctx->internal->byte_buffer) {
     /* remember offsets of pointers into the old buffer */
2895 int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2896 int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2898 uint8_t *new_buffer = NULL;
2899 int new_buffer_size = 0;
     /* guard against int overflow of the grown size */
2901 if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2902 av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2903 return AVERROR(ENOMEM);
2908 av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2909 s->avctx->internal->byte_buffer_size + size_increase);
2911 return AVERROR(ENOMEM);
     /* copy written data, swap buffers, rebase all pointers */
2913 memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2914 av_free(s->avctx->internal->byte_buffer);
2915 s->avctx->internal->byte_buffer = new_buffer;
2916 s->avctx->internal->byte_buffer_size = new_buffer_size;
2917 rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2918 s->ptr_lastgob = s->pb.buf + lastgob_pos;
2919 s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2921 if (put_bytes_left(&s->pb, 0) < threshold)
2922 return AVERROR(EINVAL);
2926 static int encode_thread(AVCodecContext *c, void *arg){
2927 MpegEncContext *s= *(void**)arg;
2928 int mb_x, mb_y, mb_y_order;
2929 int chr_h= 16>>s->chroma_y_shift;
2931 MpegEncContext best_s = { 0 }, backup_s;
2932 uint8_t bit_buf[2][MAX_MB_BYTES];
2933 uint8_t bit_buf2[2][MAX_MB_BYTES];
2934 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2935 PutBitContext pb[2], pb2[2], tex_pb[2];
2938 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2939 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2940 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2943 s->last_bits= put_bits_count(&s->pb);
2954 /* init last dc values */
2955 /* note: quant matrix value (8) is implied here */
2956 s->last_dc[i] = 128 << s->intra_dc_precision;
2958 s->current_picture.encoding_error[i] = 0;
2960 if(s->codec_id==AV_CODEC_ID_AMV){
2961 s->last_dc[0] = 128*8/13;
2962 s->last_dc[1] = 128*8/14;
2963 s->last_dc[2] = 128*8/14;
2966 memset(s->last_mv, 0, sizeof(s->last_mv));
2970 switch(s->codec_id){
2971 case AV_CODEC_ID_H263:
2972 case AV_CODEC_ID_H263P:
2973 case AV_CODEC_ID_FLV1:
2974 if (CONFIG_H263_ENCODER)
2975 s->gob_index = H263_GOB_HEIGHT(s->height);
2977 case AV_CODEC_ID_MPEG4:
2978 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2979 ff_mpeg4_init_partitions(s);
2985 s->first_slice_line = 1;
2986 s->ptr_lastgob = s->pb.buf;
2987 for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
2988 if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
2990 mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
2991 if (first_in_slice && mb_y_order != s->start_mb_y)
2992 ff_speedhq_end_slice(s);
2993 s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
3000 ff_set_qscale(s, s->qscale);
3001 ff_init_block_index(s);
3003 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3004 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
3005 int mb_type= s->mb_type[xy];
3009 int size_increase = s->avctx->internal->byte_buffer_size/4
3010 + s->mb_width*MAX_MB_BYTES;
3012 ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
3013 if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
3014 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
3017 if(s->data_partitioning){
3018 if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
3019 put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
3020 av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3026 s->mb_y = mb_y; // moved into loop, can get changed by H.261
3027 ff_update_block_index(s);
3029 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3030 ff_h261_reorder_mb_index(s);
3031 xy= s->mb_y*s->mb_stride + s->mb_x;
3032 mb_type= s->mb_type[xy];
3035 /* write gob / video packet header */
3037 int current_packet_size, is_gob_start;
3039 current_packet_size = put_bytes_count(&s->pb, 1)
3040 - (s->ptr_lastgob - s->pb.buf);
3042 is_gob_start = s->rtp_payload_size &&
3043 current_packet_size >= s->rtp_payload_size &&
3046 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3048 switch(s->codec_id){
3049 case AV_CODEC_ID_H263:
3050 case AV_CODEC_ID_H263P:
3051 if(!s->h263_slice_structured)
3052 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3054 case AV_CODEC_ID_MPEG2VIDEO:
3055 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3056 case AV_CODEC_ID_MPEG1VIDEO:
3057 if(s->mb_skip_run) is_gob_start=0;
3059 case AV_CODEC_ID_MJPEG:
3060 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3065 if(s->start_mb_y != mb_y || mb_x!=0){
3068 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3069 ff_mpeg4_init_partitions(s);
3073 av_assert2((put_bits_count(&s->pb)&7) == 0);
3074 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3076 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3077 int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->mb_x + s->mb_y;
3078 int d = 100 / s->error_rate;
3080 current_packet_size=0;
3081 s->pb.buf_ptr= s->ptr_lastgob;
3082 av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3086 #if FF_API_RTP_CALLBACK
3087 FF_DISABLE_DEPRECATION_WARNINGS
3088 if (s->avctx->rtp_callback){
3089 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3090 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3092 FF_ENABLE_DEPRECATION_WARNINGS
3094 update_mb_info(s, 1);
3096 switch(s->codec_id){
3097 case AV_CODEC_ID_MPEG4:
3098 if (CONFIG_MPEG4_ENCODER) {
3099 ff_mpeg4_encode_video_packet_header(s);
3100 ff_mpeg4_clean_buffers(s);
3103 case AV_CODEC_ID_MPEG1VIDEO:
3104 case AV_CODEC_ID_MPEG2VIDEO:
3105 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3106 ff_mpeg1_encode_slice_header(s);
3107 ff_mpeg1_clean_buffers(s);
3110 case AV_CODEC_ID_H263:
3111 case AV_CODEC_ID_H263P:
3112 if (CONFIG_H263_ENCODER)
3113 ff_h263_encode_gob_header(s, mb_y);
3117 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3118 int bits= put_bits_count(&s->pb);
3119 s->misc_bits+= bits - s->last_bits;
3123 s->ptr_lastgob += current_packet_size;
3124 s->first_slice_line=1;
3125 s->resync_mb_x=mb_x;
3126 s->resync_mb_y=mb_y;
3130 if( (s->resync_mb_x == s->mb_x)
3131 && s->resync_mb_y+1 == s->mb_y){
3132 s->first_slice_line=0;
3136 s->dquant=0; //only for QP_RD
3138 update_mb_info(s, 0);
3140 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3142 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3144 copy_context_before_encode(&backup_s, s, -1);
3146 best_s.data_partitioning= s->data_partitioning;
3147 best_s.partitioned_frame= s->partitioned_frame;
3148 if(s->data_partitioning){
3149 backup_s.pb2= s->pb2;
3150 backup_s.tex_pb= s->tex_pb;
3153 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3154 s->mv_dir = MV_DIR_FORWARD;
3155 s->mv_type = MV_TYPE_16X16;
3157 s->mv[0][0][0] = s->p_mv_table[xy][0];
3158 s->mv[0][0][1] = s->p_mv_table[xy][1];
3159 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3160 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3162 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3163 s->mv_dir = MV_DIR_FORWARD;
3164 s->mv_type = MV_TYPE_FIELD;
3167 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3168 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3169 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3171 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3172 &dmin, &next_block, 0, 0);
3174 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3175 s->mv_dir = MV_DIR_FORWARD;
3176 s->mv_type = MV_TYPE_16X16;
3180 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3181 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3183 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3184 s->mv_dir = MV_DIR_FORWARD;
3185 s->mv_type = MV_TYPE_8X8;
3188 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3189 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3191 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3192 &dmin, &next_block, 0, 0);
3194 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3195 s->mv_dir = MV_DIR_FORWARD;
3196 s->mv_type = MV_TYPE_16X16;
3198 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3199 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3200 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3201 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3203 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3204 s->mv_dir = MV_DIR_BACKWARD;
3205 s->mv_type = MV_TYPE_16X16;
3207 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3208 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3209 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3210 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3212 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3213 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3214 s->mv_type = MV_TYPE_16X16;
3216 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3217 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3218 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3219 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3220 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3221 &dmin, &next_block, 0, 0);
3223 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3224 s->mv_dir = MV_DIR_FORWARD;
3225 s->mv_type = MV_TYPE_FIELD;
3228 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3229 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3230 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3232 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3233 &dmin, &next_block, 0, 0);
3235 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3236 s->mv_dir = MV_DIR_BACKWARD;
3237 s->mv_type = MV_TYPE_FIELD;
3240 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3241 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3242 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3244 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3245 &dmin, &next_block, 0, 0);
3247 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3248 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3249 s->mv_type = MV_TYPE_FIELD;
3251 for(dir=0; dir<2; dir++){
3253 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3254 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3255 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3258 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3259 &dmin, &next_block, 0, 0);
3261 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3263 s->mv_type = MV_TYPE_16X16;
3267 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3268 &dmin, &next_block, 0, 0);
3269 if(s->h263_pred || s->h263_aic){
3271 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3273 ff_clean_intra_table_entries(s); //old mode?
3277 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3278 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3279 const int last_qp= backup_s.qscale;
3282 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3283 static const int dquant_tab[4]={-1,1,-2,2};
3284 int storecoefs = s->mb_intra && s->dc_val[0];
3286 av_assert2(backup_s.dquant == 0);
3289 s->mv_dir= best_s.mv_dir;
3290 s->mv_type = MV_TYPE_16X16;
3291 s->mb_intra= best_s.mb_intra;
3292 s->mv[0][0][0] = best_s.mv[0][0][0];
3293 s->mv[0][0][1] = best_s.mv[0][0][1];
3294 s->mv[1][0][0] = best_s.mv[1][0][0];
3295 s->mv[1][0][1] = best_s.mv[1][0][1];
3297 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3298 for(; qpi<4; qpi++){
3299 int dquant= dquant_tab[qpi];
3300 qp= last_qp + dquant;
3301 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3303 backup_s.dquant= dquant;
3306 dc[i]= s->dc_val[0][ s->block_index[i] ];
3307 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3311 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3312 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3313 if(best_s.qscale != qp){
3316 s->dc_val[0][ s->block_index[i] ]= dc[i];
3317 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3324 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3325 int mx= s->b_direct_mv_table[xy][0];
3326 int my= s->b_direct_mv_table[xy][1];
3328 backup_s.dquant = 0;
3329 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3331 ff_mpeg4_set_direct_mv(s, mx, my);
3332 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3333 &dmin, &next_block, mx, my);
3335 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3336 backup_s.dquant = 0;
3337 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3339 ff_mpeg4_set_direct_mv(s, 0, 0);
3340 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3341 &dmin, &next_block, 0, 0);
3343 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3346 coded |= s->block_last_index[i];
3349 memcpy(s->mv, best_s.mv, sizeof(s->mv));
3350 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3351 mx=my=0; //FIXME find the one we actually used
3352 ff_mpeg4_set_direct_mv(s, mx, my);
3353 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3361 s->mv_dir= best_s.mv_dir;
3362 s->mv_type = best_s.mv_type;
3364 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3365 s->mv[0][0][1] = best_s.mv[0][0][1];
3366 s->mv[1][0][0] = best_s.mv[1][0][0];
3367 s->mv[1][0][1] = best_s.mv[1][0][1];*/
3370 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3371 &dmin, &next_block, mx, my);
3376 s->current_picture.qscale_table[xy] = best_s.qscale;
3378 copy_context_after_encode(s, &best_s, -1);
3380 pb_bits_count= put_bits_count(&s->pb);
3381 flush_put_bits(&s->pb);
3382 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3385 if(s->data_partitioning){
3386 pb2_bits_count= put_bits_count(&s->pb2);
3387 flush_put_bits(&s->pb2);
3388 ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3389 s->pb2= backup_s.pb2;
3391 tex_pb_bits_count= put_bits_count(&s->tex_pb);
3392 flush_put_bits(&s->tex_pb);
3393 ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3394 s->tex_pb= backup_s.tex_pb;
3396 s->last_bits= put_bits_count(&s->pb);
3398 if (CONFIG_H263_ENCODER &&
3399 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3400 ff_h263_update_motion_val(s);
3402 if(next_block==0){ //FIXME 16 vs linesize16
3403 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3404 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3405 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3408 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3409 ff_mpv_reconstruct_mb(s, s->block);
3411 int motion_x = 0, motion_y = 0;
3412 s->mv_type=MV_TYPE_16X16;
3413 // only one MB-Type possible
3416 case CANDIDATE_MB_TYPE_INTRA:
3419 motion_x= s->mv[0][0][0] = 0;
3420 motion_y= s->mv[0][0][1] = 0;
3422 case CANDIDATE_MB_TYPE_INTER:
3423 s->mv_dir = MV_DIR_FORWARD;
3425 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3426 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3428 case CANDIDATE_MB_TYPE_INTER_I:
3429 s->mv_dir = MV_DIR_FORWARD;
3430 s->mv_type = MV_TYPE_FIELD;
3433 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3434 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3435 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3438 case CANDIDATE_MB_TYPE_INTER4V:
3439 s->mv_dir = MV_DIR_FORWARD;
3440 s->mv_type = MV_TYPE_8X8;
3443 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3444 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3447 case CANDIDATE_MB_TYPE_DIRECT:
3448 if (CONFIG_MPEG4_ENCODER) {
3449 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3451 motion_x=s->b_direct_mv_table[xy][0];
3452 motion_y=s->b_direct_mv_table[xy][1];
3453 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3456 case CANDIDATE_MB_TYPE_DIRECT0:
3457 if (CONFIG_MPEG4_ENCODER) {
3458 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3460 ff_mpeg4_set_direct_mv(s, 0, 0);
3463 case CANDIDATE_MB_TYPE_BIDIR:
3464 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3466 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3467 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3468 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3469 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3471 case CANDIDATE_MB_TYPE_BACKWARD:
3472 s->mv_dir = MV_DIR_BACKWARD;
3474 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3475 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3477 case CANDIDATE_MB_TYPE_FORWARD:
3478 s->mv_dir = MV_DIR_FORWARD;
3480 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3481 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3483 case CANDIDATE_MB_TYPE_FORWARD_I:
3484 s->mv_dir = MV_DIR_FORWARD;
3485 s->mv_type = MV_TYPE_FIELD;
3488 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3489 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3490 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3493 case CANDIDATE_MB_TYPE_BACKWARD_I:
3494 s->mv_dir = MV_DIR_BACKWARD;
3495 s->mv_type = MV_TYPE_FIELD;
3498 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3499 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3500 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3503 case CANDIDATE_MB_TYPE_BIDIR_I:
3504 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3505 s->mv_type = MV_TYPE_FIELD;
3507 for(dir=0; dir<2; dir++){
3509 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3510 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3511 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3516 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3519 encode_mb(s, motion_x, motion_y);
3521 // RAL: Update last macroblock type
3522 s->last_mv_dir = s->mv_dir;
3524 if (CONFIG_H263_ENCODER &&
3525 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3526 ff_h263_update_motion_val(s);
3528 ff_mpv_reconstruct_mb(s, s->block);
3531 /* clean the MV table in IPS frames for direct mode in B-frames */
3532 if(s->mb_intra /* && I,P,S_TYPE */){
3533 s->p_mv_table[xy][0]=0;
3534 s->p_mv_table[xy][1]=0;
3537 if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3541 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3542 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3544 s->current_picture.encoding_error[0] += sse(
3545 s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3546 s->dest[0], w, h, s->linesize);
3547 s->current_picture.encoding_error[1] += sse(
3548 s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3549 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3550 s->current_picture.encoding_error[2] += sse(
3551 s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3552 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3555 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3556 ff_h263_loop_filter(s);
3558 ff_dlog(s->avctx, "MB %d %d bits\n",
3559 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3563 //not beautiful here but we must write it before flushing so it has to be here
3564 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3565 ff_msmpeg4_encode_ext_header(s);
3569 #if FF_API_RTP_CALLBACK
3570 FF_DISABLE_DEPRECATION_WARNINGS
3571 /* Send the last GOB if RTP */
3572 if (s->avctx->rtp_callback) {
3573 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3574 int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3575 /* Call the RTP callback to send the last GOB */
3577 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3579 FF_ENABLE_DEPRECATION_WARNINGS
3585 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold the motion-estimation statistics gathered by a slice-thread context
 * (src) back into the main context (dst). MERGE() adds src's field into
 * dst's field and zeroes src's, so repeated merges are safe.
 * NOTE(review): listing is gapped — the closing brace is elided here. */
3586 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3587 MERGE(me.scene_change_score);
3588 MERGE(me.mc_mb_var_sum_temp);
3589 MERGE(me.mb_var_sum_temp);
/* Merge per-slice-thread encoding results (bit counts, error sums, noise-
 * reduction accumulators) into the main context, then append src's
 * bitstream onto dst's. Both bitstreams must be byte-aligned (asserted)
 * because ff_copy_bits copies whole bytes.
 * NOTE(review): listing is gapped — several MERGE() lines and braces
 * between the visible ones are elided. */
3592 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3595 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3596 MERGE(dct_count[1]);
3605 MERGE(er.error_count);
3606 MERGE(padding_bug_score);
3607 MERGE(current_picture.encoding_error[0]);
3608 MERGE(current_picture.encoding_error[1]);
3609 MERGE(current_picture.encoding_error[2]);
// Noise-reduction DCT error sums are only meaningful when enabled.
3611 if (dst->noise_reduction){
3612 for(i=0; i<64; i++){
3613 MERGE(dct_error_sum[0][i]);
3614 MERGE(dct_error_sum[1][i]);
// Concatenate the slice thread's output bits after dst's (byte-aligned).
3618 av_assert1(put_bits_count(&src->pb) % 8 ==0);
3619 av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3620 ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3621 flush_put_bits(&dst->pb);
/* Choose the quantizer (lambda/quality) for the current picture.
 * Priority: a pending next_lambda (consumed unless dry_run), else the
 * rate controller (ff_rate_estimate_qscale), else fixed qscale. With
 * adaptive quantization, per-MB qscales are cleaned per codec and the
 * qscale table initialized.
 * NOTE(review): listing is gapped — error-return paths, break statements
 * and else branches between the visible lines are elided. */
3624 static int estimate_qp(MpegEncContext *s, int dry_run){
3625 if (s->next_lambda){
3626 s->current_picture_ptr->f->quality =
3627 s->current_picture.f->quality = s->next_lambda;
// Only consume next_lambda on a real run; a dry run must not change state.
3628 if(!dry_run) s->next_lambda= 0;
3629 } else if (!s->fixed_qscale) {
3630 int quality = ff_rate_estimate_qscale(s, dry_run);
3631 s->current_picture_ptr->f->quality =
3632 s->current_picture.f->quality = quality;
// A negative quality from the rate controller signals failure.
3633 if (s->current_picture.f->quality < 0)
3637 if(s->adaptive_quant){
3638 switch(s->codec_id){
3639 case AV_CODEC_ID_MPEG4:
3640 if (CONFIG_MPEG4_ENCODER)
3641 ff_clean_mpeg4_qscales(s);
3643 case AV_CODEC_ID_H263:
3644 case AV_CODEC_ID_H263P:
3645 case AV_CODEC_ID_FLV1:
3646 if (CONFIG_H263_ENCODER)
3647 ff_clean_h263_qscales(s);
3650 ff_init_qscale_tab(s);
3653 s->lambda= s->lambda_table[0];
3656 s->lambda = s->current_picture.f->quality;
3661 /* must be called before writing the header */
/* Update temporal distances used by B-frame prediction: for B-pictures,
 * pb_time (distance to the previous reference); otherwise pp_time
 * (distance between the two most recent non-B pictures) and the
 * last-non-B timestamp. Requires a valid pts on the current picture. */
3662 static void set_frame_distances(MpegEncContext * s){
3663 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
// Convert pts into the encoder's internal time units.
3664 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3666 if(s->pict_type==AV_PICTURE_TYPE_B){
3667 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3668 av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
3670 s->pp_time= s->time - s->last_non_b_time;
3671 s->last_non_b_time= s->time;
3672 av_assert1(s->picture_number==0 || s->pp_time > 0);
/* Top-level per-picture encoding driver: sets up frame distances and
 * rounding mode, estimates QP, runs (threaded) motion estimation,
 * handles scene-change promotion to I-frame, fixes up f_code/b_code and
 * long MVs, builds quant matrices for MJPEG/AMV/SpeedHQ, writes the
 * picture header for the active output format, then runs the slice
 * encode threads and merges their contexts.
 * NOTE(review): listing is gapped — declarations, break statements,
 * error returns and closing braces between the visible lines are elided;
 * comments below describe only what the visible lines show. */
3676 static int encode_picture(MpegEncContext *s, int picture_number)
3680 int context_count = s->slice_context_count;
3682 s->picture_number = picture_number;
3684 /* Reset the average MB variance */
3685 s->me.mb_var_sum_temp =
3686 s->me.mc_mb_var_sum_temp = 0;
3688 /* we need to initialize some time vars before we can encode B-frames */
3689 // RAL: Condition added for MPEG1VIDEO
3690 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3691 set_frame_distances(s);
3692 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3693 ff_set_mpeg4_time(s);
3695 s->me.scene_change_score=0;
3697 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
// Rounding control: toggled on non-B reference frames for codecs that
// use flip-flop rounding (avoids drift accumulation).
3699 if(s->pict_type==AV_PICTURE_TYPE_I){
3700 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3701 else s->no_rounding=0;
3702 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3703 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3704 s->no_rounding ^= 1;
// Lambda selection: pass-2 rate control, or reuse last lambda per type.
3707 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3708 if (estimate_qp(s,1) < 0)
3710 ff_get_2pass_fcode(s);
3711 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3712 if(s->pict_type==AV_PICTURE_TYPE_B)
3713 s->lambda= s->last_lambda_for[s->pict_type];
3715 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
// Outside MJPEG/AMV the chroma intra matrices alias the luma ones.
3719 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3720 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3721 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3722 s->q_chroma_intra_matrix = s->q_intra_matrix;
3723 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3726 s->mb_intra=0; //for the rate distortion & bit compare functions
// Clone the main context into each slice-thread context before ME.
3727 for(i=1; i<context_count; i++){
3728 ret = ff_update_duplicate_context(s->thread_context[i], s);
3736 /* Estimate motion for every MB */
3737 if(s->pict_type != AV_PICTURE_TYPE_I){
// Compensate lambda for the ME penalty so RD decisions stay consistent.
3738 s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3739 s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3740 if (s->pict_type != AV_PICTURE_TYPE_B) {
3741 if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3743 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3747 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3748 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3750 for(i=0; i<s->mb_stride*s->mb_height; i++)
3751 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3753 if(!s->fixed_qscale){
3754 /* finding spatial complexity for I-frame rate control */
3755 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3758 for(i=1; i<context_count; i++){
3759 merge_context_after_me(s, s->thread_context[i]);
3761 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3762 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
// Scene change: promote a P-frame to I and mark all MBs intra.
3765 if (s->me.scene_change_score > s->scenechange_threshold &&
3766 s->pict_type == AV_PICTURE_TYPE_P) {
3767 s->pict_type= AV_PICTURE_TYPE_I;
3768 for(i=0; i<s->mb_stride*s->mb_height; i++)
3769 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3770 if(s->msmpeg4_version >= 3)
3772 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3773 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
// f_code selection and long-MV clamping for P/S pictures.
3777 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3778 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3780 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3782 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3783 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3784 s->f_code= FFMAX3(s->f_code, a, b);
3787 ff_fix_long_p_mvs(s, s->intra_penalty ? CANDIDATE_MB_TYPE_INTER : CANDIDATE_MB_TYPE_INTRA);
3788 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3789 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3793 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3794 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
// f_code/b_code selection and long-MV clamping for B pictures.
3799 if(s->pict_type==AV_PICTURE_TYPE_B){
3802 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3803 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3804 s->f_code = FFMAX(a, b);
3806 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3807 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3808 s->b_code = FFMAX(a, b);
3810 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3811 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3812 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3813 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3814 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3816 for(dir=0; dir<2; dir++){
3819 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3820 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3821 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3822 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3830 if (estimate_qp(s, 0) < 0)
3833 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3834 s->pict_type == AV_PICTURE_TYPE_I &&
3835 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3836 s->qscale= 3; //reduce clipping problems
// MJPEG: bake qscale into the intra matrices and rebuild quant tables.
3838 if (s->out_format == FMT_MJPEG) {
3839 const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3840 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3842 if (s->avctx->intra_matrix) {
3844 luma_matrix = s->avctx->intra_matrix;
3846 if (s->avctx->chroma_intra_matrix)
3847 chroma_matrix = s->avctx->chroma_intra_matrix;
3849 /* for mjpeg, we do include qscale in the matrix */
3851 int j = s->idsp.idct_permutation[i];
3853 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3854 s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3856 s->y_dc_scale_table=
3857 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3858 s->chroma_intra_matrix[0] =
3859 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3860 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3861 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3862 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3863 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
// AMV: fixed quant tables and DC scale tables (qscale-5 SP5X tables).
3866 if(s->codec_id == AV_CODEC_ID_AMV){
3867 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3868 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3870 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3872 s->intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3873 s->chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3875 s->y_dc_scale_table= y;
3876 s->c_dc_scale_table= c;
3877 s->intra_matrix[0] = 13;
3878 s->chroma_intra_matrix[0] = 14;
3879 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3880 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3881 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3882 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3886 if (s->out_format == FMT_SPEEDHQ) {
3887 s->y_dc_scale_table=
3888 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[3];
3891 //FIXME var duplication
3892 s->current_picture_ptr->f->key_frame =
3893 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3894 s->current_picture_ptr->f->pict_type =
3895 s->current_picture.f->pict_type = s->pict_type;
3897 if (s->current_picture.f->key_frame)
3898 s->picture_in_gop_number=0;
3900 s->mb_x = s->mb_y = 0;
3901 s->last_bits= put_bits_count(&s->pb);
// Emit the picture header for whichever output format is active.
3902 switch(s->out_format) {
3903 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
3905 /* s->huffman == HUFFMAN_TABLE_OPTIMAL can only be true for MJPEG. */
3906 if (!CONFIG_MJPEG_ENCODER || s->huffman != HUFFMAN_TABLE_OPTIMAL)
3907 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3908 s->pred, s->intra_matrix, s->chroma_intra_matrix);
3912 if (CONFIG_SPEEDHQ_ENCODER)
3913 ff_speedhq_encode_picture_header(s);
3916 if (CONFIG_H261_ENCODER)
3917 ff_h261_encode_picture_header(s, picture_number);
3920 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3921 ff_wmv2_encode_picture_header(s, picture_number);
3922 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3923 ff_msmpeg4_encode_picture_header(s, picture_number);
3924 else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3925 ret = ff_mpeg4_encode_picture_header(s, picture_number);
3928 } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3929 ret = ff_rv10_encode_picture_header(s, picture_number);
3933 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3934 ff_rv20_encode_picture_header(s, picture_number);
3935 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3936 ff_flv_encode_picture_header(s, picture_number);
3937 else if (CONFIG_H263_ENCODER)
3938 ff_h263_encode_picture_header(s, picture_number);
3941 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3942 ff_mpeg1_encode_picture_header(s, picture_number);
3947 bits= put_bits_count(&s->pb);
3948 s->header_bits= bits - s->last_bits;
// Run the slice encode threads and merge their bitstreams/stats back.
3950 for(i=1; i<context_count; i++){
3951 update_duplicate_context_after_me(s->thread_context[i], s);
3953 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3954 for(i=1; i<context_count; i++){
3955 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3956 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3957 merge_context_after_encode(s, s->thread_context[i]);
/* C reference implementation of DCT-domain noise reduction: accumulates
 * per-coefficient error sums (separately for intra/inter blocks) and
 * shrinks each coefficient toward zero by the learned dct_offset,
 * clamping so the sign never flips.
 * NOTE(review): listing is gapped — the if/else structure selecting the
 * positive vs. negative branch and the store back to block[] are elided. */
3963 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3964 const int intra= s->mb_intra;
3967 s->dct_count[intra]++;
3969 for(i=0; i<64; i++){
3970 int level= block[i];
// Positive-coefficient branch: accumulate error, subtract offset, clamp at 0.
3974 s->dct_error_sum[intra][i] += level;
3975 level -= s->dct_offset[intra][i];
3976 if(level<0) level=0;
// Negative-coefficient branch: mirror of the above.
3978 s->dct_error_sum[intra][i] -= level;
3979 level += s->dct_offset[intra][i];
3980 if(level>0) level=0;
/* Trellis (rate-distortion optimal) quantization of one 8x8 block:
 * forward DCT, candidate level generation per coefficient, then a
 * Viterbi-style survivor search over (run, level) codes minimizing
 * distortion + lambda * bits. Returns the index of the last non-zero
 * coefficient, or a negative value when the block quantizes to nothing.
 * NOTE(review): listing is gapped — declarations (score_tab, survivor,
 * run_tab, level_tab, coeff, rem arrays), several branches and closing
 * braces between the visible lines are elided; comments describe only
 * what the visible lines show. */
3987 static int dct_quantize_trellis_c(MpegEncContext *s,
3988 int16_t *block, int n,
3989 int qscale, int *overflow){
3991 const uint16_t *matrix;
3992 const uint8_t *scantable;
3993 const uint8_t *perm_scantable;
3995 unsigned int threshold1, threshold2;
4007 int coeff_count[64];
4008 int qmul, qadd, start_i, last_non_zero, i, dc;
4009 const int esc_length= s->ac_esc_length;
4011 uint8_t * last_length;
4012 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
4015 s->fdsp.fdct(block);
4017 if(s->dct_error_sum)
4018 s->denoise_dct(s, block);
4020 qadd= ((qscale-1)|1)*8;
4022 if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
4023 else mpeg2_qscale = qscale << 1;
// Intra path: intra scantables, DC handled separately.
4027 scantable= s->intra_scantable.scantable;
4028 perm_scantable= s->intra_scantable.permutated;
4036 /* For AIC we skip quant/dequant of INTRADC */
4041 /* note: block[0] is assumed to be positive */
4042 block[0] = (block[0] + (q >> 1)) / q;
4045 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4046 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4047 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4048 bias= 1<<(QMAT_SHIFT-1);
4050 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4051 length = s->intra_chroma_ac_vlc_length;
4052 last_length= s->intra_chroma_ac_vlc_last_length;
4054 length = s->intra_ac_vlc_length;
4055 last_length= s->intra_ac_vlc_last_length;
// Inter path: inter scantables and quant matrix.
4058 scantable= s->inter_scantable.scantable;
4059 perm_scantable= s->inter_scantable.permutated;
4062 qmat = s->q_inter_matrix[qscale];
4063 matrix = s->inter_matrix;
4064 length = s->inter_ac_vlc_length;
4065 last_length= s->inter_ac_vlc_last_length;
4069 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4070 threshold2= (threshold1<<1);
// Find the last coefficient that survives the dead-zone threshold.
4072 for(i=63; i>=start_i; i--) {
4073 const int j = scantable[i];
4074 int level = block[j] * qmat[j];
4076 if(((unsigned)(level+threshold1))>threshold2){
// Generate up to two candidate quantized levels per coefficient.
4082 for(i=start_i; i<=last_non_zero; i++) {
4083 const int j = scantable[i];
4084 int level = block[j] * qmat[j];
4086 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4087 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4088 if(((unsigned)(level+threshold1))>threshold2){
4090 level= (bias + level)>>QMAT_SHIFT;
4092 coeff[1][i]= level-1;
4093 // coeff[2][k]= level-2;
4095 level= (bias - level)>>QMAT_SHIFT;
4096 coeff[0][i]= -level;
4097 coeff[1][i]= -level+1;
4098 // coeff[2][k]= -level+2;
4100 coeff_count[i]= FFMIN(level, 2);
4101 av_assert2(coeff_count[i]);
4104 coeff[0][i]= (level>>31)|1;
4109 *overflow= s->max_qcoeff < max; //overflow might have happened
// Entire block quantized to zero: clear and return early.
4111 if(last_non_zero < start_i){
4112 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4113 return last_non_zero;
4116 score_tab[start_i]= 0;
4117 survivor[0]= start_i;
// Main trellis: for each coefficient position, score each candidate
// level against every survivor path (distortion + lambda*bits).
4120 for(i=start_i; i<=last_non_zero; i++){
4121 int level_index, j, zero_distortion;
4122 int dct_coeff= FFABS(block[ scantable[i] ]);
4123 int best_score=256*256*256*120;
// ifast FDCT outputs are scaled; undo with the inverse AAN scales.
4125 if (s->fdsp.fdct == ff_fdct_ifast)
4126 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4127 zero_distortion= dct_coeff*dct_coeff;
4129 for(level_index=0; level_index < coeff_count[i]; level_index++){
4131 int level= coeff[level_index][i];
4132 const int alevel= FFABS(level);
// Reconstruct the dequantized value per output format to measure distortion.
4137 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4138 unquant_coeff= alevel*qmul + qadd;
4139 } else if(s->out_format == FMT_MJPEG) {
4140 j = s->idsp.idct_permutation[scantable[i]];
4141 unquant_coeff = alevel * matrix[j] * 8;
4143 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4145 unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4146 unquant_coeff = (unquant_coeff - 1) | 1;
4148 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4149 unquant_coeff = (unquant_coeff - 1) | 1;
4154 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
// Non-escape levels: use the VLC length tables directly.
4156 if((level&(~127)) == 0){
4157 for(j=survivor_count-1; j>=0; j--){
4158 int run= i - survivor[j];
4159 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4160 score += score_tab[i-run];
4162 if(score < best_score){
4165 level_tab[i+1]= level-64;
// H.263/H.261: also track the best "last coefficient" code.
4169 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4170 for(j=survivor_count-1; j>=0; j--){
4171 int run= i - survivor[j];
4172 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4173 score += score_tab[i-run];
4174 if(score < last_score){
4177 last_level= level-64;
// Escape-coded levels: flat escape cost instead of VLC lookup.
4183 distortion += esc_length*lambda;
4184 for(j=survivor_count-1; j>=0; j--){
4185 int run= i - survivor[j];
4186 int score= distortion + score_tab[i-run];
4188 if(score < best_score){
4191 level_tab[i+1]= level-64;
4195 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4196 for(j=survivor_count-1; j>=0; j--){
4197 int run= i - survivor[j];
4198 int score= distortion + score_tab[i-run];
4199 if(score < last_score){
4202 last_level= level-64;
4210 score_tab[i+1]= best_score;
// Survivor pruning; tighter bound when runs can still be short.
4212 // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4213 if(last_non_zero <= 27){
4214 for(; survivor_count; survivor_count--){
4215 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4219 for(; survivor_count; survivor_count--){
4220 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4225 survivor[ survivor_count++ ]= i+1;
// Non-H.263 formats: pick the best stopping point from the score table.
4228 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4229 last_score= 256*256*256*120;
4230 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4231 int score= score_tab[i];
4233 score += lambda * 2; // FIXME more exact?
4235 if(score < last_score){
4238 last_level= level_tab[i];
4239 last_run= run_tab[i];
4244 s->coded_score[n] = last_score;
4246 dc= FFABS(block[0]);
4247 last_non_zero= last_i - 1;
4248 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4250 if(last_non_zero < start_i)
4251 return last_non_zero;
// Degenerate case: only a single (first) coefficient remains; re-pick
// its level trading distortion against the last-code length.
4253 if(last_non_zero == 0 && start_i == 0){
4255 int best_score= dc * dc;
4257 for(i=0; i<coeff_count[0]; i++){
4258 int level= coeff[i][0];
4259 int alevel= FFABS(level);
4260 int unquant_coeff, score, distortion;
4262 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4263 unquant_coeff= (alevel*qmul + qadd)>>3;
4265 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4266 unquant_coeff = (unquant_coeff - 1) | 1;
4268 unquant_coeff = (unquant_coeff + 4) >> 3;
4269 unquant_coeff<<= 3 + 3;
4271 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4273 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4274 else score= distortion + esc_length*lambda;
4276 if(score < best_score){
4278 best_level= level - 64;
4281 block[0]= best_level;
4282 s->coded_score[n] = best_score - dc*dc;
4283 if(best_level == 0) return -1;
4284 else return last_non_zero;
// Backtrack the winning path, writing levels into the permuted block.
4288 av_assert2(last_level);
4290 block[ perm_scantable[last_non_zero] ]= last_level;
4293 for(; i>start_i; i -= run_tab[i] + 1){
4294 block[ perm_scantable[i-1] ]= level_tab[i];
4297 return last_non_zero;
// 64 permuted 8x8 DCT basis functions, filled lazily by build_basis().
4300 static int16_t basis[64][64];
/* Build the IDCT basis table (scaled by BASIS_SHIFT) in the given
 * coefficient permutation order, used by dct_quantize_refine().
 * NOTE(review): listing is gapped — the nested loops over i/j/x/y that
 * surround these lines are elided. */
4302 static void build_basis(uint8_t *perm){
4309 double s= 0.25*(1<<BASIS_SHIFT);
4311 int perm_index= perm[index];
// DC rows/columns of the DCT get the customary 1/sqrt(2) normalization.
4312 if(i==0) s*= sqrt(0.5);
4313 if(j==0) s*= sqrt(0.5);
4314 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Iteratively refine an already-quantized block: try +/-1 changes to each
 * coefficient, scoring reconstruction error in the pixel domain (via the
 * basis tables and try_8x8basis) plus the VLC bit-cost delta, and apply
 * the best change until no improvement remains. Returns the new last
 * non-zero index.
 * NOTE(review): listing is gapped — several declarations (rem, run_tab,
 * prev_run/prev_level, best_coeff/best_change), loop headers and closing
 * braces between the visible lines are elided. */
4321 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4322 int16_t *block, int16_t *weight, int16_t *orig,
4325 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4326 const uint8_t *scantable;
4327 const uint8_t *perm_scantable;
4328 // unsigned int threshold1, threshold2;
4333 int qmul, qadd, start_i, last_non_zero, i, dc;
4335 uint8_t * last_length;
4337 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
// Lazily build the DCT basis table on first use.
4339 if(basis[0][0] == 0)
4340 build_basis(s->idsp.idct_permutation);
// Intra path: intra scantables and VLC length tables.
4345 scantable= s->intra_scantable.scantable;
4346 perm_scantable= s->intra_scantable.permutated;
4353 /* For AIC we skip quant/dequant of INTRADC */
4357 q <<= RECON_SHIFT-3;
4358 /* note: block[0] is assumed to be positive */
4360 // block[0] = (block[0] + (q >> 1)) / q;
4362 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4363 // bias= 1<<(QMAT_SHIFT-1);
4364 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4365 length = s->intra_chroma_ac_vlc_length;
4366 last_length= s->intra_chroma_ac_vlc_last_length;
4368 length = s->intra_ac_vlc_length;
4369 last_length= s->intra_ac_vlc_last_length;
// Inter path: inter scantables and VLC length tables.
4372 scantable= s->inter_scantable.scantable;
4373 perm_scantable= s->inter_scantable.permutated;
4376 length = s->inter_ac_vlc_length;
4377 last_length= s->inter_ac_vlc_last_length;
4379 last_non_zero = s->block_last_index[n];
// rem[] holds the pixel-domain residual (original scaled by RECON_SHIFT).
4381 dc += (1<<(RECON_SHIFT-1));
4382 for(i=0; i<64; i++){
4383 rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
// Convert the perceptual weights into the 16..63 range used below.
4387 for(i=0; i<64; i++){
4392 w= FFABS(weight[i]) + qns*one;
4393 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4396 // w=weight[i] = (63*qns + (w/2)) / w;
4399 av_assert2(w<(1<<6));
4402 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
// Build the run-length list and subtract the current reconstruction
// from rem[] so rem holds the remaining error.
4406 for(i=start_i; i<=last_non_zero; i++){
4407 int j= perm_scantable[i];
4408 const int level= block[j];
4412 if(level<0) coeff= qmul*level - qadd;
4413 else coeff= qmul*level + qadd;
4414 run_tab[rle_index++]=run;
4417 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
// Refinement loop: the score of "change nothing" is the baseline.
4424 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4427 int run2, best_unquant_change=0, analyze_gradient;
4428 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
// Optional gradient analysis guides which sign of change is plausible.
4430 if(analyze_gradient){
4431 for(i=0; i<64; i++){
4434 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
// DC coefficient: try +/-1 with its own dequant rule (intra only).
4440 const int level= block[0];
4441 int change, old_coeff;
4443 av_assert2(s->mb_intra);
4447 for(change=-1; change<=1; change+=2){
4448 int new_level= level + change;
4449 int score, new_coeff;
4451 new_coeff= q*new_level;
4452 if(new_coeff >= 2048 || new_coeff < 0)
4455 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4456 new_coeff - old_coeff);
4457 if(score<best_score){
4460 best_change= change;
4461 best_unquant_change= new_coeff - old_coeff;
4468 run2= run_tab[rle_index++];
// AC coefficients: try +/-1 on each position, accounting for how the
// change alters the (run, level) VLC coding of neighbors.
4472 for(i=start_i; i<64; i++){
4473 int j= perm_scantable[i];
4474 const int level= block[j];
4475 int change, old_coeff;
4477 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4481 if(level<0) old_coeff= qmul*level - qadd;
4482 else old_coeff= qmul*level + qadd;
4483 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4487 av_assert2(run2>=0 || i >= last_non_zero );
4490 for(change=-1; change<=1; change+=2){
4491 int new_level= level + change;
4492 int score, new_coeff, unquant_change;
4495 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4499 if(new_level<0) new_coeff= qmul*new_level - qadd;
4500 else new_coeff= qmul*new_level + qadd;
4501 if(new_coeff >= 2048 || new_coeff <= -2048)
4503 //FIXME check for overflow
// Existing non-zero coefficient changing magnitude: bit-cost delta
// is just the length difference of the level code.
4506 if(level < 63 && level > -63){
4507 if(i < last_non_zero)
4508 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4509 - length[UNI_AC_ENC_INDEX(run, level+64)];
4511 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4512 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
4515 av_assert2(FFABS(new_level)==1);
4517 if(analyze_gradient){
4518 int g= d1[ scantable[i] ];
4519 if(g && (g^new_level) >= 0)
// Zero coefficient becoming +/-1: splits the following run in two.
4523 if(i < last_non_zero){
4524 int next_i= i + run2 + 1;
4525 int next_level= block[ perm_scantable[next_i] ] + 64;
4527 if(next_level&(~127))
4530 if(next_i < last_non_zero)
4531 score += length[UNI_AC_ENC_INDEX(run, 65)]
4532 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4533 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4535 score += length[UNI_AC_ENC_INDEX(run, 65)]
4536 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4537 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4539 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4541 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4542 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
// +/-1 coefficient becoming zero: merges two runs into one.
4548 av_assert2(FFABS(level)==1);
4550 if(i < last_non_zero){
4551 int next_i= i + run2 + 1;
4552 int next_level= block[ perm_scantable[next_i] ] + 64;
4554 if(next_level&(~127))
4557 if(next_i < last_non_zero)
4558 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4559 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4560 - length[UNI_AC_ENC_INDEX(run, 65)];
4562 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4563 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4564 - length[UNI_AC_ENC_INDEX(run, 65)];
4566 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4568 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4569 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4576 unquant_change= new_coeff - old_coeff;
4577 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
// Add the pixel-domain distortion change for this candidate.
4579 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4581 if(score<best_score){
4584 best_change= change;
4585 best_unquant_change= unquant_change;
4589 prev_level= level + 64;
4590 if(prev_level&(~127))
// Apply the winning change and maintain last_non_zero.
4600 int j= perm_scantable[ best_coeff ];
4602 block[j] += best_change;
4604 if(best_coeff > last_non_zero){
4605 last_non_zero= best_coeff;
4606 av_assert2(block[j]);
4608 for(; last_non_zero>=start_i; last_non_zero--){
4609 if(block[perm_scantable[last_non_zero]])
// Rebuild run_tab and fold the applied change back into rem[].
4616 for(i=start_i; i<=last_non_zero; i++){
4617 int j= perm_scantable[i];
4618 const int level= block[j];
4621 run_tab[rle_index++]=run;
4628 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4634 return last_non_zero;
4638 * Permute an 8x8 block according to permutation.
4639 * @param block the block which will be permuted according to
4640 * the given permutation vector
4641 * @param permutation the permutation vector
4642 * @param last the last non zero coefficient in scantable order, used to
4643 * speed the permutation up
4644 * @param scantable the used scantable, this is only used to speed the
4645 * permutation up, the block is not (inverse) permutated
4646 * to scantable order!
/* Apply the IDCT coefficient permutation in place: copy the first
 * `last`+1 coefficients (in scantable order) into a temporary, then
 * write each back at its permuted position.
 * NOTE(review): listing is gapped — the temp declaration, the
 * identity-permutation early-out, and zeroing/copy details between the
 * two visible loops are elided. */
4648 void ff_block_permute(int16_t *block, uint8_t *permutation,
4649 const uint8_t *scantable, int last)
4656 //FIXME it is ok but not clean and might fail for some permutations
4657 // if (permutation[1] == 1)
// First pass: stash the relevant coefficients (scantable order).
4660 for (i = 0; i <= last; i++) {
4661 const int j = scantable[i];
// Second pass: scatter them to their permuted destinations.
4666 for (i = 0; i <= last; i++) {
4667 const int j = scantable[i];
4668 const int perm_j = permutation[j];
4669 block[perm_j] = temp[j];
/* Plain (non-trellis) C quantizer for one 8x8 block: forward DCT,
 * optional denoising, dead-zone quantization with per-position matrices,
 * overflow flagging, and a final IDCT-order permutation of the non-zero
 * coefficients. Returns the last non-zero index in scan order.
 * NOTE(review): listing is gapped — declarations (max, qmat, bias),
 * the intra-DC handling details and sign branches are elided. */
4673 int ff_dct_quantize_c(MpegEncContext *s,
4674 int16_t *block, int n,
4675 int qscale, int *overflow)
4677 int i, j, level, last_non_zero, q, start_i;
4679 const uint8_t *scantable;
4682 unsigned int threshold1, threshold2;
4684 s->fdsp.fdct(block);
4686 if(s->dct_error_sum)
4687 s->denoise_dct(s, block);
// Intra path: separate DC quantization, intra matrix + bias.
4690 scantable= s->intra_scantable.scantable;
4698 /* For AIC we skip quant/dequant of INTRADC */
4701 /* note: block[0] is assumed to be positive */
4702 block[0] = (block[0] + (q >> 1)) / q;
4705 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4706 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
// Inter path: inter matrix + bias.
4708 scantable= s->inter_scantable.scantable;
4711 qmat = s->q_inter_matrix[qscale];
4712 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4714 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4715 threshold2= (threshold1<<1);
// Scan backwards to find the last coefficient above the dead zone.
4716 for(i=63;i>=start_i;i--) {
4718 level = block[j] * qmat[j];
4720 if(((unsigned)(level+threshold1))>threshold2){
// Quantize everything up to that point; below-threshold coeffs go to 0.
4727 for(i=start_i; i<=last_non_zero; i++) {
4729 level = block[j] * qmat[j];
4731 // if( bias+level >= (1<<QMAT_SHIFT)
4732 // || bias-level >= (1<<QMAT_SHIFT)){
4733 if(((unsigned)(level+threshold1))>threshold2){
4735 level= (bias + level)>>QMAT_SHIFT;
4738 level= (bias - level)>>QMAT_SHIFT;
4746 *overflow= s->max_qcoeff < max; //overflow might have happened
4748 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4749 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4750 ff_block_permute(block, s->idsp.idct_permutation,
4751 scantable, last_non_zero);
4753 return last_non_zero;
4756 #define OFFSET(x) offsetof(MpegEncContext, x)
4757 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Encoder-private options for the baseline H.263 encoder. */
4758 static const AVOption h263_options[] = {
4759 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4760 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass binding the option table above to the H.263 encoder context. */
4765 static const AVClass h263_class = {
4766 .class_name = "H.263 encoder",
4767 .item_name = av_default_item_name,
4768 .option = h263_options,
4769 .version = LIBAVUTIL_VERSION_INT,
/* Registration entry for the H.263 / H.263-1996 encoder; shares the generic
 * mpegvideo init/encode/close callbacks with the other encoders below. */
4772 AVCodec ff_h263_encoder = {
4774 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4775 .type = AVMEDIA_TYPE_VIDEO,
4776 .id = AV_CODEC_ID_H263,
4777 .priv_data_size = sizeof(MpegEncContext),
4778 .init = ff_mpv_encode_init,
4779 .encode2 = ff_mpv_encode_picture,
4780 .close = ff_mpv_encode_end,
4781 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4782 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4783 .priv_class = &h263_class,
/* Encoder-private options for H.263+ (adds UMV, AIV and slice options). */
4786 static const AVOption h263p_options[] = {
4787 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4788 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4789 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4790 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
/* AVClass binding h263p_options to the H.263+ encoder context. */
4794 static const AVClass h263p_class = {
4795 .class_name = "H.263p encoder",
4796 .item_name = av_default_item_name,
4797 .option = h263p_options,
4798 .version = LIBAVUTIL_VERSION_INT,
/* Registration entry for the H.263+ / H.263-1998 encoder; unlike plain
 * H.263 it additionally advertises slice-threading capability. */
4801 AVCodec ff_h263p_encoder = {
4803 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4804 .type = AVMEDIA_TYPE_VIDEO,
4805 .id = AV_CODEC_ID_H263P,
4806 .priv_data_size = sizeof(MpegEncContext),
4807 .init = ff_mpv_encode_init,
4808 .encode2 = ff_mpv_encode_picture,
4809 .close = ff_mpv_encode_end,
4810 .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4811 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4812 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4813 .priv_class = &h263p_class,
/* AVClass for the MS-MPEG4 v2 encoder; reuses the generic mpegvideo
 * option table rather than a codec-specific one. */
4816 static const AVClass msmpeg4v2_class = {
4817 .class_name = "msmpeg4v2 encoder",
4818 .item_name = av_default_item_name,
4819 .option = ff_mpv_generic_options,
4820 .version = LIBAVUTIL_VERSION_INT,
/* Registration entry for the Microsoft MPEG-4 variant v2 encoder. */
4823 AVCodec ff_msmpeg4v2_encoder = {
4824 .name = "msmpeg4v2",
4825 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4826 .type = AVMEDIA_TYPE_VIDEO,
4827 .id = AV_CODEC_ID_MSMPEG4V2,
4828 .priv_data_size = sizeof(MpegEncContext),
4829 .init = ff_mpv_encode_init,
4830 .encode2 = ff_mpv_encode_picture,
4831 .close = ff_mpv_encode_end,
4832 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4833 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4834 .priv_class = &msmpeg4v2_class,
/* AVClass for the MS-MPEG4 v3 encoder; generic mpegvideo options only. */
4837 static const AVClass msmpeg4v3_class = {
4838 .class_name = "msmpeg4v3 encoder",
4839 .item_name = av_default_item_name,
4840 .option = ff_mpv_generic_options,
4841 .version = LIBAVUTIL_VERSION_INT,
/* Registration entry for the Microsoft MPEG-4 variant v3 encoder. */
4844 AVCodec ff_msmpeg4v3_encoder = {
4846 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4847 .type = AVMEDIA_TYPE_VIDEO,
4848 .id = AV_CODEC_ID_MSMPEG4V3,
4849 .priv_data_size = sizeof(MpegEncContext),
4850 .init = ff_mpv_encode_init,
4851 .encode2 = ff_mpv_encode_picture,
4852 .close = ff_mpv_encode_end,
4853 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4854 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4855 .priv_class = &msmpeg4v3_class,
/* AVClass for the WMV1 encoder; generic mpegvideo options only. */
4858 static const AVClass wmv1_class = {
4859 .class_name = "wmv1 encoder",
4860 .item_name = av_default_item_name,
4861 .option = ff_mpv_generic_options,
4862 .version = LIBAVUTIL_VERSION_INT,
/* Registration entry for the Windows Media Video 7 (WMV1) encoder. */
4865 AVCodec ff_wmv1_encoder = {
4867 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4868 .type = AVMEDIA_TYPE_VIDEO,
4869 .id = AV_CODEC_ID_WMV1,
4870 .priv_data_size = sizeof(MpegEncContext),
4871 .init = ff_mpv_encode_init,
4872 .encode2 = ff_mpv_encode_picture,
4873 .close = ff_mpv_encode_end,
4874 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4875 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4876 .priv_class = &wmv1_class,