2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/pixdesc.h"
40 #include "libavutil/opt.h"
45 #include "mpegvideo.h"
46 #include "mpegvideodata.h"
50 #include "mjpegenc_common.h"
52 #include "mpegutils.h"
55 #include "pixblockdsp.h"
59 #include "aandcttab.h"
61 #include "mpeg4video.h"
63 #include "bytestream.h"
66 #include "packet_internal.h"
71 #define QUANT_BIAS_SHIFT 8
73 #define QMAT_SHIFT_MMX 16
76 static int encode_picture(MpegEncContext *s, int picture_number);
77 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
78 static int sse_mb(MpegEncContext *s);
79 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
80 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
82 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
83 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
85 const AVOption ff_mpv_generic_options[] = {
/* Build the integer quantization multiplier tables qmat[qscale][coef] and
 * qmat16[qscale][0/1][coef] for every qscale in [qmin, qmax], derived from
 * the given quant_matrix and the DCT implementation selected in s->fdsp.
 * The chosen shift/scale path depends on which fdct is in use (islow/faandct
 * vs ifast vs the generic 16-bit path).
 * NOTE(review): interior lines of this function are elided in this excerpt
 * (brace structure and several declarations are missing); comments only. */
void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
                       uint16_t (*qmat16)[2][64],
                       const uint16_t *quant_matrix,
                       int bias, int qmin, int qmax, int intra)
    FDCTDSPContext *fdsp = &s->fdsp;
    for (qscale = qmin; qscale <= qmax; qscale++) {
        /* MPEG-2 non-linear quantizer uses a lookup table; the linear
         * quantizer simply doubles qscale. */
        if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
        else qscale2 = qscale << 1;
        /* Accurate-DCT path: islow (8/10 bit) or faandct. */
        if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
            fdsp->fdct == ff_faandct ||
#endif /* CONFIG_FAANDCT */
            fdsp->fdct == ff_jpeg_fdct_islow_10) {
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale2 * quant_matrix[j];
                /* 16 <= qscale * quant_matrix[i] <= 7905
                 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
                 * 19952 <= x <= 249205026
                 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
                 * 3444240 >= (1 << 36) / (x) >= 275 */
                qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
        /* ifast DCT path: the AAN post-scales are folded into the matrix. */
        } else if (fdsp->fdct == ff_fdct_ifast) {
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
                /* 16 <= qscale * quant_matrix[i] <= 7905
                 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
                 * 19952 <= x <= 249205026
                 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
                 * 3444240 >= (1 << 36) / (x) >= 275 */
                qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
        /* Generic path: also fill the 16-bit tables used by SIMD quantizers. */
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale2 * quant_matrix[j];
                /* We can safely suppose that 16 <= quant_matrix[i] <= 255
                 * Assume x = qscale * quant_matrix[i]
                 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
                 * so 32768 >= (1 << 19) / (x) >= 67 */
                qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
                //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
                //                   (qscale * quant_matrix[i]);
                qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
                /* Avoid 0 and the 128*256 sentinel in the 16-bit table. */
                if (qmat16[qscale][0][i] == 0 ||
                    qmat16[qscale][0][i] == 128 * 256)
                    qmat16[qscale][0][i] = 128 * 256 - 1;
                qmat16[qscale][1][i] =
                    ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
                                qmat16[qscale][0][i]);
        /* Overflow check: find the largest coefficient and warn if the
         * multiply in the quantizer could exceed INT_MAX at this shift. */
        for (i = intra; i < 64; i++) {
            if (fdsp->fdct == ff_fdct_ifast) {
                max = (8191LL * ff_aanscales[i]) >> 14;
            while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
        av_log(s->avctx, AV_LOG_INFO,
               "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/* Derive s->qscale (and s->lambda2) from the current lambda.
 * NOTE(review): interior lines elided in this excerpt; comments only. */
static inline void update_qscale(MpegEncContext *s)
    /* Dead branch: the "&& 0" disables the non-linear qscale search below. */
    if (s->q_scale_type == 1 && 0) {
        int bestdiff=INT_MAX;
        /* Search the MPEG-2 non-linear table for the qscale whose effective
         * value best matches lambda * 139 (same mapping as the linear path). */
        for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
            int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
            if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
                (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
            if (diff < bestdiff) {
    /* Linear mapping: qscale ~ lambda * 139 / 2^(FF_LAMBDA_SHIFT+7), rounded. */
    s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
                (FF_LAMBDA_SHIFT + 7);
    /* When VBV forces qmax to be ignored, clip to the codec hard limit 31. */
    s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
    s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/* Write a 64-entry quantization matrix to the bitstream in zigzag scan
 * order, 8 bits per coefficient.
 * NOTE(review): surrounding braces/declarations elided in this excerpt. */
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
    for (i = 0; i < 64; i++) {
        put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
 * init s->current_picture.qscale_table from s->lambda_table
void ff_init_qscale_tab(MpegEncContext *s)
    int8_t * const qscale_table = s->current_picture.qscale_table;
    /* Map each macroblock's lambda to a qscale, using the same
     * lambda*139 >> (FF_LAMBDA_SHIFT+7) mapping as update_qscale(). */
    for (i = 0; i < s->mb_num; i++) {
        unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
        int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
        qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/* Copy per-frame state from the main context (src, second parameter elided
 * in this excerpt) into a slice-thread duplicate context after motion
 * estimation, so slice contexts agree on frame-global encode decisions. */
static void update_duplicate_context_after_me(MpegEncContext *dst,
#define COPY(a) dst->a= src->a
    COPY(current_picture);
    COPY(picture_in_gop_number);
    COPY(gop_picture_number);
    COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
    COPY(progressive_frame);    // FIXME don't set in encode_header
    COPY(partitioned_frame);    // FIXME don't set in encode_header
 * Set the given MpegEncContext to defaults for encoding.
 * the changed fields will not depend upon the prior state of the MpegEncContext.
static void mpv_encode_defaults(MpegEncContext *s)
    ff_mpv_common_defaults(s);
    /* Initialize the static fcode table: every mv in [-16,16) gets fcode 1. */
    for (i = -16; i < 16; i++) {
        default_fcode_tab[i + MAX_MV] = 1;
    s->me.mv_penalty = default_mv_penalty;
    s->fcode_tab = default_fcode_tab;
    /* Reset picture numbering for a fresh encode. */
    s->input_picture_number = 0;
    s->picture_in_gop_number = 0;
/* Select the DCT quantizer implementations: x86-optimized hooks first,
 * then the C fallbacks, and route through the trellis quantizer when the
 * user enabled trellis. fast_dct_quantize keeps the non-trellis version. */
av_cold int ff_dct_encode_init(MpegEncContext *s)
    ff_dct_encode_init_x86(s);
    if (CONFIG_H263_ENCODER)
        ff_h263dsp_init(&s->h263dsp);
    if (!s->dct_quantize)
        s->dct_quantize = ff_dct_quantize_c;
    s->denoise_dct = denoise_dct_c;
    s->fast_dct_quantize = s->dct_quantize;
    if (s->avctx->trellis)
        s->dct_quantize = dct_quantize_trellis_c;
/* init video encoder */
/* Validate the user's AVCodecContext settings for the selected mpegvideo
 * family codec, translate them into MpegEncContext fields, and allocate all
 * encoder-side tables and buffers. Returns 0 or a negative AVERROR.
 * NOTE(review): many interior lines of this function are elided in this
 * excerpt (missing braces, breaks and statements); comments only. */
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
    MpegEncContext *s = avctx->priv_data;
    AVCPBProperties *cpb_props;
    int i, ret, format_supported;
    mpv_encode_defaults(s);
    /* --- per-codec pixel-format validation --- */
    switch (avctx->codec_id) {
    case AV_CODEC_ID_MPEG2VIDEO:
        if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
            avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
            av_log(avctx, AV_LOG_ERROR,
                   "only YUV420 and YUV422 are supported\n");
            return AVERROR(EINVAL);
    case AV_CODEC_ID_MJPEG:
    case AV_CODEC_ID_AMV:
        format_supported = 0;
        /* JPEG color space */
        if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
            avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
            avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
            (avctx->color_range == AVCOL_RANGE_JPEG &&
             (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
              avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
              avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
            format_supported = 1;
        /* MPEG color space */
        else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
                 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
                  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
                  avctx->pix_fmt == AV_PIX_FMT_YUV444P))
            format_supported = 1;
        if (!format_supported) {
            av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
            return AVERROR(EINVAL);
        if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
            av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
            return AVERROR(EINVAL);
    /* --- derive chroma subsampling from the pixel format --- */
    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUVJ444P:
    case AV_PIX_FMT_YUV444P:
        s->chroma_format = CHROMA_444;
    case AV_PIX_FMT_YUVJ422P:
    case AV_PIX_FMT_YUV422P:
        s->chroma_format = CHROMA_422;
    case AV_PIX_FMT_YUVJ420P:
    case AV_PIX_FMT_YUV420P:
        s->chroma_format = CHROMA_420;
    avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
    /* --- copy deprecated public options into private fields --- */
#if FF_API_PRIVATE_OPT
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->rtp_payload_size)
        s->rtp_payload_size = avctx->rtp_payload_size;
    if (avctx->me_penalty_compensation)
        s->me_penalty_compensation = avctx->me_penalty_compensation;
    s->me_pre = avctx->pre_me;
    FF_ENABLE_DEPRECATION_WARNINGS
    /* --- basic geometry / GOP / B-frame setup --- */
    s->bit_rate = avctx->bit_rate;
    s->width = avctx->width;
    s->height = avctx->height;
    if (avctx->gop_size > 600 &&
        avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
        av_log(avctx, AV_LOG_WARNING,
               "keyframe interval too large!, reducing it from %d to %d\n",
               avctx->gop_size, 600);
        avctx->gop_size = 600;
    s->gop_size = avctx->gop_size;
    if (avctx->max_b_frames > MAX_B_FRAMES) {
        av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
               "is %d.\n", MAX_B_FRAMES);
        avctx->max_b_frames = MAX_B_FRAMES;
    s->max_b_frames = avctx->max_b_frames;
    s->codec_id = avctx->codec->id;
    s->strict_std_compliance = avctx->strict_std_compliance;
    s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
    s->rtp_mode = !!s->rtp_payload_size;
    s->intra_dc_precision = avctx->intra_dc_precision;
    // workaround some differences between how applications specify dc precision
    if (s->intra_dc_precision < 0) {
        s->intra_dc_precision += 8;
    } else if (s->intra_dc_precision >= 8)
        s->intra_dc_precision -= 8;
    if (s->intra_dc_precision < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "intra dc precision must be positive, note some applications use"
               " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
        return AVERROR(EINVAL);
    if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))
    if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
        av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
        return AVERROR(EINVAL);
    s->user_specified_pts = AV_NOPTS_VALUE;
    if (s->gop_size <= 1) {
    s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
    /* Adaptive quant is enabled whenever any masking option or QP-RD is set. */
    s->adaptive_quant = (avctx->lumi_masking ||
                         avctx->dark_masking ||
                         avctx->temporal_cplx_masking ||
                         avctx->spatial_cplx_masking ||
                         (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
    s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
    /* --- rate-control / VBV sanity checks --- */
    if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
        switch(avctx->codec_id) {
        case AV_CODEC_ID_MPEG1VIDEO:
        case AV_CODEC_ID_MPEG2VIDEO:
            avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
        case AV_CODEC_ID_MPEG4:
        case AV_CODEC_ID_MSMPEG4V1:
        case AV_CODEC_ID_MSMPEG4V2:
        case AV_CODEC_ID_MSMPEG4V3:
            if (avctx->rc_max_rate >= 15000000) {
                avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
            } else if(avctx->rc_max_rate >= 2000000) {
                avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
            } else if(avctx->rc_max_rate >= 384000) {
                avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
                avctx->rc_buffer_size = 40;
            avctx->rc_buffer_size *= 16384;
        if (avctx->rc_buffer_size) {
            av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
    if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
        av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
        return AVERROR(EINVAL);
    if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
        av_log(avctx, AV_LOG_INFO,
               "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
    if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
        av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
        return AVERROR(EINVAL);
    if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
        av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
        return AVERROR(EINVAL);
    if (avctx->rc_max_rate &&
        avctx->rc_max_rate == avctx->bit_rate &&
        avctx->rc_max_rate != avctx->rc_min_rate) {
        av_log(avctx, AV_LOG_INFO,
               "impossible bitrate constraints, this will fail\n");
    if (avctx->rc_buffer_size &&
        avctx->bit_rate * (int64_t)avctx->time_base.num >
        avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
        av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
        return AVERROR(EINVAL);
    if (!s->fixed_qscale &&
        avctx->bit_rate * av_q2d(avctx->time_base) >
        avctx->bit_rate_tolerance) {
        av_log(avctx, AV_LOG_WARNING,
               "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
        avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
    if (avctx->rc_max_rate &&
        avctx->rc_min_rate == avctx->rc_max_rate &&
        (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
         s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
        90000LL * (avctx->rc_buffer_size - 1) >
            avctx->rc_max_rate * 0xFFFFLL) {
        av_log(avctx, AV_LOG_INFO,
               "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
               "specified vbv buffer is too large for the given bitrate!\n");
    /* --- per-codec feature compatibility checks --- */
    if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
        s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
        s->codec_id != AV_CODEC_ID_FLV1) {
        av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
        return AVERROR(EINVAL);
    if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
        av_log(avctx, AV_LOG_ERROR,
               "OBMC is only supported with simple mb decision\n");
        return AVERROR(EINVAL);
    if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
        av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
        return AVERROR(EINVAL);
    if (s->max_b_frames &&
        s->codec_id != AV_CODEC_ID_MPEG4 &&
        s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
        s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
        av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
        return AVERROR(EINVAL);
    if (s->max_b_frames < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "max b frames must be 0 or positive for mpegvideo based encoders\n");
        return AVERROR(EINVAL);
    if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
         s->codec_id == AV_CODEC_ID_H263 ||
         s->codec_id == AV_CODEC_ID_H263P) &&
        (avctx->sample_aspect_ratio.num > 255 ||
         avctx->sample_aspect_ratio.den > 255)) {
        av_log(avctx, AV_LOG_WARNING,
               "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
               avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
        av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
                  avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
    if ((s->codec_id == AV_CODEC_ID_H263 ||
         s->codec_id == AV_CODEC_ID_H263P) &&
        (avctx->width > 2048 ||
         avctx->height > 1152 )) {
        av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
        return AVERROR(EINVAL);
    if ((s->codec_id == AV_CODEC_ID_H263 ||
         s->codec_id == AV_CODEC_ID_H263P) &&
        ((avctx->width &3) ||
         (avctx->height&3) )) {
        av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
        return AVERROR(EINVAL);
    if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
        (avctx->width > 4095 ||
         avctx->height > 4095 )) {
        av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
        return AVERROR(EINVAL);
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
        (avctx->width > 16383 ||
         avctx->height > 16383 )) {
        av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
        return AVERROR(EINVAL);
    if (s->codec_id == AV_CODEC_ID_RV10 &&
        avctx->height&15 )) {
        av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
        return AVERROR(EINVAL);
    if (s->codec_id == AV_CODEC_ID_RV20 &&
        av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
        return AVERROR(EINVAL);
    if ((s->codec_id == AV_CODEC_ID_WMV1 ||
         s->codec_id == AV_CODEC_ID_WMV2) &&
        av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
        return AVERROR(EINVAL);
    if ((avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
        s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
        av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
        return AVERROR(EINVAL);
#if FF_API_PRIVATE_OPT
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->mpeg_quant)
        s->mpeg_quant = avctx->mpeg_quant;
    FF_ENABLE_DEPRECATION_WARNINGS
    // FIXME mpeg2 uses that too
    if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
                        && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
        av_log(avctx, AV_LOG_ERROR,
               "mpeg2 style quantization not supported by codec\n");
        return AVERROR(EINVAL);
    if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
        av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
        return AVERROR(EINVAL);
    if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
        avctx->mb_decision != FF_MB_DECISION_RD) {
        av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
        return AVERROR(EINVAL);
    if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
        (s->codec_id == AV_CODEC_ID_AMV ||
         s->codec_id == AV_CODEC_ID_MJPEG)) {
        // Used to produce garbage with MJPEG.
        av_log(avctx, AV_LOG_ERROR,
               "QP RD is no longer compatible with MJPEG or AMV\n");
        return AVERROR(EINVAL);
#if FF_API_PRIVATE_OPT
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->scenechange_threshold)
        s->scenechange_threshold = avctx->scenechange_threshold;
    FF_ENABLE_DEPRECATION_WARNINGS
    if (s->scenechange_threshold < 1000000000 &&
        (avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
        av_log(avctx, AV_LOG_ERROR,
               "closed gop with scene change detection are not supported yet, "
               "set threshold to 1000000000\n");
        return AVERROR_PATCHWELCOME;
    if (avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
        if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
            s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
            av_log(avctx, AV_LOG_ERROR,
                   "low delay forcing is only available for mpeg2, "
                   "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
            return AVERROR(EINVAL);
        if (s->max_b_frames != 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "B-frames cannot be used with low delay\n");
            return AVERROR(EINVAL);
    if (s->q_scale_type == 1) {
        if (avctx->qmax > 28) {
            av_log(avctx, AV_LOG_ERROR,
                   "non linear quant only supports qmax <= 28 currently\n");
            return AVERROR_PATCHWELCOME;
    if (avctx->slices > 1 &&
        (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
        av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
        return AVERROR(EINVAL);
    if (avctx->thread_count > 1 &&
        s->codec_id != AV_CODEC_ID_MPEG4 &&
        s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
        s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
        (s->codec_id != AV_CODEC_ID_H263P)) {
        av_log(avctx, AV_LOG_ERROR,
               "multi threaded encoding not supported by codec\n");
        return AVERROR_PATCHWELCOME;
    if (avctx->thread_count < 1) {
        av_log(avctx, AV_LOG_ERROR,
               "automatic thread number detection not supported by codec, "
        return AVERROR_PATCHWELCOME;
    if (!avctx->time_base.den || !avctx->time_base.num) {
        av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
        return AVERROR(EINVAL);
#if FF_API_PRIVATE_OPT
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->b_frame_strategy)
        s->b_frame_strategy = avctx->b_frame_strategy;
    if (avctx->b_sensitivity != 40)
        s->b_sensitivity = avctx->b_sensitivity;
    FF_ENABLE_DEPRECATION_WARNINGS
    if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
        av_log(avctx, AV_LOG_INFO,
               "notice: b_frame_strategy only affects the first pass\n");
        s->b_frame_strategy = 0;
    /* Reduce the timebase to lowest terms. */
    i = av_gcd(avctx->time_base.den, avctx->time_base.num);
        av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
        avctx->time_base.den /= i;
        avctx->time_base.num /= i;
    /* --- quantizer rounding bias per codec family --- */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) {
        // (a + x * 3 / 8) / x
        s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
        s->inter_quant_bias = 0;
        s->intra_quant_bias = 0;
        s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
    if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
        av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
        return AVERROR(EINVAL);
    av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
    if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
        avctx->time_base.den > (1 << 16) - 1) {
        av_log(avctx, AV_LOG_ERROR,
               "timebase %d/%d not supported by MPEG 4 standard, "
               "the maximum admitted value for the timebase denominator "
               "is %d\n", avctx->time_base.num, avctx->time_base.den,
        return AVERROR(EINVAL);
    s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
    /* --- per-codec output format and feature flags --- */
    switch (avctx->codec->id) {
    case AV_CODEC_ID_MPEG1VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
        avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
    case AV_CODEC_ID_MPEG2VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
        avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
    case AV_CODEC_ID_MJPEG:
    case AV_CODEC_ID_AMV:
        s->out_format = FMT_MJPEG;
        s->intra_only = 1; /* force intra only for jpeg */
        if (!CONFIG_MJPEG_ENCODER)
            return AVERROR_ENCODER_NOT_FOUND;
        if ((ret = ff_mjpeg_encode_init(s)) < 0)
    case AV_CODEC_ID_H261:
        if (!CONFIG_H261_ENCODER)
            return AVERROR_ENCODER_NOT_FOUND;
        if (ff_h261_get_picture_format(s->width, s->height) < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "The specified picture size of %dx%d is not valid for the "
                   "H.261 codec.\nValid sizes are 176x144, 352x288\n",
                   s->width, s->height);
            return AVERROR(EINVAL);
        s->out_format = FMT_H261;
        s->rtp_mode = 0; /* Sliced encoding not supported */
    case AV_CODEC_ID_H263:
        if (!CONFIG_H263_ENCODER)
            return AVERROR_ENCODER_NOT_FOUND;
        if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
                             s->width, s->height) == 8) {
            av_log(avctx, AV_LOG_ERROR,
                   "The specified picture size of %dx%d is not valid for "
                   "the H.263 codec.\nValid sizes are 128x96, 176x144, "
                   "352x288, 704x576, and 1408x1152. "
                   "Try H.263+.\n", s->width, s->height);
            return AVERROR(EINVAL);
        s->out_format = FMT_H263;
    case AV_CODEC_ID_H263P:
        s->out_format = FMT_H263;
        /* AIC implies modified quantization in H.263+. */
        s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
        s->modified_quant = s->h263_aic;
        s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
        s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
        /* These are just to be sure */
    case AV_CODEC_ID_FLV1:
        s->out_format = FMT_H263;
        s->h263_flv = 2; /* format = 1; 11-bit codes */
        s->unrestricted_mv = 1;
        s->rtp_mode = 0; /* don't allow GOB */
    case AV_CODEC_ID_RV10:
        s->out_format = FMT_H263;
    case AV_CODEC_ID_RV20:
        s->out_format = FMT_H263;
        s->modified_quant = 1;
        s->unrestricted_mv = 0;
    case AV_CODEC_ID_MPEG4:
        s->out_format = FMT_H263;
        s->unrestricted_mv = 1;
        s->low_delay = s->max_b_frames ? 0 : 1;
        avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
    case AV_CODEC_ID_MSMPEG4V2:
        s->out_format = FMT_H263;
        s->unrestricted_mv = 1;
        s->msmpeg4_version = 2;
    case AV_CODEC_ID_MSMPEG4V3:
        s->out_format = FMT_H263;
        s->unrestricted_mv = 1;
        s->msmpeg4_version = 3;
        s->flipflop_rounding = 1;
    case AV_CODEC_ID_WMV1:
        s->out_format = FMT_H263;
        s->unrestricted_mv = 1;
        s->msmpeg4_version = 4;
        s->flipflop_rounding = 1;
    case AV_CODEC_ID_WMV2:
        s->out_format = FMT_H263;
        s->unrestricted_mv = 1;
        s->msmpeg4_version = 5;
        s->flipflop_rounding = 1;
        return AVERROR(EINVAL);
#if FF_API_PRIVATE_OPT
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->noise_reduction)
        s->noise_reduction = avctx->noise_reduction;
    FF_ENABLE_DEPRECATION_WARNINGS
    avctx->has_b_frames = !s->low_delay;
    s->progressive_frame =
    s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
                                                AV_CODEC_FLAG_INTERLACED_ME) ||
    /* --- allocate common state, DSP contexts and encoder tables --- */
    if ((ret = ff_mpv_common_init(s)) < 0)
    ff_fdctdsp_init(&s->fdsp, avctx);
    ff_me_cmp_init(&s->mecc, avctx);
    ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
    ff_pixblockdsp_init(&s->pdsp, avctx);
    ff_qpeldsp_init(&s->qdsp);
    if (s->msmpeg4_version) {
        int ac_stats_size = 2 * 2 * (MAX_LEVEL + 1) * (MAX_RUN + 1) * 2 * sizeof(int);
        if (!(s->ac_stats = av_mallocz(ac_stats_size)))
            return AVERROR(ENOMEM);
    if (!(avctx->stats_out = av_mallocz(256)) ||
        !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix,          32) ||
        !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix,   32) ||
        !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix,          32) ||
        !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16,        32) ||
        !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
        !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16,        32) ||
        !FF_ALLOCZ_TYPED_ARRAY(s->input_picture,           MAX_PICTURE_COUNT) ||
        !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_PICTURE_COUNT))
        return AVERROR(ENOMEM);
    if (s->noise_reduction) {
        if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
            return AVERROR(ENOMEM);
    ff_dct_encode_init(s);
    if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
        s->chroma_qscale_table = ff_h263_chroma_qscale_table;
    if (s->slice_context_count > 1) {
        if (avctx->codec_id == AV_CODEC_ID_H263P)
            s->h263_slice_structured = 1;
    s->quant_precision = 5;
#if FF_API_PRIVATE_OPT
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->frame_skip_threshold)
        s->frame_skip_threshold = avctx->frame_skip_threshold;
    if (avctx->frame_skip_factor)
        s->frame_skip_factor = avctx->frame_skip_factor;
    if (avctx->frame_skip_exp)
        s->frame_skip_exp = avctx->frame_skip_exp;
    if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
        s->frame_skip_cmp = avctx->frame_skip_cmp;
    FF_ENABLE_DEPRECATION_WARNINGS
    ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, avctx->ildct_cmp);
    ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
    if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
        ff_h261_encode_init(s);
    if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
        ff_h263_encode_init(s);
    if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
        if ((ret = ff_msmpeg4_encode_init(s)) < 0)
    if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
        && s->out_format == FMT_MPEG1)
        ff_mpeg1_encode_init(s);
    /* --- select default or user-supplied quantization matrices --- */
    for (i = 0; i < 64; i++) {
        int j = s->idsp.idct_permutation[i];
        if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
            s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
        } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
            s->chroma_intra_matrix[j] =
            s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
        if (avctx->intra_matrix)
            s->intra_matrix[j] = avctx->intra_matrix[i];
        if (avctx->inter_matrix)
            s->inter_matrix[j] = avctx->inter_matrix[i];
    /* precompute matrix */
    /* for mjpeg, we do include qscale in the matrix */
    if (s->out_format != FMT_MJPEG) {
        ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
                          s->intra_matrix, s->intra_quant_bias, avctx->qmin,
        ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
                          s->inter_matrix, s->inter_quant_bias, avctx->qmin,
    if ((ret = ff_rate_control_init(s)) < 0)
#if FF_API_PRIVATE_OPT
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->brd_scale)
        s->brd_scale = avctx->brd_scale;
    if (avctx->prediction_method)
        s->pred = avctx->prediction_method + 1;
    FF_ENABLE_DEPRECATION_WARNINGS
    /* b_frame_strategy==2 needs downscaled temp frames for lookahead. */
    if (s->b_frame_strategy == 2) {
        for (i = 0; i < s->max_b_frames + 2; i++) {
            s->tmp_frames[i] = av_frame_alloc();
            if (!s->tmp_frames[i])
                return AVERROR(ENOMEM);
            s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
            s->tmp_frames[i]->width  = s->width  >> s->brd_scale;
            s->tmp_frames[i]->height = s->height >> s->brd_scale;
            ret = av_frame_get_buffer(s->tmp_frames[i], 0);
    /* Advertise CPB properties as packet side data. */
    cpb_props = ff_add_cpb_side_data(avctx);
        return AVERROR(ENOMEM);
    cpb_props->max_bitrate = avctx->rc_max_rate;
    cpb_props->min_bitrate = avctx->rc_min_rate;
    cpb_props->avg_bitrate = avctx->bit_rate;
    cpb_props->buffer_size = avctx->rc_buffer_size;
/* Free all encoder-side state: rate control, common mpegvideo state,
 * MJPEG tables, temp lookahead frames, stats, and quant matrix tables.
 * The chroma matrices may alias the intra matrices, hence the aliasing
 * checks before freeing them.
 * NOTE(review): some interior lines elided in this excerpt; comments only. */
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
    MpegEncContext *s = avctx->priv_data;
    ff_rate_control_uninit(s);
    ff_mpv_common_end(s);
    if (CONFIG_MJPEG_ENCODER &&
        s->out_format == FMT_MJPEG)
        ff_mjpeg_encode_close(s);
    av_freep(&avctx->extradata);
    for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
        av_frame_free(&s->tmp_frames[i]);
    ff_free_picture_tables(&s->new_picture);
    ff_mpeg_unref_picture(avctx, &s->new_picture);
    av_freep(&avctx->stats_out);
    av_freep(&s->ac_stats);
    /* Only free the chroma tables when they are distinct allocations. */
    if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
    if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
    s->q_chroma_intra_matrix= NULL;
    s->q_chroma_intra_matrix16= NULL;
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);
/* Sum of absolute errors of one 16x16 block against a constant reference
 * value (typically the block mean); used as an intra-coding cost estimate.
 * NOTE(review): declarations/braces/return elided in this excerpt. */
static int get_sae(uint8_t *src, int ref, int stride)
    for (y = 0; y < 16; y++) {
        for (x = 0; x < 16; x++) {
            acc += FFABS(src[x + y * stride] - ref);
/* Count the 16x16 blocks for which intra coding looks cheaper than inter:
 * a block is counted when its SAE against its own mean (plus a 500 margin)
 * is below the SAD against the reference frame.
 * NOTE(review): interior lines elided in this excerpt; comments only. */
static int get_intra_count(MpegEncContext *s, uint8_t *src,
                           uint8_t *ref, int stride)
    h = s->height & ~15;
    for (y = 0; y < h; y += 16) {
        for (x = 0; x < w; x += 16) {
            int offset = x + y * stride;
            int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
            int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
            int sae = get_sae(src + offset, mean, stride);
            /* Count blocks where intra (SAE) beats inter (SAD) by margin. */
            acc += sae + 500 < sad;
/* Thin wrapper: allocates a Picture's buffers via ff_alloc_picture()
 * using the encoder context's geometry (the literal `1` argument marks
 * this as the encoder path). */
1138 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1140 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1141 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1142 s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1143 &s->linesize, &s->uvlinesize);
/* Takes a user-supplied frame into the encoder's input queue: validates
 * or synthesizes its pts, either references the frame directly ("direct"
 * path, when strides/alignment match) or copies it into an internal
 * buffer with edge padding, then appends it to s->input_picture[] at
 * position `encoding_delay`, shifting earlier entries when flushing.
 * NOTE(review): lines are non-contiguous here — several conditions,
 * return statements and braces sit in the gaps. */
1146 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1148 Picture *pic = NULL;
1150 int i, display_picture_number = 0, ret;
/* With B-frames the encoder must buffer max_b_frames inputs; otherwise
 * one frame of delay unless low_delay is set. */
1151 int encoding_delay = s->max_b_frames ? s->max_b_frames
1152 : (s->low_delay ? 0 : 1);
1153 int flush_offset = 1;
1158 display_picture_number = s->input_picture_number++;
/* pts sanity: must be strictly increasing versus the previous
 * user-specified pts. */
1160 if (pts != AV_NOPTS_VALUE) {
1161 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1162 int64_t last = s->user_specified_pts;
1165 av_log(s->avctx, AV_LOG_ERROR,
1166 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1168 return AVERROR(EINVAL);
/* Remember the pts step of the first two frames to later derive dts. */
1171 if (!s->low_delay && display_picture_number == 1)
1172 s->dts_delta = pts - last;
1174 s->user_specified_pts = pts;
/* No pts supplied: guess by extrapolating from the last one, or fall
 * back to the display picture number. */
1176 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1177 s->user_specified_pts =
1178 pts = s->user_specified_pts + 1;
1179 av_log(s->avctx, AV_LOG_INFO,
1180 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1183 pts = display_picture_number;
/* Direct (zero-copy) use of the caller's frame is only possible when
 * buffers exist and strides/alignment match the encoder's layout. */
1187 if (!pic_arg->buf[0] ||
1188 pic_arg->linesize[0] != s->linesize ||
1189 pic_arg->linesize[1] != s->uvlinesize ||
1190 pic_arg->linesize[2] != s->uvlinesize)
1192 if ((s->width & 15) || (s->height & 15))
1194 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1196 if (s->linesize & (STRIDE_ALIGN-1))
1199 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1200 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1202 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1206 pic = &s->picture[i];
/* Direct path: just take a reference on the caller's frame. */
1210 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1213 ret = alloc_picture(s, pic, direct);
/* If the user handed us back one of our own buffers (offset by
 * INPLACE_OFFSET), the copy below can be skipped. */
1218 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1219 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1220 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1223 int h_chroma_shift, v_chroma_shift;
1224 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
/* Plane-by-plane copy into the internal buffer, honoring chroma
 * subsampling for planes 1 and 2. */
1228 for (i = 0; i < 3; i++) {
1229 int src_stride = pic_arg->linesize[i];
1230 int dst_stride = i ? s->uvlinesize : s->linesize;
1231 int h_shift = i ? h_chroma_shift : 0;
1232 int v_shift = i ? v_chroma_shift : 0;
1233 int w = s->width >> h_shift;
1234 int h = s->height >> v_shift;
1235 uint8_t *src = pic_arg->data[i];
1236 uint8_t *dst = pic->f->data[i];
/* Interlaced MPEG-2 with heavy 32-alignment padding needs special
 * vertical padding handling — presumably vpad is raised here; the
 * assignment itself falls in a gap of this excerpt. */
1239 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1240 && !s->progressive_sequence
1241 && FFALIGN(s->height, 32) - s->height > 16)
1244 if (!s->avctx->rc_buffer_size)
1245 dst += INPLACE_OFFSET;
/* Same stride: copy the whole plane at once; otherwise row by row. */
1247 if (src_stride == dst_stride)
1248 memcpy(dst, src, src_stride * h);
1251 uint8_t *dst2 = dst;
1253 memcpy(dst2, src, w);
/* Pad non-multiple-of-16 dimensions by replicating edges. */
1258 if ((s->width & 15) || (s->height & (vpad-1))) {
1259 s->mpvencdsp.draw_edges(dst, dst_stride,
1269 ret = av_frame_copy_props(pic->f, pic_arg);
1273 pic->f->display_picture_number = display_picture_number;
1274 pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1276 /* Flushing: When we have not received enough input frames,
1277 * ensure s->input_picture[0] contains the first picture */
1278 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1279 if (s->input_picture[flush_offset])
1282 if (flush_offset <= 1)
1285 encoding_delay = encoding_delay - flush_offset + 1;
1288 /* shift buffer entries */
1289 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1290 s->input_picture[i - flush_offset] = s->input_picture[i];
1292 s->input_picture[encoding_delay] = (Picture*) pic;
/* Decides whether frame `p` is similar enough to reference `ref` to be
 * skipped entirely. Accumulates a per-8x8-block comparison score over
 * all three planes, folds it per |frame_skip_exp| (max / L1 / L2 / L3 /
 * L4), and compares against frame_skip_threshold and a lambda-scaled
 * frame_skip_factor.
 * NOTE(review): the return statements are in gaps of this excerpt. */
1297 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1301 int64_t score64 = 0;
1303 for (plane = 0; plane < 3; plane++) {
1304 const int stride = p->f->linesize[plane];
/* Luma plane covers 2x2 blocks per macroblock dimension; chroma 1x1. */
1305 const int bw = plane ? 1 : 2;
1306 for (y = 0; y < s->mb_height * bw; y++) {
1307 for (x = 0; x < s->mb_width * bw; x++) {
/* Non-shared pictures carry the INPLACE_OFFSET(16)-shifted data. */
1308 int off = p->shared ? 0 : 16;
1309 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1310 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1311 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
/* Accumulation mode selected by |frame_skip_exp|; 64-bit sums used
 * where squares/cubes could overflow int. */
1313 switch (FFABS(s->frame_skip_exp)) {
1314 case 0: score = FFMAX(score, v); break;
1315 case 1: score += FFABS(v); break;
1316 case 2: score64 += v * (int64_t)v; break;
1317 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1318 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
/* Negative exponent: normalize per macroblock and undo the power. */
1327 if (s->frame_skip_exp < 0)
1328 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1329 -1.0/s->frame_skip_exp);
1331 if (score64 < s->frame_skip_threshold)
1333 if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
/* Helper for estimate_best_b_count(): pushes one frame (or NULL to
 * flush) through a scratch encoder `c` using the send/receive API and —
 * presumably — accumulates the produced packet sizes as its return
 * value; the accumulation and return lines fall in gaps here. */
1338 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1340 AVPacket pkt = { 0 };
1344 av_init_packet(&pkt);
1346 ret = avcodec_send_frame(c, frame);
/* Drain every packet the encoder has ready. */
1351 ret = avcodec_receive_packet(c, &pkt);
1354 av_packet_unref(&pkt);
/* EAGAIN/EOF terminate the drain loop normally; anything else is a
 * real error. */
1355 } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
/* B-frame strategy 2: brute-force search for the number of consecutive
 * B-frames that minimizes rate-distortion cost. Downscales the queued
 * input pictures by 2^brd_scale into s->tmp_frames[], then for each
 * candidate count j encodes the mini-GOP with a scratch encoder and
 * scores bits (lambda2-weighted) plus reconstruction error.
 * NOTE(review): several loop bodies/returns sit in gaps of this
 * excerpt. */
1362 static int estimate_best_b_count(MpegEncContext *s)
1364 const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1365 const int scale = s->brd_scale;
1366 int width = s->width >> scale;
1367 int height = s->height >> scale;
1368 int i, j, out_size, p_lambda, b_lambda, lambda2;
1369 int64_t best_rd = INT64_MAX;
1370 int best_b_count = -1;
1373 av_assert0(scale >= 0 && scale <= 3);
1376 //s->next_picture_ptr->quality;
1377 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1378 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1379 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1380 if (!b_lambda) // FIXME we should do this somewhere else
1381 b_lambda = p_lambda;
1382 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* Fill tmp_frames[0..max_b_frames+1]: slot 0 is the previous reference
 * (next_picture_ptr), the rest come from the input queue. */
1385 for (i = 0; i < s->max_b_frames + 2; i++) {
1386 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1387 s->next_picture_ptr;
1390 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1391 pre_input = *pre_input_ptr;
1392 memcpy(data, pre_input_ptr->f->data, sizeof(data));
/* Non-shared input pictures store pixels at INPLACE_OFFSET. */
1394 if (!pre_input.shared && i) {
1395 data[0] += INPLACE_OFFSET;
1396 data[1] += INPLACE_OFFSET;
1397 data[2] += INPLACE_OFFSET;
/* Downscale luma and both chroma planes into the scratch frame. */
1400 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1401 s->tmp_frames[i]->linesize[0],
1403 pre_input.f->linesize[0],
1405 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1406 s->tmp_frames[i]->linesize[1],
1408 pre_input.f->linesize[1],
1409 width >> 1, height >> 1);
1410 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1411 s->tmp_frames[i]->linesize[2],
1413 pre_input.f->linesize[2],
1414 width >> 1, height >> 1);
/* Try every candidate B-frame run length j. */
1418 for (j = 0; j < s->max_b_frames + 1; j++) {
1422 if (!s->input_picture[j])
/* Fresh scratch encoder per candidate, mirroring the relevant
 * settings of the real one. */
1425 c = avcodec_alloc_context3(NULL);
1427 return AVERROR(ENOMEM);
1431 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1432 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1433 c->mb_decision = s->avctx->mb_decision;
1434 c->me_cmp = s->avctx->me_cmp;
1435 c->mb_cmp = s->avctx->mb_cmp;
1436 c->me_sub_cmp = s->avctx->me_sub_cmp;
1437 c->pix_fmt = AV_PIX_FMT_YUV420P;
1438 c->time_base = s->avctx->time_base;
1439 c->max_b_frames = s->max_b_frames;
1441 ret = avcodec_open2(c, codec, NULL);
/* First frame is always intra; its bits are not counted in rd. */
1445 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1446 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1448 out_size = encode_frame(c, s->tmp_frames[0]);
1454 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1456 for (i = 0; i < s->max_b_frames + 1; i++) {
/* P every (j+1)-th frame and at the end; B in between. */
1457 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1459 s->tmp_frames[i + 1]->pict_type = is_p ?
1460 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1461 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1463 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1469 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1472 /* get the delayed frames */
1473 out_size = encode_frame(c, NULL);
1478 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* Distortion: PSNR error accumulated by the scratch encoder. */
1480 rd += c->error[0] + c->error[1] + c->error[2];
1488 avcodec_free_context(&c);
1493 return best_b_count;
/* Chooses the next picture to code and its type: handles frame
 * skipping, forced I-frames, the configured B-frame strategy (0 = fixed
 * run, 1 = intra-count heuristic, 2 = RD search), GOP boundaries, and
 * finally publishes the choice as s->new_picture / current_picture_ptr,
 * reordering display order into coded order.
 * NOTE(review): lines are non-contiguous; some conditions, braces and
 * error paths sit in the gaps. */
1496 static int select_input_picture(MpegEncContext *s)
/* Advance the reordered queue by one slot. */
1500 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1501 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1502 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1504 /* set next picture type & ordering */
1505 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
/* Frame skipping: drop the input and feed zero bits to VBV. */
1506 if (s->frame_skip_threshold || s->frame_skip_factor) {
1507 if (s->picture_in_gop_number < s->gop_size &&
1508 s->next_picture_ptr &&
1509 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1510 // FIXME check that the gop check above is +-1 correct
1511 av_frame_unref(s->input_picture[0]->f);
1513 ff_vbv_update(s, 0);
/* No reference yet (or intra-only): force an I-frame. */
1519 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1520 !s->next_picture_ptr || s->intra_only) {
1521 s->reordered_input_picture[0] = s->input_picture[0];
1522 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1523 s->reordered_input_picture[0]->f->coded_picture_number =
1524 s->coded_picture_number++;
/* Two-pass: take picture types from the first-pass stats. */
1528 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1529 for (i = 0; i < s->max_b_frames + 1; i++) {
1530 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1532 if (pict_num >= s->rc_context.num_entries)
1534 if (!s->input_picture[i]) {
1535 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1539 s->input_picture[i]->f->pict_type =
1540 s->rc_context.entry[pict_num].new_pict_type;
/* Strategy 0: always use max_b_frames (trimmed at end of stream). */
1544 if (s->b_frame_strategy == 0) {
1545 b_frames = s->max_b_frames;
1546 while (b_frames && !s->input_picture[b_frames])
/* Strategy 1: shorten the B run where intra content appears
 * (b_frame_score comes from get_intra_count on adjacent frames). */
1548 } else if (s->b_frame_strategy == 1) {
1549 for (i = 1; i < s->max_b_frames + 1; i++) {
1550 if (s->input_picture[i] &&
1551 s->input_picture[i]->b_frame_score == 0) {
1552 s->input_picture[i]->b_frame_score =
1554 s->input_picture[i ]->f->data[0],
1555 s->input_picture[i - 1]->f->data[0],
1559 for (i = 0; i < s->max_b_frames + 1; i++) {
1560 if (!s->input_picture[i] ||
1561 s->input_picture[i]->b_frame_score - 1 >
1562 s->mb_num / s->b_sensitivity)
1566 b_frames = FFMAX(0, i - 1);
/* Reset cached scores so they are recomputed next time. */
1569 for (i = 0; i < b_frames + 1; i++) {
1570 s->input_picture[i]->b_frame_score = 0;
1572 } else if (s->b_frame_strategy == 2) {
1573 b_frames = estimate_best_b_count(s);
/* A user-forced non-B type inside the run truncates it. */
1580 for (i = b_frames - 1; i >= 0; i--) {
1581 int type = s->input_picture[i]->f->pict_type;
1582 if (type && type != AV_PICTURE_TYPE_B)
1585 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1586 b_frames == s->max_b_frames) {
1587 av_log(s->avctx, AV_LOG_ERROR,
1588 "warning, too many B-frames in a row\n");
/* GOP boundary handling: optionally clamp the run (strict GOP) and
 * force an I-frame at the boundary for closed GOPs. */
1591 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1592 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1593 s->gop_size > s->picture_in_gop_number) {
1594 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1596 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1598 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1602 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1603 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
/* The anchor (frame after the B run) is coded first... */
1606 s->reordered_input_picture[0] = s->input_picture[b_frames];
1607 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1608 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1609 s->reordered_input_picture[0]->f->coded_picture_number =
1610 s->coded_picture_number++;
/* ...followed by the B-frames in display order. */
1611 for (i = 0; i < b_frames; i++) {
1612 s->reordered_input_picture[i + 1] = s->input_picture[i];
1613 s->reordered_input_picture[i + 1]->f->pict_type =
1615 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1616 s->coded_picture_number++;
1621 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1623 if (s->reordered_input_picture[0]) {
/* B-frames are not used as references (reference = 0). */
1624 s->reordered_input_picture[0]->reference =
1625 s->reordered_input_picture[0]->f->pict_type !=
1626 AV_PICTURE_TYPE_B ? 3 : 0;
1628 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1631 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1632 // input is a shared pix, so we can't modify it -> allocate a new
1633 // one & ensure that the shared one is reuseable
1636 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1639 pic = &s->picture[i];
1641 pic->reference = s->reordered_input_picture[0]->reference;
1642 if (alloc_picture(s, pic, 0) < 0) {
1646 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1650 /* mark us unused / free shared pic */
1651 av_frame_unref(s->reordered_input_picture[0]->f);
1652 s->reordered_input_picture[0]->shared = 0;
1654 s->current_picture_ptr = pic;
1656 // input is not a shared pix -> reuse buffer for current_pix
1657 s->current_picture_ptr = s->reordered_input_picture[0];
1658 for (i = 0; i < 4; i++) {
1659 s->new_picture.f->data[i] += INPLACE_OFFSET;
1662 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1663 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1664 s->current_picture_ptr)) < 0)
1667 s->picture_number = s->new_picture.f->display_picture_number;
/* Post-encode bookkeeping for the just-coded frame: pads the
 * reconstructed reference with replicated edges (for unrestricted MV
 * prediction), records last picture type / lambda, and services the
 * deprecated coded_frame / error[] compatibility APIs. */
1672 static void frame_end(MpegEncContext *s)
/* Edge padding is only needed for pictures that will be referenced. */
1674 if (s->unrestricted_mv &&
1675 s->current_picture.reference &&
1677 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1678 int hshift = desc->log2_chroma_w;
1679 int vshift = desc->log2_chroma_h;
1680 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1681 s->current_picture.f->linesize[0],
1682 s->h_edge_pos, s->v_edge_pos,
1683 EDGE_WIDTH, EDGE_WIDTH,
1684 EDGE_TOP | EDGE_BOTTOM);
/* Chroma planes: dimensions and edge width scaled by the subsampling
 * shifts. */
1685 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1686 s->current_picture.f->linesize[1],
1687 s->h_edge_pos >> hshift,
1688 s->v_edge_pos >> vshift,
1689 EDGE_WIDTH >> hshift,
1690 EDGE_WIDTH >> vshift,
1691 EDGE_TOP | EDGE_BOTTOM);
1692 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1693 s->current_picture.f->linesize[2],
1694 s->h_edge_pos >> hshift,
1695 s->v_edge_pos >> vshift,
1696 EDGE_WIDTH >> hshift,
1697 EDGE_WIDTH >> vshift,
1698 EDGE_TOP | EDGE_BOTTOM);
/* Remember type/lambda for the next frame's rate-control decisions. */
1703 s->last_pict_type = s->pict_type;
1704 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1705 if (s->pict_type!= AV_PICTURE_TYPE_B)
1706 s->last_non_b_pict_type = s->pict_type;
/* Deprecated public-API mirrors, kept behind FF_API_* guards. */
1708 #if FF_API_CODED_FRAME
1709 FF_DISABLE_DEPRECATION_WARNINGS
1710 av_frame_unref(s->avctx->coded_frame);
1711 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1712 FF_ENABLE_DEPRECATION_WARNINGS
1714 #if FF_API_ERROR_FRAME
1715 FF_DISABLE_DEPRECATION_WARNINGS
1716 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1717 sizeof(s->current_picture.encoding_error));
1718 FF_ENABLE_DEPRECATION_WARNINGS
/* Refreshes the per-coefficient DCT noise-reduction offsets from the
 * accumulated error statistics, separately for intra and inter blocks.
 * Counts/sums are halved once they exceed 2^16 so the statistics decay
 * instead of growing without bound. */
1722 static void update_noise_reduction(MpegEncContext *s)
1726 for (intra = 0; intra < 2; intra++) {
1727 if (s->dct_count[intra] > (1 << 16)) {
1728 for (i = 0; i < 64; i++) {
1729 s->dct_error_sum[intra][i] >>= 1;
1731 s->dct_count[intra] >>= 1;
/* offset = noise_reduction * count / error_sum, rounded; the +1 in
 * the denominator guards against division by zero. */
1734 for (i = 0; i < 64; i++) {
1735 s->dct_offset[intra][i] = (s->noise_reduction *
1736 s->dct_count[intra] +
1737 s->dct_error_sum[intra][i] / 2) /
1738 (s->dct_error_sum[intra][i] + 1);
/* Per-frame setup before encoding: rotates last/next/current reference
 * pictures, adjusts data pointers and linesizes for field pictures,
 * selects the dequantizer functions matching the output format, and
 * refreshes noise-reduction tables when enabled.
 * NOTE(review): some returns/braces fall in gaps of this excerpt. */
1743 static int frame_start(MpegEncContext *s)
1747 /* mark & release old frames */
1748 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1749 s->last_picture_ptr != s->next_picture_ptr &&
1750 s->last_picture_ptr->f->buf[0]) {
1751 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1754 s->current_picture_ptr->f->pict_type = s->pict_type;
1755 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1757 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1758 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1759 s->current_picture_ptr)) < 0)
/* Non-B frames become the new forward reference. */
1762 if (s->pict_type != AV_PICTURE_TYPE_B) {
1763 s->last_picture_ptr = s->next_picture_ptr;
1765 s->next_picture_ptr = s->current_picture_ptr;
1768 if (s->last_picture_ptr) {
1769 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1770 if (s->last_picture_ptr->f->buf[0] &&
1771 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1772 s->last_picture_ptr)) < 0)
1775 if (s->next_picture_ptr) {
1776 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1777 if (s->next_picture_ptr->f->buf[0] &&
1778 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1779 s->next_picture_ptr)) < 0)
/* Field pictures: double the linesizes so rows of one field are
 * addressed, and start the bottom field one line down. */
1783 if (s->picture_structure!= PICT_FRAME) {
1785 for (i = 0; i < 4; i++) {
1786 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1787 s->current_picture.f->data[i] +=
1788 s->current_picture.f->linesize[i];
1790 s->current_picture.f->linesize[i] *= 2;
1791 s->last_picture.f->linesize[i] *= 2;
1792 s->next_picture.f->linesize[i] *= 2;
/* Pick the dequantizer matching the bitstream format. */
1796 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1797 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1798 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1799 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1800 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1801 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1803 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1804 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1807 if (s->dct_error_sum) {
1808 av_assert2(s->noise_reduction && s->encoding);
1809 update_noise_reduction(s);
/* Main encoding entry point: loads/selects the input picture, allocates
 * the output packet, encodes (re-encoding at higher lambda if the VBV
 * budget is exceeded), writes stuffing, patches the MPEG-1/2 vbv_delay
 * field for CBR, and fills in packet pts/dts/flags and side data.
 * NOTE(review): lines are non-contiguous; error paths, braces and some
 * statements are in the gaps of this excerpt. */
1815 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1816 const AVFrame *pic_arg, int *got_packet)
1818 MpegEncContext *s = avctx->priv_data;
1819 int i, stuffing_count, ret;
1820 int context_count = s->slice_context_count;
1822 s->vbv_ignore_qmax = 0;
1824 s->picture_in_gop_number++;
1826 if (load_input_picture(s, pic_arg) < 0)
1829 if (select_input_picture(s) < 0) {
/* Only produce output when a picture was actually selected. */
1834 if (s->new_picture.f->data[0]) {
/* With one slice context and no caller buffer, the packet can grow
 * on demand; otherwise allocate the worst case up front. */
1835 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1836 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1838 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1839 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1842 s->mb_info_ptr = av_packet_new_side_data(pkt,
1843 AV_PKT_DATA_H263_MB_INFO,
1844 s->mb_width*s->mb_height*12);
1845 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* Carve the packet into per-thread bitstream regions proportional
 * to each slice context's macroblock-row share. */
1848 for (i = 0; i < context_count; i++) {
1849 int start_y = s->thread_context[i]->start_mb_y;
1850 int end_y = s->thread_context[i]-> end_mb_y;
1851 int h = s->mb_height;
1852 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1853 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1855 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1858 s->pict_type = s->new_picture.f->pict_type;
1860 ret = frame_start(s);
1864 ret = encode_picture(s, s->picture_number);
1865 if (growing_buffer) {
1866 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1867 pkt->data = s->pb.buf;
1868 pkt->size = avctx->internal->byte_buffer_size;
/* Deprecated per-frame statistics mirrored into AVCodecContext. */
1873 #if FF_API_STAT_BITS
1874 FF_DISABLE_DEPRECATION_WARNINGS
1875 avctx->header_bits = s->header_bits;
1876 avctx->mv_bits = s->mv_bits;
1877 avctx->misc_bits = s->misc_bits;
1878 avctx->i_tex_bits = s->i_tex_bits;
1879 avctx->p_tex_bits = s->p_tex_bits;
1880 avctx->i_count = s->i_count;
1881 // FIXME f/b_count in avctx
1882 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1883 avctx->skip_count = s->skip_count;
1884 FF_ENABLE_DEPRECATION_WARNINGS
1889 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1890 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
/* VBV check: if the frame overshot the buffer budget, raise lambda
 * (and the per-MB lambda table) and re-encode the frame. */
1892 if (avctx->rc_buffer_size) {
1893 RateControlContext *rcc = &s->rc_context;
1894 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1895 int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
1896 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1898 if (put_bits_count(&s->pb) > max_size &&
1899 s->lambda < s->lmax) {
1900 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1901 (s->qscale + 1) / s->qscale);
1902 if (s->adaptive_quant) {
1904 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1905 s->lambda_table[i] =
1906 FFMAX(s->lambda_table[i] + min_step,
1907 s->lambda_table[i] * (s->qscale + 1) /
1910 s->mb_skipped = 0; // done in frame_start()
1911 // done in encode_picture() so we must undo it
1912 if (s->pict_type == AV_PICTURE_TYPE_P) {
1913 if (s->flipflop_rounding ||
1914 s->codec_id == AV_CODEC_ID_H263P ||
1915 s->codec_id == AV_CODEC_ID_MPEG4)
1916 s->no_rounding ^= 1;
1918 if (s->pict_type != AV_PICTURE_TYPE_B) {
1919 s->time_base = s->last_time_base;
1920 s->last_non_b_time = s->time - s->pp_time;
/* Rewind every thread's bitstream writer for the retry. */
1922 for (i = 0; i < context_count; i++) {
1923 PutBitContext *pb = &s->thread_context[i]->pb;
1924 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1926 s->vbv_ignore_qmax = 1;
1927 av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1931 av_assert0(avctx->rc_max_rate);
1934 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1935 ff_write_pass1_stats(s);
1937 for (i = 0; i < 4; i++) {
1938 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1939 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1941 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1942 s->current_picture_ptr->encoding_error,
1943 (avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
/* Sanity: pass-1 bit accounting must add up to the written bits. */
1946 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1947 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1948 s->misc_bits + s->i_tex_bits +
1950 flush_put_bits(&s->pb);
1951 s->frame_bits = put_bits_count(&s->pb);
1953 stuffing_count = ff_vbv_update(s, s->frame_bits);
1954 s->stuffing_bits = 8*stuffing_count;
/* Write VBV stuffing requested by rate control, codec-specific. */
1955 if (stuffing_count) {
1956 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1957 stuffing_count + 50) {
1958 av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
1962 switch (s->codec_id) {
1963 case AV_CODEC_ID_MPEG1VIDEO:
1964 case AV_CODEC_ID_MPEG2VIDEO:
1965 while (stuffing_count--) {
1966 put_bits(&s->pb, 8, 0);
/* MPEG-4 stuffing: a 0x1C3 marker then 0xFF filler bytes. */
1969 case AV_CODEC_ID_MPEG4:
1970 put_bits(&s->pb, 16, 0);
1971 put_bits(&s->pb, 16, 0x1C3);
1972 stuffing_count -= 4;
1973 while (stuffing_count--) {
1974 put_bits(&s->pb, 8, 0xFF);
1978 av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1980 flush_put_bits(&s->pb);
1981 s->frame_bits = put_bits_count(&s->pb);
1984 /* update MPEG-1/2 vbv_delay for CBR */
1985 if (avctx->rc_max_rate &&
1986 avctx->rc_min_rate == avctx->rc_max_rate &&
1987 s->out_format == FMT_MPEG1 &&
1988 90000LL * (avctx->rc_buffer_size - 1) <=
1989 avctx->rc_max_rate * 0xFFFFLL) {
1990 AVCPBProperties *props;
1993 int vbv_delay, min_delay;
1994 double inbits = avctx->rc_max_rate *
1995 av_q2d(avctx->time_base);
1996 int minbits = s->frame_bits - 8 *
1997 (s->vbv_delay_ptr - s->pb.buf - 1);
1998 double bits = s->rc_context.buffer_index + minbits - inbits;
2001 av_log(avctx, AV_LOG_ERROR,
2002 "Internal error, negative bits\n");
2004 av_assert1(s->repeat_first_field == 0);
/* vbv_delay is expressed in 90 kHz clock ticks. */
2006 vbv_delay = bits * 90000 / avctx->rc_max_rate;
2007 min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
2010 vbv_delay = FFMAX(vbv_delay, min_delay);
2012 av_assert0(vbv_delay < 0xFFFF);
/* Patch the 16-bit vbv_delay field in-place in the already-written
 * picture header (it straddles byte boundaries). */
2014 s->vbv_delay_ptr[0] &= 0xF8;
2015 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2016 s->vbv_delay_ptr[1] = vbv_delay >> 5;
2017 s->vbv_delay_ptr[2] &= 0x07;
2018 s->vbv_delay_ptr[2] |= vbv_delay << 3;
2020 props = av_cpb_properties_alloc(&props_size);
2022 return AVERROR(ENOMEM);
2023 props->vbv_delay = vbv_delay * 300;
2025 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
2026 (uint8_t*)props, props_size);
2032 #if FF_API_VBV_DELAY
2033 FF_DISABLE_DEPRECATION_WARNINGS
2034 avctx->vbv_delay = vbv_delay * 300;
2035 FF_ENABLE_DEPRECATION_WARNINGS
2038 s->total_bits += s->frame_bits;
2039 #if FF_API_STAT_BITS
2040 FF_DISABLE_DEPRECATION_WARNINGS
2041 avctx->frame_bits = s->frame_bits;
2042 FF_ENABLE_DEPRECATION_WARNINGS
/* dts: delayed by dts_delta for streams with B-frame reordering. */
2046 pkt->pts = s->current_picture.f->pts;
2047 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2048 if (!s->current_picture.f->coded_picture_number)
2049 pkt->dts = pkt->pts - s->dts_delta;
2051 pkt->dts = s->reordered_pts;
2052 s->reordered_pts = pkt->pts;
2054 pkt->dts = pkt->pts;
2055 if (s->current_picture.f->key_frame)
2056 pkt->flags |= AV_PKT_FLAG_KEY;
2058 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2063 /* release non-reference frames */
2064 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2065 if (!s->picture[i].reference)
2066 ff_mpeg_unref_picture(avctx, &s->picture[i]);
2069 av_assert1((s->frame_bits & 7) == 0);
2071 pkt->size = s->frame_bits / 8;
2072 *got_packet = !!pkt->size;
/* Zeroes out a DCT block whose coefficients are so sparse/small that
 * transmitting them is not worth the bits: each |coeff|==1 contributes
 * a position-dependent weight from `tab` (low frequencies cost more);
 * any |coeff|>1 aborts elimination. If the weighted score stays below
 * `threshold`, all (AC) coefficients are cleared.
 * NOTE(review): the skip_dc setup and score accumulation partly fall in
 * gaps of this excerpt. */
2076 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2077 int n, int threshold)
/* Per-scan-position weights; positions past the first 3 rows count 0. */
2079 static const char tab[64] = {
2080 3, 2, 2, 1, 1, 1, 1, 1,
2081 1, 1, 1, 1, 1, 1, 1, 1,
2082 1, 1, 1, 1, 1, 1, 1, 1,
2083 0, 0, 0, 0, 0, 0, 0, 0,
2084 0, 0, 0, 0, 0, 0, 0, 0,
2085 0, 0, 0, 0, 0, 0, 0, 0,
2086 0, 0, 0, 0, 0, 0, 0, 0,
2087 0, 0, 0, 0, 0, 0, 0, 0
2092 int16_t *block = s->block[n];
2093 const int last_index = s->block_last_index[n];
/* Negative threshold means: keep the DC coefficient. */
2096 if (threshold < 0) {
2098 threshold = -threshold;
2102 /* Are all we could set to zero already zero? */
2103 if (last_index <= skip_dc - 1)
2106 for (i = 0; i <= last_index; i++) {
2107 const int j = s->intra_scantable.permutated[i];
2108 const int level = FFABS(block[j]);
2110 if (skip_dc && i == 0)
2114 } else if (level > 1) {
/* Score too high: block carries real information, keep it. */
2120 if (score >= threshold)
2122 for (i = skip_dc; i <= last_index; i++) {
2123 const int j = s->intra_scantable.permutated[i];
/* last_index 0 keeps only DC; -1 marks a fully empty block. */
2127 s->block_last_index[n] = 0;
2129 s->block_last_index[n] = -1;
/* Clamps quantized coefficients to the codec's representable range
 * [min_qcoeff, max_qcoeff], counting clipped values and warning once
 * per block in simple MB-decision mode (RD modes handle overflow
 * differently). The intra DC coefficient is never clipped. */
2132 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2136 const int maxlevel = s->max_qcoeff;
2137 const int minlevel = s->min_qcoeff;
2141 i = 1; // skip clipping of intra dc
2145 for (; i <= last_index; i++) {
2146 const int j = s->intra_scantable.permutated[i];
2147 int level = block[j];
2149 if (level > maxlevel) {
2152 } else if (level < minlevel) {
2160 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2161 av_log(s->avctx, AV_LOG_INFO,
2162 "warning, clipping %d dct coefficients to %d..%d\n",
2163 overflow, minlevel, maxlevel);
/* Computes an 8x8 perceptual weight matrix from the local activity of
 * the source pixels: for each pixel, the standard deviation over its
 * 3x3 neighborhood (clamped at block edges) is scaled to produce the
 * weight — flat areas get small weights, textured areas large ones.
 * Used by the noise-shaping quantizer. */
2166 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2170 for (y = 0; y < 8; y++) {
2171 for (x = 0; x < 8; x++) {
/* 3x3 neighborhood clipped to the 8x8 block. */
2177 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2178 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2179 int v = ptr[x2 + y2 * stride];
/* 36*sqrt(count*sum_sq - sum^2)/count ~ scaled std deviation. */
2185 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2190 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2191 int motion_x, int motion_y,
2192 int mb_block_height,
2196 int16_t weight[12][64];
2197 int16_t orig[12][64];
2198 const int mb_x = s->mb_x;
2199 const int mb_y = s->mb_y;
2202 int dct_offset = s->linesize * 8; // default for progressive frames
2203 int uv_dct_offset = s->uvlinesize * 8;
2204 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2205 ptrdiff_t wrap_y, wrap_c;
2207 for (i = 0; i < mb_block_count; i++)
2208 skip_dct[i] = s->skipdct;
2210 if (s->adaptive_quant) {
2211 const int last_qp = s->qscale;
2212 const int mb_xy = mb_x + mb_y * s->mb_stride;
2214 s->lambda = s->lambda_table[mb_xy];
2217 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2218 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2219 s->dquant = s->qscale - last_qp;
2221 if (s->out_format == FMT_H263) {
2222 s->dquant = av_clip(s->dquant, -2, 2);
2224 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2226 if (s->pict_type == AV_PICTURE_TYPE_B) {
2227 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2230 if (s->mv_type == MV_TYPE_8X8)
2236 ff_set_qscale(s, last_qp + s->dquant);
2237 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2238 ff_set_qscale(s, s->qscale + s->dquant);
2240 wrap_y = s->linesize;
2241 wrap_c = s->uvlinesize;
2242 ptr_y = s->new_picture.f->data[0] +
2243 (mb_y * 16 * wrap_y) + mb_x * 16;
2244 ptr_cb = s->new_picture.f->data[1] +
2245 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2246 ptr_cr = s->new_picture.f->data[2] +
2247 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2249 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2250 uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2251 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2252 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2253 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2255 16, 16, mb_x * 16, mb_y * 16,
2256 s->width, s->height);
2258 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2260 mb_block_width, mb_block_height,
2261 mb_x * mb_block_width, mb_y * mb_block_height,
2263 ptr_cb = ebuf + 16 * wrap_y;
2264 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2266 mb_block_width, mb_block_height,
2267 mb_x * mb_block_width, mb_y * mb_block_height,
2269 ptr_cr = ebuf + 16 * wrap_y + 16;
2273 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2274 int progressive_score, interlaced_score;
2276 s->interlaced_dct = 0;
2277 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2278 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2279 NULL, wrap_y, 8) - 400;
2281 if (progressive_score > 0) {
2282 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2283 NULL, wrap_y * 2, 8) +
2284 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2285 NULL, wrap_y * 2, 8);
2286 if (progressive_score > interlaced_score) {
2287 s->interlaced_dct = 1;
2289 dct_offset = wrap_y;
2290 uv_dct_offset = wrap_c;
2292 if (s->chroma_format == CHROMA_422 ||
2293 s->chroma_format == CHROMA_444)
2299 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2300 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2301 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2302 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2304 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2308 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2309 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2310 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2311 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2312 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2313 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2314 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2315 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2316 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2317 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2318 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2319 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2323 op_pixels_func (*op_pix)[4];
2324 qpel_mc_func (*op_qpix)[16];
2325 uint8_t *dest_y, *dest_cb, *dest_cr;
2327 dest_y = s->dest[0];
2328 dest_cb = s->dest[1];
2329 dest_cr = s->dest[2];
2331 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2332 op_pix = s->hdsp.put_pixels_tab;
2333 op_qpix = s->qdsp.put_qpel_pixels_tab;
2335 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2336 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2339 if (s->mv_dir & MV_DIR_FORWARD) {
2340 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2341 s->last_picture.f->data,
2343 op_pix = s->hdsp.avg_pixels_tab;
2344 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2346 if (s->mv_dir & MV_DIR_BACKWARD) {
2347 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2348 s->next_picture.f->data,
2352 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2353 int progressive_score, interlaced_score;
2355 s->interlaced_dct = 0;
2356 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2357 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2361 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2362 progressive_score -= 400;
2364 if (progressive_score > 0) {
2365 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2367 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2371 if (progressive_score > interlaced_score) {
2372 s->interlaced_dct = 1;
2374 dct_offset = wrap_y;
2375 uv_dct_offset = wrap_c;
2377 if (s->chroma_format == CHROMA_422)
2383 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2384 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2385 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2386 dest_y + dct_offset, wrap_y);
2387 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2388 dest_y + dct_offset + 8, wrap_y);
2390 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2394 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2395 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2396 if (!s->chroma_y_shift) { /* 422 */
2397 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2398 dest_cb + uv_dct_offset, wrap_c);
2399 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2400 dest_cr + uv_dct_offset, wrap_c);
2403 /* pre quantization */
2404 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2405 2 * s->qscale * s->qscale) {
2407 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2409 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2411 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2412 wrap_y, 8) < 20 * s->qscale)
2414 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2415 wrap_y, 8) < 20 * s->qscale)
2417 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2419 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2421 if (!s->chroma_y_shift) { /* 422 */
2422 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2423 dest_cb + uv_dct_offset,
2424 wrap_c, 8) < 20 * s->qscale)
2426 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2427 dest_cr + uv_dct_offset,
2428 wrap_c, 8) < 20 * s->qscale)
2434 if (s->quantizer_noise_shaping) {
2436 get_visual_weight(weight[0], ptr_y , wrap_y);
2438 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2440 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2442 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2444 get_visual_weight(weight[4], ptr_cb , wrap_c);
2446 get_visual_weight(weight[5], ptr_cr , wrap_c);
2447 if (!s->chroma_y_shift) { /* 422 */
2449 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2452 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2455 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2458 /* DCT & quantize */
2459 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2461 for (i = 0; i < mb_block_count; i++) {
2464 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2465 // FIXME we could decide to change to quantizer instead of
2467 // JS: I don't think that would be a good idea it could lower
2468 // quality instead of improve it. Just INTRADC clipping
2469 // deserves changes in quantizer
2471 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2473 s->block_last_index[i] = -1;
2475 if (s->quantizer_noise_shaping) {
2476 for (i = 0; i < mb_block_count; i++) {
2478 s->block_last_index[i] =
2479 dct_quantize_refine(s, s->block[i], weight[i],
2480 orig[i], i, s->qscale);
2485 if (s->luma_elim_threshold && !s->mb_intra)
2486 for (i = 0; i < 4; i++)
2487 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2488 if (s->chroma_elim_threshold && !s->mb_intra)
2489 for (i = 4; i < mb_block_count; i++)
2490 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2492 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2493 for (i = 0; i < mb_block_count; i++) {
2494 if (s->block_last_index[i] == -1)
2495 s->coded_score[i] = INT_MAX / 256;
2500 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2501 s->block_last_index[4] =
2502 s->block_last_index[5] = 0;
2504 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2505 if (!s->chroma_y_shift) { /* 422 / 444 */
2506 for (i=6; i<12; i++) {
2507 s->block_last_index[i] = 0;
2508 s->block[i][0] = s->block[4][0];
2513 // non c quantize code returns incorrect block_last_index FIXME
2514 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2515 for (i = 0; i < mb_block_count; i++) {
2517 if (s->block_last_index[i] > 0) {
2518 for (j = 63; j > 0; j--) {
2519 if (s->block[i][s->intra_scantable.permutated[j]])
2522 s->block_last_index[i] = j;
2527 /* huffman encode */
2528 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2529 case AV_CODEC_ID_MPEG1VIDEO:
2530 case AV_CODEC_ID_MPEG2VIDEO:
2531 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2532 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2534 case AV_CODEC_ID_MPEG4:
2535 if (CONFIG_MPEG4_ENCODER)
2536 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2538 case AV_CODEC_ID_MSMPEG4V2:
2539 case AV_CODEC_ID_MSMPEG4V3:
2540 case AV_CODEC_ID_WMV1:
2541 if (CONFIG_MSMPEG4_ENCODER)
2542 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2544 case AV_CODEC_ID_WMV2:
2545 if (CONFIG_WMV2_ENCODER)
2546 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2548 case AV_CODEC_ID_H261:
2549 if (CONFIG_H261_ENCODER)
2550 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2552 case AV_CODEC_ID_H263:
2553 case AV_CODEC_ID_H263P:
2554 case AV_CODEC_ID_FLV1:
2555 case AV_CODEC_ID_RV10:
2556 case AV_CODEC_ID_RV20:
2557 if (CONFIG_H263_ENCODER)
2558 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2560 case AV_CODEC_ID_MJPEG:
2561 case AV_CODEC_ID_AMV:
2562 if (CONFIG_MJPEG_ENCODER)
2563 ff_mjpeg_encode_mb(s, s->block);
2570 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2572 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2573 else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2574 else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/**
 * Snapshot the encoder state that encoding a single macroblock may
 * modify, from *s into *d, so a candidate coding mode can be tried and
 * the state restored/compared afterwards (used by the RD mode decision).
 * NOTE(review): several statements of the original function (loop
 * headers, PutBitContext copies) are not visible in this extract; only
 * the lines shown here are kept.
 */
static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?

    /* MPEG-1 style macroblock skip run */
    d->mb_skip_run= s->mb_skip_run;
        d->last_dc[i] = s->last_dc[i]; /* per-component intra DC predictors */

    /* bit-usage statistics */
    d->mv_bits= s->mv_bits;
    d->i_tex_bits= s->i_tex_bits;
    d->p_tex_bits= s->p_tex_bits;
    d->i_count= s->i_count;
    d->f_count= s->f_count;
    d->b_count= s->b_count;
    d->skip_count= s->skip_count;
    d->misc_bits= s->misc_bits;

    d->qscale= s->qscale;
    d->dquant= s->dquant;

    /* MSMPEG4 escape-3 run/level coding state */
    d->esc3_level_length= s->esc3_level_length;
/**
 * Copy back, from the trial context *s into *d, everything a macroblock
 * encode could have changed: motion vectors, statistics, MB coding
 * decisions, and (with data partitioning) the partition bit writers.
 * Counterpart of copy_context_before_encode(); used to keep the best
 * candidate found by the RD mode decision.
 * NOTE(review): some statements of the original (loop headers, pb/pb2
 * copies, braces) are missing from this extract; visible lines kept.
 */
static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
    memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?

    /* MPEG-1 style macroblock skip run */
    d->mb_skip_run= s->mb_skip_run;
        d->last_dc[i] = s->last_dc[i]; /* per-component intra DC predictors */

    /* bit-usage statistics */
    d->mv_bits= s->mv_bits;
    d->i_tex_bits= s->i_tex_bits;
    d->p_tex_bits= s->p_tex_bits;
    d->i_count= s->i_count;
    d->f_count= s->f_count;
    d->b_count= s->b_count;
    d->skip_count= s->skip_count;
    d->misc_bits= s->misc_bits;

    /* per-MB coding decisions of the tried mode */
    d->mb_intra= s->mb_intra;
    d->mb_skipped= s->mb_skipped;
    d->mv_type= s->mv_type;
    d->mv_dir= s->mv_dir;
    if(s->data_partitioning){
        d->tex_pb= s->tex_pb;
        d->block_last_index[i]= s->block_last_index[i];
    d->interlaced_dct= s->interlaced_dct;
    d->qscale= s->qscale;

    /* MSMPEG4 escape-3 run/level coding state */
    d->esc3_level_length= s->esc3_level_length;
/**
 * Try encoding the current macroblock with one candidate mode (@p type)
 * and keep it in *best if it beats the current best score *dmin.
 * The trial writes into one of two ping-pong bit buffers selected by
 * *next_block; with FF_MB_DECISION_RD the pixels are reconstructed into
 * a scratchpad and the score is a full rate-distortion cost
 * (bits * lambda2 + SSE), otherwise it is the bit count alone.
 * NOTE(review): score declaration, the dmin comparison and some braces
 * are not visible in this extract; visible lines are kept as-is.
 */
static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
                           PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
                           int *dmin, int *next_block, int motion_x, int motion_y)
    uint8_t *dest_backup[3];

    /* start each trial from the identical saved state */
    copy_context_before_encode(s, backup, type);

    s->block= s->blocks[*next_block];
    s->pb= pb[*next_block];
    if(s->data_partitioning){
        s->pb2   = pb2 [*next_block];
        s->tex_pb= tex_pb[*next_block];

        /* redirect reconstruction into the RD scratchpad so the real
         * picture is only written once the best mode is chosen */
        memcpy(dest_backup, s->dest, sizeof(s->dest));
        s->dest[0] = s->sc.rd_scratchpad;
        s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
        s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
        av_assert0(s->linesize >= 32); //FIXME

    encode_mb(s, motion_x, motion_y);

    score= put_bits_count(&s->pb);
    if(s->data_partitioning){
        score+= put_bits_count(&s->pb2);
        score+= put_bits_count(&s->tex_pb);

    if(s->avctx->mb_decision == FF_MB_DECISION_RD){
        ff_mpv_reconstruct_mb(s, s->block);

        /* rate-distortion cost: bits scaled by lambda2 plus distortion */
        score *= s->lambda2;
        score += sse_mb(s) << FF_LAMBDA_SHIFT;

        memcpy(s->dest, dest_backup, sizeof(s->dest));

        copy_context_after_encode(best, s, type);
/**
 * Sum of squared errors between two w x h pixel blocks.
 * Uses the optimized 16x16 / 8x8 mecc.sse kernels for the common block
 * sizes and a table-driven scalar loop (ff_square_tab) otherwise, e.g.
 * for partial blocks at the right/bottom picture edge.
 * NOTE(review): the accumulator declaration, the 16x16 size check and
 * the loop headers are missing from this extract; visible lines kept.
 */
static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
    /* ff_square_tab is biased by 256 so negative differences index correctly */
    const uint32_t *sq = ff_square_tab + 256;
        return s->mecc.sse[0](NULL, src1, src2, stride, 16);
    else if(w==8 && h==8)
        return s->mecc.sse[1](NULL, src1, src2, stride, 8);
            acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/**
 * Distortion of the current macroblock: compares the source MB in
 * new_picture against the reconstruction in s->dest for all three
 * planes. Full 16x16 MBs use the fast SSE (or NSSE, when mb_cmp
 * selects it) kernels; edge-clipped MBs fall back to the generic sse()
 * helper with the clamped width/height.
 * NOTE(review): the full-MB condition and some braces are missing from
 * this extract; visible lines are kept as-is.
 */
static int sse_mb(MpegEncContext *s){
    /* clamp to picture size for MBs overlapping the right/bottom edge */
    if(s->mb_x*16 + 16 > s->width ) w= s->width  - s->mb_x*16;
    if(s->mb_y*16 + 16 > s->height) h= s->height - s->mb_y*16;

        if(s->avctx->mb_cmp == FF_CMP_NSSE){
            return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize   * 16, s->dest[0], s->linesize,   16) +
                   s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[1], s->uvlinesize,  8) +
                   s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[2], s->uvlinesize,  8);
            return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize   * 16, s->dest[0], s->linesize,   16) +
                   s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[1], s->uvlinesize,  8) +
                   s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[2], s->uvlinesize,  8);

        /* partial MB: generic path, chroma at half resolution (4:2:0) */
        return  sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,  s->dest[0], w,    h,    s->linesize)
               +sse(s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8, s->dest[1], w>>1, h>>1, s->uvlinesize)
               +sse(s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8, s->dest[2], w>>1, h>>1, s->uvlinesize);
/**
 * Slice-threaded pre-pass motion estimation worker (avctx->execute
 * callback). Scans the slice's macroblocks bottom-right to top-left
 * using the pre_dia_size diamond, filling the ME context for the main
 * estimation pass.
 * NOTE(review): loop-closing braces and the return statement are
 * missing from this extract; visible lines are kept as-is.
 */
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
    MpegEncContext *s= *(void**)arg;

    s->me.dia_size= s->avctx->pre_dia_size;
    s->first_slice_line=1;
    /* reverse scan order: bottom row first, right to left */
    for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
        for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
            ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
        s->first_slice_line=0;
/**
 * Slice-threaded main motion estimation worker (avctx->execute
 * callback). For every macroblock of the slice it computes motion
 * vectors and the candidate mb_type, storing the results in the
 * context tables (p_mv_table etc.) for the later encoding pass.
 * NOTE(review): loop-closing braces and the return statement are
 * missing from this extract; visible lines are kept as-is.
 */
static int estimate_motion_thread(AVCodecContext *c, void *arg){
    MpegEncContext *s= *(void**)arg;

    ff_check_alignment();

    s->me.dia_size= s->avctx->dia_size;
    s->first_slice_line=1;
    for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x=0; //for block init below
        ff_init_block_index(s);
        for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
            /* advance the per-block indices by one MB (2 luma columns) */
            s->block_index[0]+=2;
            s->block_index[1]+=2;
            s->block_index[2]+=2;
            s->block_index[3]+=2;

            /* compute motion vector & mb_type and store in context */
            if(s->pict_type==AV_PICTURE_TYPE_B)
                ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
                ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
        s->first_slice_line=0;
/**
 * Slice-threaded worker computing per-macroblock luma variance and mean
 * (used by rate control / adaptive quantization). Writes mb_var and
 * mb_mean into current_picture and accumulates the slice variance sum
 * in me.mb_var_sum_temp.
 * NOTE(review): the xx/yy/varc declarations and closing braces are
 * missing from this extract; visible lines are kept as-is.
 */
static int mb_var_thread(AVCodecContext *c, void *arg){
    MpegEncContext *s= *(void**)arg;

    ff_check_alignment();

    for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
        for(mb_x=0; mb_x < s->mb_width; mb_x++) {
            uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;

            int sum = s->mpvencdsp.pix_sum(pix, s->linesize);

            /* variance = E[x^2] - E[x]^2, with rounding (+128) and the
             * +500 bias, all scaled down by 256 (16x16 = 256 pixels) */
            varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
                    (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;

            s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
            s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
            s->me.mb_var_sum_temp    += varc;
/**
 * Finalize the bitstream of the current slice: merge MPEG-4 data
 * partitions and write stuffing (MPEG-4) or MJPEG stuffing, then flush
 * the bit writer to a byte boundary. With two-pass rate control the
 * flushed padding is accounted as misc_bits.
 * NOTE(review): closing braces of the inner blocks are missing from
 * this extract; visible lines are kept as-is.
 */
static void write_slice_end(MpegEncContext *s){
    if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
        if(s->partitioned_frame){
            ff_mpeg4_merge_partitions(s);

        ff_mpeg4_stuffing(&s->pb);
    }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
        ff_mjpeg_encode_stuffing(s);

    flush_put_bits(&s->pb);

    /* pass-1 statistics: stuffing/padding counts as misc bits */
    if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
        s->misc_bits+= get_bits_diff(s);
/**
 * Append one 12-byte record to the AV_PKT_DATA_H263_MB_INFO side-data
 * buffer describing the macroblock at the current bit position:
 * bit offset, quantizer, GOB number, MB address, and the H.263 motion
 * vector predictors (hmv1/vmv1; the 4MV predictors are written as 0).
 * NOTE(review): the pred_x/pred_y declarations are missing from this
 * extract; visible lines are kept as-is.
 */
static void write_mb_info(MpegEncContext *s)
    /* the record slot was reserved by update_mb_info(); fill the last one */
    uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
    int offset = put_bits_count(&s->pb);
    int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
    int gobn = s->mb_y / s->gob_index;

    if (CONFIG_H263_ENCODER)
        ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
    bytestream_put_le32(&ptr, offset);
    bytestream_put_byte(&ptr, s->qscale);
    bytestream_put_byte(&ptr, gobn);
    bytestream_put_le16(&ptr, mba);
    bytestream_put_byte(&ptr, pred_x); /* hmv1 */
    bytestream_put_byte(&ptr, pred_y); /* vmv1 */
    /* 4MV not implemented */
    bytestream_put_byte(&ptr, 0); /* hmv2 */
    bytestream_put_byte(&ptr, 0); /* vmv2 */
/**
 * Bookkeeping for H.263 MB-info side data. Called before each MB
 * (startcode=0) to reserve a new 12-byte record once mb_info bytes have
 * been written since the last record, and after each startcode
 * (startcode=1) to remember where the startcode ended.
 * NOTE(review): the early '!s->mb_info' return, the write_mb_info()
 * call and some braces are missing from this extract; visible lines
 * are kept as-is.
 */
static void update_mb_info(MpegEncContext *s, int startcode)
    if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
        s->mb_info_size += 12;
        s->prev_mb_info = s->last_mb_info;
        s->prev_mb_info = put_bits_count(&s->pb)/8;
        /* This might have incremented mb_info_size above, and we return without
         * actually writing any info into that slot yet. But in that case,
         * this will be called again at the start of the after writing the
         * start code, actually writing the mb info. */

    s->last_mb_info = put_bits_count(&s->pb)/8;
    if (!s->mb_info_size)
        s->mb_info_size += 12;
/**
 * Grow the shared output bitstream buffer when fewer than @p threshold
 * bytes remain, enlarging it by at least @p size_increase. Only
 * possible with a single slice context writing into the avctx-internal
 * byte buffer; the PutBitContext and the ptr_lastgob / vbv_delay_ptr
 * pointers into it are rebased onto the new allocation.
 *
 * @return 0 on success (or when no reallocation applies and space is
 *         sufficient), AVERROR(ENOMEM) on allocation failure or
 *         AVERROR(EINVAL) if space is still below the threshold.
 * NOTE(review): some braces and the size computation line are missing
 * from this extract; visible lines are kept as-is.
 */
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
    if (   s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
        && s->slice_context_count == 1
        && s->pb.buf == s->avctx->internal->byte_buffer) {
        /* remember offsets of raw pointers into the old buffer */
        int lastgob_pos = s->ptr_lastgob - s->pb.buf;
        int vbv_pos     = s->vbv_delay_ptr - s->pb.buf;

        uint8_t *new_buffer = NULL;
        int new_buffer_size = 0;

        if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
            av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
            return AVERROR(ENOMEM);

        av_fast_padded_malloc(&new_buffer, &new_buffer_size,
                              s->avctx->internal->byte_buffer_size + size_increase);
            return AVERROR(ENOMEM);

        memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
        av_free(s->avctx->internal->byte_buffer);
        s->avctx->internal->byte_buffer      = new_buffer;
        s->avctx->internal->byte_buffer_size = new_buffer_size;
        rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
        /* rebase the saved pointers onto the new buffer */
        s->ptr_lastgob   = s->pb.buf + lastgob_pos;
        s->vbv_delay_ptr = s->pb.buf + vbv_pos;

    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
        return AVERROR(EINVAL);
/**
 * Slice encoding worker (avctx->execute callback): encodes every
 * macroblock in rows [start_mb_y, end_mb_y) of one slice context.
 * Responsibilities per MB:
 *  - grow the output buffer if needed and detect oversized frames,
 *  - write GOB / video-packet / slice headers at resync points,
 *  - pick the coding mode: either a full rate-distortion search over
 *    all candidate mb_types via encode_mb_hq() (double-buffered bit
 *    writers), or directly encode the single candidate type,
 *  - reconstruct pixels, update motion tables and PSNR statistics.
 * NOTE(review): this extract is missing many statements of the
 * original (declarations, braces, some calls); the visible lines are
 * kept byte-for-byte and only comments were added.
 */
static int encode_thread(AVCodecContext *c, void *arg){
    MpegEncContext *s= *(void**)arg;
    int chr_h= 16>>s->chroma_y_shift;         /* chroma rows per MB (8 for 4:2:0, 16 for 4:2:2) */
    MpegEncContext best_s = { 0 }, backup_s;
    /* double buffers for trial encodes in encode_mb_hq() */
    uint8_t bit_buf[2][MAX_MB_BYTES];
    uint8_t bit_buf2[2][MAX_MB_BYTES];
    uint8_t bit_buf_tex[2][MAX_MB_BYTES];
    PutBitContext pb[2], pb2[2], tex_pb[2];

    ff_check_alignment();

        init_put_bits(&pb    [i], bit_buf    [i], MAX_MB_BYTES);
        init_put_bits(&pb2   [i], bit_buf2   [i], MAX_MB_BYTES);
        init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);

    s->last_bits= put_bits_count(&s->pb);

        /* init last dc values */
        /* note: quant matrix value (8) is implied here */
        s->last_dc[i] = 128 << s->intra_dc_precision;

        s->current_picture.encoding_error[i] = 0;
    if(s->codec_id==AV_CODEC_ID_AMV){
        /* AMV uses fixed non-standard DC predictors */
        s->last_dc[0] = 128*8/13;
        s->last_dc[1] = 128*8/14;
        s->last_dc[2] = 128*8/14;
    memset(s->last_mv, 0, sizeof(s->last_mv));

    switch(s->codec_id){
    case AV_CODEC_ID_H263:
    case AV_CODEC_ID_H263P:
    case AV_CODEC_ID_FLV1:
        if (CONFIG_H263_ENCODER)
            s->gob_index = H263_GOB_HEIGHT(s->height);
    case AV_CODEC_ID_MPEG4:
        if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
            ff_mpeg4_init_partitions(s);

    s->first_slice_line = 1;
    s->ptr_lastgob = s->pb.buf;
    for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
        ff_set_qscale(s, s->qscale);
        ff_init_block_index(s);

        for(mb_x=0; mb_x < s->mb_width; mb_x++) {
            int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
            int mb_type= s->mb_type[xy];
            /* ensure room for the worst-case MB plus slack */
            int size_increase =  s->avctx->internal->byte_buffer_size/4
                               + s->mb_width*MAX_MB_BYTES;

            ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
            if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
                av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
            if(s->data_partitioning){
                if(   s->pb2   .buf_end - s->pb2   .buf - (put_bits_count(&s->    pb2)>>3) < MAX_MB_BYTES
                   || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
                    av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");

            s->mb_y = mb_y;  // moved into loop, can get changed by H.261
            ff_update_block_index(s);

            if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
                ff_h261_reorder_mb_index(s);
                xy= s->mb_y*s->mb_stride + s->mb_x;
                mb_type= s->mb_type[xy];

            /* write gob / video packet header  */
                int current_packet_size, is_gob_start;

                current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);

                is_gob_start = s->rtp_payload_size &&
                               current_packet_size >= s->rtp_payload_size &&

                if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;

                switch(s->codec_id){
                case AV_CODEC_ID_H263:
                case AV_CODEC_ID_H263P:
                    if(!s->h263_slice_structured)
                        /* GOB headers only at GOB boundaries */
                        if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
                case AV_CODEC_ID_MPEG2VIDEO:
                    if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
                case AV_CODEC_ID_MPEG1VIDEO:
                    /* a slice must not interrupt a skip run */
                    if(s->mb_skip_run) is_gob_start=0;
                case AV_CODEC_ID_MJPEG:
                    if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;

                    if(s->start_mb_y != mb_y || mb_x!=0){

                        if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
                            ff_mpeg4_init_partitions(s);

                    av_assert2((put_bits_count(&s->pb)&7) == 0);
                    current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;

                    /* deliberately corrupt packets for error-resilience testing */
                    if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
                        int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
                        int d = 100 / s->error_rate;
                            current_packet_size=0;
                            s->pb.buf_ptr= s->ptr_lastgob;
                            av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);

#if FF_API_RTP_CALLBACK
FF_DISABLE_DEPRECATION_WARNINGS
                    if (s->avctx->rtp_callback){
                        int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
                        s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
FF_ENABLE_DEPRECATION_WARNINGS
                    update_mb_info(s, 1);

                    switch(s->codec_id){
                    case AV_CODEC_ID_MPEG4:
                        if (CONFIG_MPEG4_ENCODER) {
                            ff_mpeg4_encode_video_packet_header(s);
                            ff_mpeg4_clean_buffers(s);
                    case AV_CODEC_ID_MPEG1VIDEO:
                    case AV_CODEC_ID_MPEG2VIDEO:
                        if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
                            ff_mpeg1_encode_slice_header(s);
                            ff_mpeg1_clean_buffers(s);
                    case AV_CODEC_ID_H263:
                    case AV_CODEC_ID_H263P:
                        if (CONFIG_H263_ENCODER)
                            ff_h263_encode_gob_header(s, mb_y);

                    if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
                        int bits= put_bits_count(&s->pb);
                        s->misc_bits+= bits - s->last_bits;

                    s->ptr_lastgob += current_packet_size;
                    s->first_slice_line=1;
                    s->resync_mb_x=mb_x;
                    s->resync_mb_y=mb_y;

            if(  (s->resync_mb_x   == s->mb_x)
               && s->resync_mb_y+1 == s->mb_y){
                s->first_slice_line=0;

            s->dquant=0; //only for QP_RD

            update_mb_info(s, 0);

            if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
                int pb_bits_count, pb2_bits_count, tex_pb_bits_count;

                copy_context_before_encode(&backup_s, s, -1);
                best_s.data_partitioning= s->data_partitioning;
                best_s.partitioned_frame= s->partitioned_frame;
                if(s->data_partitioning){
                    backup_s.pb2= s->pb2;
                    backup_s.tex_pb= s->tex_pb;

                /* try every candidate coding mode that motion estimation marked */
                if(mb_type&CANDIDATE_MB_TYPE_INTER){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_16X16;
                    s->mv[0][0][0] = s->p_mv_table[xy][0];
                    s->mv[0][0][1] = s->p_mv_table[xy][1];
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
                                 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
                if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_FIELD;
                        j= s->field_select[0][i] = s->p_field_select_table[i][xy];
                        s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
                        s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_16X16;
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
                                 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
                if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_8X8;
                        s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
                        s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_16X16;
                    s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
                    s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
                                 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
                if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
                    s->mv_dir = MV_DIR_BACKWARD;
                    s->mv_type = MV_TYPE_16X16;
                    s->mv[1][0][0] = s->b_back_mv_table[xy][0];
                    s->mv[1][0][1] = s->b_back_mv_table[xy][1];
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
                                 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
                if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
                    s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
                    s->mv_type = MV_TYPE_16X16;
                    s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
                    s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
                    s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
                    s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_FIELD;
                        j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
                        s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
                        s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
                    s->mv_dir = MV_DIR_BACKWARD;
                    s->mv_type = MV_TYPE_FIELD;
                        j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
                        s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
                        s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
                    s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
                    s->mv_type = MV_TYPE_FIELD;
                    for(dir=0; dir<2; dir++){
                            j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
                            s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
                            s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                if(mb_type&CANDIDATE_MB_TYPE_INTRA){
                    s->mv_type = MV_TYPE_16X16;
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                    if(s->h263_pred || s->h263_aic){
                            s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
                            ff_clean_intra_table_entries(s); //old mode?

                /* QP_RD: additionally try nearby quantizers for the best mode */
                if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
                    if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
                        const int last_qp= backup_s.qscale;
                        const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
                        static const int dquant_tab[4]={-1,1,-2,2};
                        int storecoefs = s->mb_intra && s->dc_val[0];

                        av_assert2(backup_s.dquant == 0);

                        s->mv_dir= best_s.mv_dir;
                        s->mv_type = MV_TYPE_16X16;
                        s->mb_intra= best_s.mb_intra;
                        s->mv[0][0][0] = best_s.mv[0][0][0];
                        s->mv[0][0][1] = best_s.mv[0][0][1];
                        s->mv[1][0][0] = best_s.mv[1][0][0];
                        s->mv[1][0][1] = best_s.mv[1][0][1];

                        qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
                        for(; qpi<4; qpi++){
                            int dquant= dquant_tab[qpi];
                            qp= last_qp + dquant;
                            if(qp < s->avctx->qmin || qp > s->avctx->qmax)
                            backup_s.dquant= dquant;
                                /* save DC/AC predictors: trials clobber them */
                                dc[i]= s->dc_val[0][ s->block_index[i] ];
                                memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);

                            encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
                                         &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
                            if(best_s.qscale != qp){
                                    /* restore predictors of the rejected trial */
                                    s->dc_val[0][ s->block_index[i] ]= dc[i];
                                    memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);

                if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
                    int mx= s->b_direct_mv_table[xy][0];
                    int my= s->b_direct_mv_table[xy][1];

                    backup_s.dquant = 0;
                    s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
                    ff_mpeg4_set_direct_mv(s, mx, my);
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
                                 &dmin, &next_block, mx, my);
                if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
                    backup_s.dquant = 0;
                    s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
                    ff_mpeg4_set_direct_mv(s, 0, 0);
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
                        coded |= s->block_last_index[i];
                        memcpy(s->mv, best_s.mv, sizeof(s->mv));
                        if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
                            mx=my=0; //FIXME find the one we actually used
                            ff_mpeg4_set_direct_mv(s, mx, my);
                        }else if(best_s.mv_dir&MV_DIR_BACKWARD){

                        s->mv_dir= best_s.mv_dir;
                        s->mv_type = best_s.mv_type;
/*                        s->mv[0][0][0] = best_s.mv[0][0][0];
                        s->mv[0][0][1] = best_s.mv[0][0][1];
                        s->mv[1][0][0] = best_s.mv[1][0][0];
                        s->mv[1][0][1] = best_s.mv[1][0][1];*/
                        encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
                                     &dmin, &next_block, mx, my);

                s->current_picture.qscale_table[xy] = best_s.qscale;

                copy_context_after_encode(s, &best_s, -1);

                /* copy the winning trial from its side buffer into the real stream */
                pb_bits_count= put_bits_count(&s->pb);
                flush_put_bits(&s->pb);
                avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);

                if(s->data_partitioning){
                    pb2_bits_count= put_bits_count(&s->pb2);
                    flush_put_bits(&s->pb2);
                    avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
                    s->pb2= backup_s.pb2;

                    tex_pb_bits_count= put_bits_count(&s->tex_pb);
                    flush_put_bits(&s->tex_pb);
                    avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
                    s->tex_pb= backup_s.tex_pb;
                s->last_bits= put_bits_count(&s->pb);

                if (CONFIG_H263_ENCODER &&
                    s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
                    ff_h263_update_motion_val(s);

                if(next_block==0){ //FIXME 16 vs linesize16
                    s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad                     , s->linesize  ,16);
                    s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize    , s->uvlinesize, 8);
                    s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);

                if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
                    ff_mpv_reconstruct_mb(s, s->block);
                /* only one candidate type: encode it directly */
                int motion_x = 0, motion_y = 0;
                s->mv_type=MV_TYPE_16X16;
                // only one MB-Type possible

                case CANDIDATE_MB_TYPE_INTRA:
                    motion_x= s->mv[0][0][0] = 0;
                    motion_y= s->mv[0][0][1] = 0;
                case CANDIDATE_MB_TYPE_INTER:
                    s->mv_dir = MV_DIR_FORWARD;
                    motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
                    motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
                case CANDIDATE_MB_TYPE_INTER_I:
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_FIELD;
                        j= s->field_select[0][i] = s->p_field_select_table[i][xy];
                        s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
                        s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
                case CANDIDATE_MB_TYPE_INTER4V:
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_8X8;
                        s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
                        s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
                case CANDIDATE_MB_TYPE_DIRECT:
                    if (CONFIG_MPEG4_ENCODER) {
                        s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
                        motion_x=s->b_direct_mv_table[xy][0];
                        motion_y=s->b_direct_mv_table[xy][1];
                        ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
                case CANDIDATE_MB_TYPE_DIRECT0:
                    if (CONFIG_MPEG4_ENCODER) {
                        s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
                        ff_mpeg4_set_direct_mv(s, 0, 0);
                case CANDIDATE_MB_TYPE_BIDIR:
                    s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
                    s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
                    s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
                    s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
                    s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
                case CANDIDATE_MB_TYPE_BACKWARD:
                    s->mv_dir = MV_DIR_BACKWARD;
                    motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
                    motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
                case CANDIDATE_MB_TYPE_FORWARD:
                    s->mv_dir = MV_DIR_FORWARD;
                    motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
                    motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
                case CANDIDATE_MB_TYPE_FORWARD_I:
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_FIELD;
                        j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
                        s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
                        s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
                case CANDIDATE_MB_TYPE_BACKWARD_I:
                    s->mv_dir = MV_DIR_BACKWARD;
                    s->mv_type = MV_TYPE_FIELD;
                        j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
                        s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
                        s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
                case CANDIDATE_MB_TYPE_BIDIR_I:
                    s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
                    s->mv_type = MV_TYPE_FIELD;
                    for(dir=0; dir<2; dir++){
                            j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
                            s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
                            s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
                    av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");

                encode_mb(s, motion_x, motion_y);

                // RAL: Update last macroblock type
                s->last_mv_dir = s->mv_dir;

                if (CONFIG_H263_ENCODER &&
                    s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
                    ff_h263_update_motion_val(s);

                ff_mpv_reconstruct_mb(s, s->block);

            /* clean the MV table in IPS frames for direct mode in B-frames */
            if(s->mb_intra /* && I,P,S_TYPE */){
                s->p_mv_table[xy][0]=0;
                s->p_mv_table[xy][1]=0;

            if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
                /* clamp for MBs overlapping the picture edge */
                if(s->mb_x*16 + 16 > s->width ) w= s->width  - s->mb_x*16;
                if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;

                s->current_picture.encoding_error[0] += sse(
                    s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
                    s->dest[0], w, h, s->linesize);
                s->current_picture.encoding_error[1] += sse(
                    s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
                    s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
                s->current_picture.encoding_error[2] += sse(
                    s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
                    s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
            if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
                ff_h263_loop_filter(s);

            ff_dlog(s->avctx, "MB %d %d bits\n",
                    s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));

    //not beautiful here but we must write it before flushing so it has to be here
    if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
        ff_msmpeg4_encode_ext_header(s);

#if FF_API_RTP_CALLBACK
FF_DISABLE_DEPRECATION_WARNINGS
    /* Send the last GOB if RTP */
    if (s->avctx->rtp_callback) {
        int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
        int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
        /* Call the RTP callback to send the last GOB */
        s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
FF_ENABLE_DEPRECATION_WARNINGS
/* MERGE(field): add src->field into dst->field and reset the source to 0.
 * Used to fold per-slice-thread statistics back into the main context. */
3557 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold the motion-estimation statistics gathered by a slice-thread context
 * (src) into the main context (dst) after the ME pass.
 * NOTE(review): the closing brace is not visible in this excerpt (embedded
 * line numbers jump from 3561) — confirm against the full source. */
3558 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3559 MERGE(me.scene_change_score);
3560 MERGE(me.mc_mb_var_sum_temp);
3561 MERGE(me.mb_var_sum_temp);
/* Fold per-slice-thread encoding results back into the main context:
 * statistic counters are summed (and zeroed in src) via MERGE(), and the
 * thread's bitstream — which must be byte aligned — is appended to dst->pb.
 * NOTE(review): some lines are elided in this excerpt (embedded numbers jump). */
3564 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3567 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3568 MERGE(dct_count[1]);
3577 MERGE(er.error_count);
3578 MERGE(padding_bug_score);
3579 MERGE(current_picture.encoding_error[0]);
3580 MERGE(current_picture.encoding_error[1]);
3581 MERGE(current_picture.encoding_error[2]);
/* per-coefficient DCT error sums exist only when noise reduction is enabled */
3583 if (dst->noise_reduction){
3584 for(i=0; i<64; i++){
3585 MERGE(dct_error_sum[0][i]);
3586 MERGE(dct_error_sum[1][i]);
/* both bitstreams are asserted byte aligned, so a raw bit copy is safe */
3590 av_assert1(put_bits_count(&src->pb) % 8 ==0);
3591 av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3592 avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3593 flush_put_bits(&dst->pb);
/* Choose the quality (lambda / per-frame qscale) for the current picture.
 * A pending s->next_lambda takes precedence; otherwise, unless the qscale
 * is fixed, the rate controller (ff_rate_estimate_qscale) is consulted.
 * With adaptive quantisation the per-MB qscale table is then cleaned up in
 * a codec-specific way before ff_init_qscale_tab().
 * @param dry_run nonzero to estimate only; s->next_lambda is not consumed.
 * NOTE(review): the return statements are elided in this excerpt —
 * presumably returns <0 on rate-control failure, confirm in full source. */
3596 static int estimate_qp(MpegEncContext *s, int dry_run){
3597 if (s->next_lambda){
3598 s->current_picture_ptr->f->quality =
3599 s->current_picture.f->quality = s->next_lambda;
3600 if(!dry_run) s->next_lambda= 0;
3601 } else if (!s->fixed_qscale) {
3602 int quality = ff_rate_estimate_qscale(s, dry_run);
3603 s->current_picture_ptr->f->quality =
3604 s->current_picture.f->quality = quality;
3605 if (s->current_picture.f->quality < 0)
/* smooth the per-MB qscale table so codec-specific delta-QP limits hold */
3609 if(s->adaptive_quant){
3610 switch(s->codec_id){
3611 case AV_CODEC_ID_MPEG4:
3612 if (CONFIG_MPEG4_ENCODER)
3613 ff_clean_mpeg4_qscales(s);
3615 case AV_CODEC_ID_H263:
3616 case AV_CODEC_ID_H263P:
3617 case AV_CODEC_ID_FLV1:
3618 if (CONFIG_H263_ENCODER)
3619 ff_clean_h263_qscales(s);
3622 ff_init_qscale_tab(s);
3625 s->lambda= s->lambda_table[0];
3628 s->lambda = s->current_picture.f->quality;
3633 /* must be called before writing the header */
/* Update temporal-distance bookkeeping for the current picture.
 * s->time is derived from the frame pts scaled by time_base.num; for
 * B-pictures pb_time (distance from the previous non-B picture) is
 * derived, otherwise pp_time and last_non_b_time are advanced. */
3634 static void set_frame_distances(MpegEncContext * s){
3635 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3636 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3638 if(s->pict_type==AV_PICTURE_TYPE_B){
3639 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3640 av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
3642 s->pp_time= s->time - s->last_non_b_time;
3643 s->last_non_b_time= s->time;
3644 av_assert1(s->picture_number==0 || s->pp_time > 0);
/* Encode one picture. High-level flow visible in this excerpt:
 *  - reset ME statistics and set frame distances / MPEG-4 time,
 *  - pick lambda (2-pass, fixed qscale, or previous-frame carry-over),
 *  - run (pre-)motion estimation or intra variance analysis across all
 *    slice-thread contexts, merge their statistics, detect scene changes,
 *  - choose f_code/b_code and clamp long MVs,
 *  - estimate the quantiser and build MJPEG/AMV quant matrices,
 *  - write the codec-specific picture header,
 *  - run encode_thread() on every slice context and merge the results.
 * NOTE(review): many lines (error returns, some branches, the final
 * return) are elided in this excerpt — the embedded line numbers jump. */
3648 static int encode_picture(MpegEncContext *s, int picture_number)
3652 int context_count = s->slice_context_count;
3654 s->picture_number = picture_number;
3656 /* Reset the average MB variance */
3657 s->me.mb_var_sum_temp =
3658 s->me.mc_mb_var_sum_temp = 0;
3660 /* we need to initialize some time vars before we can encode B-frames */
3661 // RAL: Condition added for MPEG1VIDEO
3662 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3663 set_frame_distances(s);
3664 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3665 ff_set_mpeg4_time(s);
3667 s->me.scene_change_score=0;
3669 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* rounding mode: fixed for I pictures, toggled per non-B picture for
 * codecs that use flip-flop rounding */
3671 if(s->pict_type==AV_PICTURE_TYPE_I){
3672 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3673 else s->no_rounding=0;
3674 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3675 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3676 s->no_rounding ^= 1;
/* lambda source: 2-pass log, or last lambda of the same picture class */
3679 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3680 if (estimate_qp(s,1) < 0)
3682 ff_get_2pass_fcode(s);
3683 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3684 if(s->pict_type==AV_PICTURE_TYPE_B)
3685 s->lambda= s->last_lambda_for[s->pict_type];
3687 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* all codecs except (A)MJPEG share the luma quant matrices for chroma */
3691 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3692 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3693 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3694 s->q_chroma_intra_matrix = s->q_intra_matrix;
3695 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3698 s->mb_intra=0; //for the rate distortion & bit compare functions
3699 for(i=1; i<context_count; i++){
3700 ret = ff_update_duplicate_context(s->thread_context[i], s);
3708 /* Estimate motion for every MB */
3709 if(s->pict_type != AV_PICTURE_TYPE_I){
3710 s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3711 s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3712 if (s->pict_type != AV_PICTURE_TYPE_B) {
3713 if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3715 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3719 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3720 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3722 for(i=0; i<s->mb_stride*s->mb_height; i++)
3723 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3725 if(!s->fixed_qscale){
3726 /* finding spatial complexity for I-frame rate control */
3727 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
/* merge per-thread ME statistics into the main context */
3730 for(i=1; i<context_count; i++){
3731 merge_context_after_me(s, s->thread_context[i]);
3733 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3734 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* scene change: force the P picture to I and mark every MB intra */
3737 if (s->me.scene_change_score > s->scenechange_threshold &&
3738 s->pict_type == AV_PICTURE_TYPE_P) {
3739 s->pict_type= AV_PICTURE_TYPE_I;
3740 for(i=0; i<s->mb_stride*s->mb_height; i++)
3741 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3742 if(s->msmpeg4_version >= 3)
3744 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3745 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* select f_code for P/S pictures and clamp over-range motion vectors */
3749 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3750 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3752 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3754 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3755 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3756 s->f_code= FFMAX3(s->f_code, a, b);
3759 ff_fix_long_p_mvs(s, s->intra_penalty ? CANDIDATE_MB_TYPE_INTER : CANDIDATE_MB_TYPE_INTRA);
3760 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3761 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3765 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3766 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
/* select f_code/b_code for B pictures and clamp all four MV tables */
3771 if(s->pict_type==AV_PICTURE_TYPE_B){
3774 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3775 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3776 s->f_code = FFMAX(a, b);
3778 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3779 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3780 s->b_code = FFMAX(a, b);
3782 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3783 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3784 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3785 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3786 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3788 for(dir=0; dir<2; dir++){
3791 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3792 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3793 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3794 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3802 if (estimate_qp(s, 0) < 0)
3805 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3806 s->pict_type == AV_PICTURE_TYPE_I &&
3807 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3808 s->qscale= 3; //reduce clipping problems
/* MJPEG bakes qscale into the quant matrices instead of signalling it */
3810 if (s->out_format == FMT_MJPEG) {
3811 const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3812 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3814 if (s->avctx->intra_matrix) {
3816 luma_matrix = s->avctx->intra_matrix;
3818 if (s->avctx->chroma_intra_matrix)
3819 chroma_matrix = s->avctx->chroma_intra_matrix;
3821 /* for mjpeg, we do include qscale in the matrix */
3823 int j = s->idsp.idct_permutation[i];
3825 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3826 s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3828 s->y_dc_scale_table=
3829 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3830 s->chroma_intra_matrix[0] =
3831 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3832 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3833 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3834 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3835 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV uses fixed sp5x quant tables and constant DC scales */
3838 if(s->codec_id == AV_CODEC_ID_AMV){
3839 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3840 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3842 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3844 s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3845 s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3847 s->y_dc_scale_table= y;
3848 s->c_dc_scale_table= c;
3849 s->intra_matrix[0] = 13;
3850 s->chroma_intra_matrix[0] = 14;
3851 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3852 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3853 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3854 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3858 //FIXME var duplication
3859 s->current_picture_ptr->f->key_frame =
3860 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3861 s->current_picture_ptr->f->pict_type =
3862 s->current_picture.f->pict_type = s->pict_type;
3864 if (s->current_picture.f->key_frame)
3865 s->picture_in_gop_number=0;
3867 s->mb_x = s->mb_y = 0;
3868 s->last_bits= put_bits_count(&s->pb);
/* write the codec-specific picture header */
3869 switch(s->out_format) {
3871 if (CONFIG_MJPEG_ENCODER && s->huffman != HUFFMAN_TABLE_OPTIMAL)
3872 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3873 s->pred, s->intra_matrix, s->chroma_intra_matrix);
3876 if (CONFIG_H261_ENCODER)
3877 ff_h261_encode_picture_header(s, picture_number);
3880 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3881 ff_wmv2_encode_picture_header(s, picture_number);
3882 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3883 ff_msmpeg4_encode_picture_header(s, picture_number);
3884 else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3885 ret = ff_mpeg4_encode_picture_header(s, picture_number);
3888 } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3889 ret = ff_rv10_encode_picture_header(s, picture_number);
3893 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3894 ff_rv20_encode_picture_header(s, picture_number);
3895 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3896 ff_flv_encode_picture_header(s, picture_number);
3897 else if (CONFIG_H263_ENCODER)
3898 ff_h263_encode_picture_header(s, picture_number);
3901 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3902 ff_mpeg1_encode_picture_header(s, picture_number);
3907 bits= put_bits_count(&s->pb);
3908 s->header_bits= bits - s->last_bits;
/* encode every slice in parallel, then merge bitstreams + statistics */
3910 for(i=1; i<context_count; i++){
3911 update_duplicate_context_after_me(s->thread_context[i], s);
3913 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3914 for(i=1; i<context_count; i++){
3915 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3916 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3917 merge_context_after_encode(s, s->thread_context[i]);
/* Accumulate per-coefficient DCT statistics (dct_error_sum / dct_count)
 * and bias each coefficient towards zero by the running dct_offset.
 * Separate statistics are kept for intra and inter blocks.
 * NOTE(review): parts of the per-coefficient branch (including the sign
 * test and stores) are elided in this excerpt. */
3923 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3924 const int intra= s->mb_intra;
3927 s->dct_count[intra]++;
3929 for(i=0; i<64; i++){
3930 int level= block[i];
/* positive path: record error, subtract offset, clamp at zero */
3934 s->dct_error_sum[intra][i] += level;
3935 level -= s->dct_offset[intra][i];
3936 if(level<0) level=0;
/* negative path (mirrored): record error, add offset, clamp at zero */
3938 s->dct_error_sum[intra][i] -= level;
3939 level += s->dct_offset[intra][i];
3940 if(level>0) level=0;
/* Rate-distortion ("trellis") quantisation of one 8x8 block.
 * Runs the forward DCT, computes up to two candidate quantised levels per
 * coefficient, then performs a dynamic-programming search over surviving
 * (run, level) paths that minimises distortion + lambda * bits, using the
 * codec's AC VLC length tables. Finally the winning path is written back
 * into the block in coefficient order.
 * @param n       block index; n<4 selects luma matrices, else chroma
 * @param overflow set nonzero if the largest level exceeds s->max_qcoeff
 * @return index of the last non-zero coefficient (or -1, see DC-only path)
 * NOTE(review): several declarations and closing braces are elided in
 * this excerpt (embedded line numbers jump). */
3947 static int dct_quantize_trellis_c(MpegEncContext *s,
3948 int16_t *block, int n,
3949 int qscale, int *overflow){
3951 const uint16_t *matrix;
3952 const uint8_t *scantable;
3953 const uint8_t *perm_scantable;
3955 unsigned int threshold1, threshold2;
3967 int coeff_count[64];
3968 int qmul, qadd, start_i, last_non_zero, i, dc;
3969 const int esc_length= s->ac_esc_length;
3971 uint8_t * last_length;
3972 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3975 s->fdsp.fdct(block);
3977 if(s->dct_error_sum)
3978 s->denoise_dct(s, block);
3980 qadd= ((qscale-1)|1)*8;
/* MPEG-2 may use the non-linear qscale mapping */
3982 if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3983 else mpeg2_qscale = qscale << 1;
/* intra: quantise DC separately, pick luma/chroma matrices and VLC tables */
3987 scantable= s->intra_scantable.scantable;
3988 perm_scantable= s->intra_scantable.permutated;
3996 /* For AIC we skip quant/dequant of INTRADC */
4001 /* note: block[0] is assumed to be positive */
4002 block[0] = (block[0] + (q >> 1)) / q;
4005 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4006 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4007 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4008 bias= 1<<(QMAT_SHIFT-1);
4010 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4011 length = s->intra_chroma_ac_vlc_length;
4012 last_length= s->intra_chroma_ac_vlc_last_length;
4014 length = s->intra_ac_vlc_length;
4015 last_length= s->intra_ac_vlc_last_length;
/* inter: use the inter matrices and VLC tables */
4018 scantable= s->inter_scantable.scantable;
4019 perm_scantable= s->inter_scantable.permutated;
4022 qmat = s->q_inter_matrix[qscale];
4023 matrix = s->inter_matrix;
4024 length = s->inter_ac_vlc_length;
4025 last_length= s->inter_ac_vlc_last_length;
4029 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4030 threshold2= (threshold1<<1);
/* scan back from 63 to find the last coefficient above threshold */
4032 for(i=63; i>=start_i; i--) {
4033 const int j = scantable[i];
4034 int level = block[j] * qmat[j];
4036 if(((unsigned)(level+threshold1))>threshold2){
/* build the candidate levels (level and level-1) for each coefficient */
4042 for(i=start_i; i<=last_non_zero; i++) {
4043 const int j = scantable[i];
4044 int level = block[j] * qmat[j];
4046 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4047 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4048 if(((unsigned)(level+threshold1))>threshold2){
4050 level= (bias + level)>>QMAT_SHIFT;
4052 coeff[1][i]= level-1;
4053 // coeff[2][k]= level-2;
4055 level= (bias - level)>>QMAT_SHIFT;
4056 coeff[0][i]= -level;
4057 coeff[1][i]= -level+1;
4058 // coeff[2][k]= -level+2;
4060 coeff_count[i]= FFMIN(level, 2);
4061 av_assert2(coeff_count[i]);
4064 coeff[0][i]= (level>>31)|1;
4069 *overflow= s->max_qcoeff < max; //overflow might have happened
4071 if(last_non_zero < start_i){
4072 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4073 return last_non_zero;
4076 score_tab[start_i]= 0;
4077 survivor[0]= start_i;
/* DP over coefficients: extend each surviving path with every candidate
 * level, scoring reconstruction distortion + VLC bits * lambda */
4080 for(i=start_i; i<=last_non_zero; i++){
4081 int level_index, j, zero_distortion;
4082 int dct_coeff= FFABS(block[ scantable[i] ]);
4083 int best_score=256*256*256*120;
4085 if (s->fdsp.fdct == ff_fdct_ifast)
4086 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4087 zero_distortion= dct_coeff*dct_coeff;
4089 for(level_index=0; level_index < coeff_count[i]; level_index++){
4091 int level= coeff[level_index][i];
4092 const int alevel= FFABS(level);
/* reconstruct the coefficient per the codec's dequant rule */
4097 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4098 unquant_coeff= alevel*qmul + qadd;
4099 } else if(s->out_format == FMT_MJPEG) {
4100 j = s->idsp.idct_permutation[scantable[i]];
4101 unquant_coeff = alevel * matrix[j] * 8;
4103 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4105 unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4106 unquant_coeff = (unquant_coeff - 1) | 1;
4108 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4109 unquant_coeff = (unquant_coeff - 1) | 1;
4114 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* small levels go through the (run,level) VLC; otherwise escape coded */
4116 if((level&(~127)) == 0){
4117 for(j=survivor_count-1; j>=0; j--){
4118 int run= i - survivor[j];
4119 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4120 score += score_tab[i-run];
4122 if(score < best_score){
4125 level_tab[i+1]= level-64;
4129 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4130 for(j=survivor_count-1; j>=0; j--){
4131 int run= i - survivor[j];
4132 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4133 score += score_tab[i-run];
4134 if(score < last_score){
4137 last_level= level-64;
4143 distortion += esc_length*lambda;
4144 for(j=survivor_count-1; j>=0; j--){
4145 int run= i - survivor[j];
4146 int score= distortion + score_tab[i-run];
4148 if(score < best_score){
4151 level_tab[i+1]= level-64;
4155 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4156 for(j=survivor_count-1; j>=0; j--){
4157 int run= i - survivor[j];
4158 int score= distortion + score_tab[i-run];
4159 if(score < last_score){
4162 last_level= level-64;
4170 score_tab[i+1]= best_score;
/* prune dominated survivors before adding the new path end-point */
4172 // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4173 if(last_non_zero <= 27){
4174 for(; survivor_count; survivor_count--){
4175 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4179 for(; survivor_count; survivor_count--){
4180 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4185 survivor[ survivor_count++ ]= i+1;
/* pick the best termination point (end-of-block cost added) */
4188 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4189 last_score= 256*256*256*120;
4190 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4191 int score= score_tab[i];
4193 score += lambda * 2; // FIXME more exact?
4195 if(score < last_score){
4198 last_level= level_tab[i];
4199 last_run= run_tab[i];
4204 s->coded_score[n] = last_score;
4206 dc= FFABS(block[0]);
4207 last_non_zero= last_i - 1;
4208 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4210 if(last_non_zero < start_i)
4211 return last_non_zero;
/* DC-only block: re-decide DC by itself against its own distortion */
4213 if(last_non_zero == 0 && start_i == 0){
4215 int best_score= dc * dc;
4217 for(i=0; i<coeff_count[0]; i++){
4218 int level= coeff[i][0];
4219 int alevel= FFABS(level);
4220 int unquant_coeff, score, distortion;
4222 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4223 unquant_coeff= (alevel*qmul + qadd)>>3;
4225 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4226 unquant_coeff = (unquant_coeff - 1) | 1;
4228 unquant_coeff = (unquant_coeff + 4) >> 3;
4229 unquant_coeff<<= 3 + 3;
4231 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4233 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4234 else score= distortion + esc_length*lambda;
4236 if(score < best_score){
4238 best_level= level - 64;
4241 block[0]= best_level;
4242 s->coded_score[n] = best_score - dc*dc;
4243 if(best_level == 0) return -1;
4244 else return last_non_zero;
/* walk the winning path backwards, writing levels into the block */
4248 av_assert2(last_level);
4250 block[ perm_scantable[last_non_zero] ]= last_level;
4253 for(; i>start_i; i -= run_tab[i] + 1){
4254 block[ perm_scantable[i-1] ]= level_tab[i];
4257 return last_non_zero;
/* 8x8 DCT basis functions in BASIS_SHIFT fixed point, stored in
 * IDCT-permuted coefficient order; filled lazily by build_basis(). */
4260 static int16_t basis[64][64];
/* Fill the basis[][] table above for the given coefficient permutation.
 * NOTE(review): the nested loop headers over i/j/x/y are elided in this
 * excerpt (embedded line numbers jump from 4262 to 4269). */
4262 static void build_basis(uint8_t *perm){
4269 double s= 0.25*(1<<BASIS_SHIFT);
4271 int perm_index= perm[index];
4272 if(i==0) s*= sqrt(0.5);
4273 if(j==0) s*= sqrt(0.5);
4274 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Refine an already-quantised block (quantizer noise shaping).
 * Reconstructs the weighted residual ("rem") of the quantised block
 * against the original samples, then repeatedly tries +-1 changes to
 * individual coefficient levels, scoring each candidate with
 * mpvencdsp.try_8x8basis() plus the VLC bit-cost delta, and applies the
 * best improvement via add_8x8basis() until nothing improves.
 * @return the (possibly changed) index of the last non-zero coefficient.
 * NOTE(review): several declarations, loop headers and the acceptance
 * loop's control lines are elided in this excerpt. */
4281 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4282 int16_t *block, int16_t *weight, int16_t *orig,
4285 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4286 const uint8_t *scantable;
4287 const uint8_t *perm_scantable;
4288 // unsigned int threshold1, threshold2;
4293 int qmul, qadd, start_i, last_non_zero, i, dc;
4295 uint8_t * last_length;
4297 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
4299 if(basis[0][0] == 0)
4300 build_basis(s->idsp.idct_permutation);
/* intra: pick scantables, DC quantiser q and the intra VLC length tables */
4305 scantable= s->intra_scantable.scantable;
4306 perm_scantable= s->intra_scantable.permutated;
4313 /* For AIC we skip quant/dequant of INTRADC */
4317 q <<= RECON_SHIFT-3;
4318 /* note: block[0] is assumed to be positive */
4320 // block[0] = (block[0] + (q >> 1)) / q;
4322 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4323 // bias= 1<<(QMAT_SHIFT-1);
4324 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4325 length = s->intra_chroma_ac_vlc_length;
4326 last_length= s->intra_chroma_ac_vlc_last_length;
4328 length = s->intra_ac_vlc_length;
4329 last_length= s->intra_ac_vlc_last_length;
/* inter: use the inter scantables and VLC length tables */
4332 scantable= s->inter_scantable.scantable;
4333 perm_scantable= s->inter_scantable.permutated;
4336 length = s->inter_ac_vlc_length;
4337 last_length= s->inter_ac_vlc_last_length;
4339 last_non_zero = s->block_last_index[n];
/* residual of the zero block against the original, in RECON_SHIFT fixed point */
4341 dc += (1<<(RECON_SHIFT-1));
4342 for(i=0; i<64; i++){
4343 rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
/* squash the perceptual weights into the 16..63 range */
4347 for(i=0; i<64; i++){
4352 w= FFABS(weight[i]) + qns*one;
4353 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4356 // w=weight[i] = (63*qns + (w/2)) / w;
4359 av_assert2(w<(1<<6));
4362 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* subtract the currently-quantised coefficients from the residual and
 * build the run-length table of existing non-zero coefficients */
4366 for(i=start_i; i<=last_non_zero; i++){
4367 int j= perm_scantable[i];
4368 const int level= block[j];
4372 if(level<0) coeff= qmul*level - qadd;
4373 else coeff= qmul*level + qadd;
4374 run_tab[rle_index++]=run;
4377 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
/* main improvement loop: try +-1 on each coefficient, keep the best */
4384 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4387 int run2, best_unquant_change=0, analyze_gradient;
4388 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
4390 if(analyze_gradient){
4391 for(i=0; i<64; i++){
4394 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
/* DC (intra only): try changing the DC level by +-1 */
4400 const int level= block[0];
4401 int change, old_coeff;
4403 av_assert2(s->mb_intra);
4407 for(change=-1; change<=1; change+=2){
4408 int new_level= level + change;
4409 int score, new_coeff;
4411 new_coeff= q*new_level;
4412 if(new_coeff >= 2048 || new_coeff < 0)
4415 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4416 new_coeff - old_coeff);
4417 if(score<best_score){
4420 best_change= change;
4421 best_unquant_change= new_coeff - old_coeff;
4428 run2= run_tab[rle_index++];
/* AC coefficients: score each +-1 change, including the VLC bit delta
 * for changed (run,level) pairs and for merged/split runs */
4432 for(i=start_i; i<64; i++){
4433 int j= perm_scantable[i];
4434 const int level= block[j];
4435 int change, old_coeff;
4437 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4441 if(level<0) old_coeff= qmul*level - qadd;
4442 else old_coeff= qmul*level + qadd;
4443 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4447 av_assert2(run2>=0 || i >= last_non_zero );
4450 for(change=-1; change<=1; change+=2){
4451 int new_level= level + change;
4452 int score, new_coeff, unquant_change;
4455 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4459 if(new_level<0) new_coeff= qmul*new_level - qadd;
4460 else new_coeff= qmul*new_level + qadd;
4461 if(new_coeff >= 2048 || new_coeff <= -2048)
4463 //FIXME check for overflow
4466 if(level < 63 && level > -63){
4467 if(i < last_non_zero)
4468 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4469 - length[UNI_AC_ENC_INDEX(run, level+64)];
4471 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4472 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
4475 av_assert2(FFABS(new_level)==1);
4477 if(analyze_gradient){
4478 int g= d1[ scantable[i] ];
4479 if(g && (g^new_level) >= 0)
/* a zero coefficient becomes non-zero: runs are split */
4483 if(i < last_non_zero){
4484 int next_i= i + run2 + 1;
4485 int next_level= block[ perm_scantable[next_i] ] + 64;
4487 if(next_level&(~127))
4490 if(next_i < last_non_zero)
4491 score += length[UNI_AC_ENC_INDEX(run, 65)]
4492 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4493 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4495 score += length[UNI_AC_ENC_INDEX(run, 65)]
4496 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4497 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4499 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4501 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4502 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* a +-1 coefficient becomes zero: runs are merged */
4508 av_assert2(FFABS(level)==1);
4510 if(i < last_non_zero){
4511 int next_i= i + run2 + 1;
4512 int next_level= block[ perm_scantable[next_i] ] + 64;
4514 if(next_level&(~127))
4517 if(next_i < last_non_zero)
4518 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4519 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4520 - length[UNI_AC_ENC_INDEX(run, 65)];
4522 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4523 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4524 - length[UNI_AC_ENC_INDEX(run, 65)];
4526 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4528 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4529 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4536 unquant_change= new_coeff - old_coeff;
4537 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4539 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4541 if(score<best_score){
4544 best_change= change;
4545 best_unquant_change= unquant_change;
4549 prev_level= level + 64;
4550 if(prev_level&(~127))
/* apply the best change found and update last_non_zero + residual */
4560 int j= perm_scantable[ best_coeff ];
4562 block[j] += best_change;
4564 if(best_coeff > last_non_zero){
4565 last_non_zero= best_coeff;
4566 av_assert2(block[j]);
4568 for(; last_non_zero>=start_i; last_non_zero--){
4569 if(block[perm_scantable[last_non_zero]])
/* rebuild the run-length table after the change */
4576 for(i=start_i; i<=last_non_zero; i++){
4577 int j= perm_scantable[i];
4578 const int level= block[j];
4581 run_tab[rle_index++]=run;
4588 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4594 return last_non_zero;
4598 * Permute an 8x8 block according to permutation.
4599 * @param block the block which will be permuted according to
4600 * the given permutation vector
4601 * @param permutation the permutation vector
4602 * @param last the last non zero coefficient in scantable order, used to
4603 * speed the permutation up
4604 * @param scantable the used scantable, this is only used to speed the
4605 * permutation up, the block is not (inverse) permutated
4606 * to scantable order!
/* NOTE(review): the temp-buffer declaration and the copy-out loop body
 * are elided in this excerpt (embedded line numbers jump). */
4608 void ff_block_permute(int16_t *block, uint8_t *permutation,
4609 const uint8_t *scantable, int last)
4616 //FIXME it is ok but not clean and might fail for some permutations
4617 // if (permutation[1] == 1)
/* first pass: visit the non-zero coefficients in scantable order */
4620 for (i = 0; i <= last; i++) {
4621 const int j = scantable[i];
/* second pass: write them back at their permuted positions */
4626 for (i = 0; i <= last; i++) {
4627 const int j = scantable[i];
4628 const int perm_j = permutation[j];
4629 block[perm_j] = temp[j];
/* Plain (non-trellis) quantisation of one 8x8 block: forward DCT,
 * optional denoising, biased scalar quantisation against the intra/inter
 * matrix, and finally a coefficient permutation matching the IDCT in use.
 * @param n       block index; n<4 selects the luma intra matrix, else chroma
 * @param overflow set nonzero if the largest level exceeds s->max_qcoeff
 * @return index of the last non-zero coefficient
 * NOTE(review): some lines (declarations, DC handling, the inner stores)
 * are elided in this excerpt. */
4633 int ff_dct_quantize_c(MpegEncContext *s,
4634 int16_t *block, int n,
4635 int qscale, int *overflow)
4637 int i, j, level, last_non_zero, q, start_i;
4639 const uint8_t *scantable;
4642 unsigned int threshold1, threshold2;
4644 s->fdsp.fdct(block);
4646 if(s->dct_error_sum)
4647 s->denoise_dct(s, block);
/* intra: quantise DC separately and use the intra matrices/bias */
4650 scantable= s->intra_scantable.scantable;
4658 /* For AIC we skip quant/dequant of INTRADC */
4661 /* note: block[0] is assumed to be positive */
4662 block[0] = (block[0] + (q >> 1)) / q;
4665 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4666 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
/* inter: use the inter matrix/bias */
4668 scantable= s->inter_scantable.scantable;
4671 qmat = s->q_inter_matrix[qscale];
4672 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4674 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4675 threshold2= (threshold1<<1);
/* scan back from 63 for the last coefficient that quantises to non-zero */
4676 for(i=63;i>=start_i;i--) {
4678 level = block[j] * qmat[j];
4680 if(((unsigned)(level+threshold1))>threshold2){
/* quantise every coefficient up to last_non_zero */
4687 for(i=start_i; i<=last_non_zero; i++) {
4689 level = block[j] * qmat[j];
4691 // if( bias+level >= (1<<QMAT_SHIFT)
4692 // || bias-level >= (1<<QMAT_SHIFT)){
4693 if(((unsigned)(level+threshold1))>threshold2){
4695 level= (bias + level)>>QMAT_SHIFT;
4698 level= (bias - level)>>QMAT_SHIFT;
4706 *overflow= s->max_qcoeff < max; //overflow might have happened
4708 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4709 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4710 ff_block_permute(block, s->idsp.idct_permutation,
4711 scantable, last_non_zero);
4713 return last_non_zero;
/* Helpers for the AVOption tables below: field offset into MpegEncContext
 * and the common video/encoding option flags. */
4716 #define OFFSET(x) offsetof(MpegEncContext, x)
4717 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options of the H.263 encoder.
 * NOTE(review): the table terminator and closing brace are elided here. */
4718 static const AVOption h263_options[] = {
4719 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4720 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass exposing h263_options through the AVOption API. */
4725 static const AVClass h263_class = {
4726 .class_name = "H.263 encoder",
4727 .item_name = av_default_item_name,
4728 .option = h263_options,
4729 .version = LIBAVUTIL_VERSION_INT,
/* H.263 / H.263-1996 encoder registration (shared mpegvideo entry points). */
4732 AVCodec ff_h263_encoder = {
4734 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4735 .type = AVMEDIA_TYPE_VIDEO,
4736 .id = AV_CODEC_ID_H263,
4737 .priv_data_size = sizeof(MpegEncContext),
4738 .init = ff_mpv_encode_init,
4739 .encode2 = ff_mpv_encode_picture,
4740 .close = ff_mpv_encode_end,
4741 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4742 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4743 .priv_class = &h263_class,
/* Private options of the H.263+ encoder.
 * NOTE(review): the table terminator and closing brace are elided here. */
4746 static const AVOption h263p_options[] = {
4747 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4748 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4749 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4750 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
/* AVClass exposing h263p_options for the H.263+ encoder. */
4754 static const AVClass h263p_class = {
4755 .class_name = "H.263p encoder",
4756 .item_name = av_default_item_name,
4757 .option = h263p_options,
4758 .version = LIBAVUTIL_VERSION_INT,
/* H.263+ / H.263-1998 encoder. Same shared MPEG-video core as the plain
 * H.263 encoder, but additionally advertises AV_CODEC_CAP_SLICE_THREADS
 * (slice-parallel encoding). yuv420p input only. */
4761 AVCodec ff_h263p_encoder = {
4763 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4764 .type = AVMEDIA_TYPE_VIDEO,
4765 .id = AV_CODEC_ID_H263P,
4766 .priv_data_size = sizeof(MpegEncContext),
4767 .init = ff_mpv_encode_init,
4768 .encode2 = ff_mpv_encode_picture,
4769 .close = ff_mpv_encode_end,
4770 .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4771 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4772 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4773 .priv_class = &h263p_class,
/* AVClass for the MSMPEG-4 v2 encoder; unlike the H.263 variants it has no
 * codec-specific options and reuses the generic mpegvideo option table. */
4776 static const AVClass msmpeg4v2_class = {
4777 .class_name = "msmpeg4v2 encoder",
4778 .item_name = av_default_item_name,
4779 .option = ff_mpv_generic_options,
4780 .version = LIBAVUTIL_VERSION_INT,
/* MPEG-4 part 2, Microsoft variant version 2 encoder — another thin
 * registration over the shared MPEG-video encoder core; yuv420p only. */
4783 AVCodec ff_msmpeg4v2_encoder = {
4784 .name = "msmpeg4v2",
4785 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4786 .type = AVMEDIA_TYPE_VIDEO,
4787 .id = AV_CODEC_ID_MSMPEG4V2,
4788 .priv_data_size = sizeof(MpegEncContext),
4789 .init = ff_mpv_encode_init,
4790 .encode2 = ff_mpv_encode_picture,
4791 .close = ff_mpv_encode_end,
4792 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4793 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4794 .priv_class = &msmpeg4v2_class,
/* AVClass for the MSMPEG-4 v3 encoder; generic mpegvideo options only. */
4797 static const AVClass msmpeg4v3_class = {
4798 .class_name = "msmpeg4v3 encoder",
4799 .item_name = av_default_item_name,
4800 .option = ff_mpv_generic_options,
4801 .version = LIBAVUTIL_VERSION_INT,
/* MPEG-4 part 2, Microsoft variant version 3 encoder — shared MPEG-video
 * core, yuv420p only; mirrors the msmpeg4v2 registration above. */
4804 AVCodec ff_msmpeg4v3_encoder = {
4806 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4807 .type = AVMEDIA_TYPE_VIDEO,
4808 .id = AV_CODEC_ID_MSMPEG4V3,
4809 .priv_data_size = sizeof(MpegEncContext),
4810 .init = ff_mpv_encode_init,
4811 .encode2 = ff_mpv_encode_picture,
4812 .close = ff_mpv_encode_end,
4813 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4814 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4815 .priv_class = &msmpeg4v3_class,
/* AVClass for the WMV1 (Windows Media Video 7) encoder; generic mpegvideo
 * options only. */
4818 static const AVClass wmv1_class = {
4819 .class_name = "wmv1 encoder",
4820 .item_name = av_default_item_name,
4821 .option = ff_mpv_generic_options,
4822 .version = LIBAVUTIL_VERSION_INT,
4825 AVCodec ff_wmv1_encoder = {
4827 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4828 .type = AVMEDIA_TYPE_VIDEO,
4829 .id = AV_CODEC_ID_WMV1,
4830 .priv_data_size = sizeof(MpegEncContext),
4831 .init = ff_mpv_encode_init,
4832 .encode2 = ff_mpv_encode_picture,
4833 .close = ff_mpv_encode_end,
4834 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4835 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4836 .priv_class = &wmv1_class,