2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/mem_internal.h"
40 #include "libavutil/pixdesc.h"
41 #include "libavutil/opt.h"
46 #include "mpegvideo.h"
47 #include "mpegvideodata.h"
51 #include "mjpegenc_common.h"
53 #include "mpegutils.h"
55 #include "speedhqenc.h"
57 #include "pixblockdsp.h"
61 #include "aandcttab.h"
63 #include "mpeg4video.h"
65 #include "bytestream.h"
68 #include "packet_internal.h"
/* Fixed-point shift for the quantizer bias values used below. */
73 #define QUANT_BIAS_SHIFT 8
/* Shift used when building the 16-bit (MMX-friendly) quantization tables. */
75 #define QMAT_SHIFT_MMX 16
/* Forward declarations for encoder internals defined later in this file. */
78 static int encode_picture(MpegEncContext *s, int picture_number);
79 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
80 static int sse_mb(MpegEncContext *s);
81 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
82 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
/* Default ME penalty / fcode tables; wired into the context by
 * mpv_encode_defaults() (default_fcode_tab is also filled there). */
84 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
85 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
/* Generic AVOptions shared by the mpegvideo-based encoders.
 * NOTE(review): the initializer entries are elided in this listing. */
87 const AVOption ff_mpv_generic_options[] = {
/**
 * Build the per-qscale quantization multiplier tables from quant_matrix.
 *
 * For every qscale in [qmin, qmax] this fills qmat[] (32-bit reciprocal
 * multipliers) and, in the generic branch, qmat16[] (16-bit multipliers
 * plus a bias term).  The scaling chosen depends on which forward-DCT
 * implementation is active: islow/faan DCTs use the plain matrix, the
 * ifast DCT folds ff_aanscales[] into the divisor, and the generic case
 * produces both qmat and qmat16.  The non-linear MPEG-2 qscale table is
 * honoured via s->q_scale_type.  The trailing loop (from `for (i = intra ...`)
 * guards against 32-bit overflow of max * qmat and can emit a warning.
 * NOTE(review): several interior lines are elided in this listing.
 */
92 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
93 uint16_t (*qmat16)[2][64],
94 const uint16_t *quant_matrix,
95 int bias, int qmin, int qmax, int intra)
97 FDCTDSPContext *fdsp = &s->fdsp;
101 for (qscale = qmin; qscale <= qmax; qscale++) {
/* qscale2 is the effective (doubled or non-linear-mapped) quantizer. */
105 if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
106 else qscale2 = qscale << 1;
108 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
110 fdsp->fdct == ff_faandct ||
111 #endif /* CONFIG_FAANDCT */
112 fdsp->fdct == ff_jpeg_fdct_islow_10) {
113 for (i = 0; i < 64; i++) {
/* j: coefficient index after IDCT permutation. */
114 const int j = s->idsp.idct_permutation[i];
115 int64_t den = (int64_t) qscale2 * quant_matrix[j];
116 /* 16 <= qscale * quant_matrix[i] <= 7905
117 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
118 * 19952 <= x <= 249205026
119 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
120 * 3444240 >= (1 << 36) / (x) >= 275 */
122 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
124 } else if (fdsp->fdct == ff_fdct_ifast) {
125 for (i = 0; i < 64; i++) {
126 const int j = s->idsp.idct_permutation[i];
/* ifast DCT output is pre-scaled by ff_aanscales[], so fold it in. */
127 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
128 /* 16 <= qscale * quant_matrix[i] <= 7905
129 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
130 * 19952 <= x <= 249205026
131 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
132 * 3444240 >= (1 << 36) / (x) >= 275 */
134 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
/* Generic DCT: build both the 32-bit and the 16-bit tables. */
137 for (i = 0; i < 64; i++) {
138 const int j = s->idsp.idct_permutation[i];
139 int64_t den = (int64_t) qscale2 * quant_matrix[j];
140 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
141 * Assume x = qscale * quant_matrix[i]
143 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
144 * so 32768 >= (1 << 19) / (x) >= 67 */
145 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
146 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
147 // (qscale * quant_matrix[i]);
148 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
/* Keep the 16-bit multiplier strictly inside (0, 128*256). */
150 if (qmat16[qscale][0][i] == 0 ||
151 qmat16[qscale][0][i] == 128 * 256)
152 qmat16[qscale][0][i] = 128 * 256 - 1;
153 qmat16[qscale][1][i] =
154 ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
155 qmat16[qscale][0][i]);
/* Overflow guard: shrink while max * qmat would exceed INT_MAX. */
159 for (i = intra; i < 64; i++) {
161 if (fdsp->fdct == ff_fdct_ifast) {
162 max = (8191LL * ff_aanscales[i]) >> 14;
164 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
170 av_log(s->avctx, AV_LOG_INFO,
171 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/**
 * Derive s->qscale (and s->lambda2) from the current rate-control lambda.
 *
 * The first branch would pick the closest non-linear MPEG-2 qscale, but it
 * is deliberately compiled out by the "&& 0" below, so the linear mapping
 * near the bottom is always used, clipped to [qmin, qmax] (or 31 when
 * vbv_ignore_qmax is set).
 */
176 static inline void update_qscale(MpegEncContext *s)
178 if (s->q_scale_type == 1 && 0) {  /* dead code: "&& 0" disables this path */
180 int bestdiff=INT_MAX;
183 for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
184 int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
/* Skip entries outside the allowed qmin/qmax range. */
185 if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
186 (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
188 if (diff < bestdiff) {
/* Linear lambda -> qscale mapping (same constants as ff_init_qscale_tab). */
195 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
196 (FF_LAMBDA_SHIFT + 7);
197 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
/* Cache lambda^2 with rounding for the RD decisions. */
200 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/**
 * Write a 64-entry quantization matrix to the bitstream, 8 bits per
 * coefficient, in zigzag scan order.
 * NOTE(review): the matrix==NULL handling visible in the full file is
 * elided from this listing.
 */
204 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
210 for (i = 0; i < 64; i++) {
211 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
218 * init s->current_picture.qscale_table from s->lambda_table
220 void ff_init_qscale_tab(MpegEncContext *s)
222 int8_t * const qscale_table = s->current_picture.qscale_table;
/* Per-macroblock: map lambda to a qscale with the same linear formula as
 * update_qscale(), then clamp (lower bound avctx->qmin; upper bound on the
 * following, elided line). */
225 for (i = 0; i < s->mb_num; i++) {
226 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
227 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
228 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/**
 * Copy selected per-frame fields from src into dst so a duplicated
 * (slice-thread) context stays consistent after motion estimation.
 * The COPY macro is a plain field-by-field assignment.
 */
233 static void update_duplicate_context_after_me(MpegEncContext *dst,
236 #define COPY(a) dst->a= src->a
238 COPY(current_picture);
244 COPY(picture_in_gop_number);
245 COPY(gop_picture_number);
246 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
247 COPY(progressive_frame); // FIXME don't set in encode_header
248 COPY(partitioned_frame); // FIXME don't set in encode_header
253 * Set the given MpegEncContext to defaults for encoding.
254 * the changed fields will not depend upon the prior state of the MpegEncContext.
256 static void mpv_encode_defaults(MpegEncContext *s)
/* Start from the decoder/encoder-shared defaults. */
259 ff_mpv_common_defaults(s);
/* fcode 1 for the small-MV range around the center of the table. */
261 for (i = -16; i < 16; i++) {
262 default_fcode_tab[i + MAX_MV] = 1;
/* Point the context at the file-static default tables. */
264 s->me.mv_penalty = default_mv_penalty;
265 s->fcode_tab = default_fcode_tab;
/* Reset picture counters. */
267 s->input_picture_number = 0;
268 s->picture_in_gop_number = 0;
/**
 * Select the DCT quantization routines for this context.
 *
 * Lets the x86 code install optimized versions first, falls back to the C
 * implementation, and switches the main quantizer to the trellis variant
 * when avctx->trellis is set.  fast_dct_quantize is captured *before* the
 * trellis switch, so it always keeps the non-trellis routine.
 */
271 av_cold int ff_dct_encode_init(MpegEncContext *s)
274 ff_dct_encode_init_x86(s);
276 if (CONFIG_H263_ENCODER)
277 ff_h263dsp_init(&s->h263dsp);
278 if (!s->dct_quantize)
279 s->dct_quantize = ff_dct_quantize_c;
281 s->denoise_dct = denoise_dct_c;
282 s->fast_dct_quantize = s->dct_quantize;
283 if (s->avctx->trellis)
284 s->dct_quantize = dct_quantize_trellis_c;
289 /* init video encoder */
/**
 * Initialize an mpegvideo-family encoder.
 *
 * Validates the user configuration (pixel format per codec, GOP size,
 * B-frame count, rate-control parameters, per-codec resolution and feature
 * limits), configures the per-codec output format, initializes the DSP
 * contexts, allocates the quantization matrices and picture arrays, and
 * starts the rate controller.  Returns 0 on success or a negative AVERROR.
 * NOTE(review): many interior lines (breaks, braces, some statements) are
 * elided in this listing.
 */
290 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
292 MpegEncContext *s = avctx->priv_data;
293 AVCPBProperties *cpb_props;
294 int i, ret, format_supported;
296 mpv_encode_defaults(s);
/* ---- pixel-format validation, per codec ---- */
298 switch (avctx->codec_id) {
299 case AV_CODEC_ID_MPEG2VIDEO:
300 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
301 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
302 av_log(avctx, AV_LOG_ERROR,
303 "only YUV420 and YUV422 are supported\n");
304 return AVERROR(EINVAL);
307 case AV_CODEC_ID_MJPEG:
308 case AV_CODEC_ID_AMV:
309 format_supported = 0;
310 /* JPEG color space */
311 if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
312 avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
313 avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
314 (avctx->color_range == AVCOL_RANGE_JPEG &&
315 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
316 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
317 avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
318 format_supported = 1;
319 /* MPEG color space */
320 else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
321 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
322 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
323 avctx->pix_fmt == AV_PIX_FMT_YUV444P))
324 format_supported = 1;
326 if (!format_supported) {
327 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
328 return AVERROR(EINVAL);
331 case AV_CODEC_ID_SPEEDHQ:
332 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
333 avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
334 avctx->pix_fmt != AV_PIX_FMT_YUV444P) {
335 av_log(avctx, AV_LOG_ERROR,
336 "only YUV420/YUV422/YUV444 are supported (no alpha support yet)\n");
337 return AVERROR(EINVAL);
/* default case: everything else is 4:2:0 only */
341 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
342 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
343 return AVERROR(EINVAL);
/* ---- derive chroma subsampling from the pixel format ---- */
347 switch (avctx->pix_fmt) {
348 case AV_PIX_FMT_YUVJ444P:
349 case AV_PIX_FMT_YUV444P:
350 s->chroma_format = CHROMA_444;
352 case AV_PIX_FMT_YUVJ422P:
353 case AV_PIX_FMT_YUV422P:
354 s->chroma_format = CHROMA_422;
356 case AV_PIX_FMT_YUVJ420P:
357 case AV_PIX_FMT_YUV420P:
359 s->chroma_format = CHROMA_420;
363 avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
/* ---- import deprecated public options into the private context ---- */
365 #if FF_API_PRIVATE_OPT
366 FF_DISABLE_DEPRECATION_WARNINGS
367 if (avctx->rtp_payload_size)
368 s->rtp_payload_size = avctx->rtp_payload_size;
369 if (avctx->me_penalty_compensation)
370 s->me_penalty_compensation = avctx->me_penalty_compensation;
372 s->me_pre = avctx->pre_me;
373 FF_ENABLE_DEPRECATION_WARNINGS
/* ---- basic parameters copied from the AVCodecContext ---- */
376 s->bit_rate = avctx->bit_rate;
377 s->width = avctx->width;
378 s->height = avctx->height;
379 if (avctx->gop_size > 600 &&
380 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
381 av_log(avctx, AV_LOG_WARNING,
382 "keyframe interval too large!, reducing it from %d to %d\n",
383 avctx->gop_size, 600);
384 avctx->gop_size = 600;
386 s->gop_size = avctx->gop_size;
388 if (avctx->max_b_frames > MAX_B_FRAMES) {
389 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
390 "is %d.\n", MAX_B_FRAMES);
391 avctx->max_b_frames = MAX_B_FRAMES;
393 s->max_b_frames = avctx->max_b_frames;
394 s->codec_id = avctx->codec->id;
395 s->strict_std_compliance = avctx->strict_std_compliance;
396 s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
397 s->rtp_mode = !!s->rtp_payload_size;
398 s->intra_dc_precision = avctx->intra_dc_precision;
400 // workaround some differences between how applications specify dc precision
401 if (s->intra_dc_precision < 0) {
402 s->intra_dc_precision += 8;
403 } else if (s->intra_dc_precision >= 8)
404 s->intra_dc_precision -= 8;
406 if (s->intra_dc_precision < 0) {
407 av_log(avctx, AV_LOG_ERROR,
408 "intra dc precision must be positive, note some applications use"
409 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
410 return AVERROR(EINVAL);
413 if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))
/* Only MPEG-2 allows a non-zero intra DC precision (up to 3). */
416 if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
417 av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
418 return AVERROR(EINVAL);
420 s->user_specified_pts = AV_NOPTS_VALUE;
422 if (s->gop_size <= 1) {
430 s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
/* Adaptive quant is enabled when any masking/RD option requests it. */
432 s->adaptive_quant = (avctx->lumi_masking ||
433 avctx->dark_masking ||
434 avctx->temporal_cplx_masking ||
435 avctx->spatial_cplx_masking ||
438 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
441 s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
/* ---- pick an automatic VBV buffer size if only max rate was given ---- */
443 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
444 switch(avctx->codec_id) {
445 case AV_CODEC_ID_MPEG1VIDEO:
446 case AV_CODEC_ID_MPEG2VIDEO:
447 avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
449 case AV_CODEC_ID_MPEG4:
450 case AV_CODEC_ID_MSMPEG4V1:
451 case AV_CODEC_ID_MSMPEG4V2:
452 case AV_CODEC_ID_MSMPEG4V3:
/* Piecewise-linear buffer size (in 16 KiB units) vs. max rate. */
453 if (avctx->rc_max_rate >= 15000000) {
454 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
455 } else if(avctx->rc_max_rate >= 2000000) {
456 avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
457 } else if(avctx->rc_max_rate >= 384000) {
458 avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
460 avctx->rc_buffer_size = 40;
461 avctx->rc_buffer_size *= 16384;
464 if (avctx->rc_buffer_size) {
465 av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
/* ---- rate-control sanity checks ---- */
469 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
470 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
471 return AVERROR(EINVAL);
474 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
475 av_log(avctx, AV_LOG_INFO,
476 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
479 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
480 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
481 return AVERROR(EINVAL);
484 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
485 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
486 return AVERROR(EINVAL);
489 if (avctx->rc_max_rate &&
490 avctx->rc_max_rate == avctx->bit_rate &&
491 avctx->rc_max_rate != avctx->rc_min_rate) {
492 av_log(avctx, AV_LOG_INFO,
493 "impossible bitrate constraints, this will fail\n");
496 if (avctx->rc_buffer_size &&
497 avctx->bit_rate * (int64_t)avctx->time_base.num >
498 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
499 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
500 return AVERROR(EINVAL);
503 if (!s->fixed_qscale &&
504 avctx->bit_rate * av_q2d(avctx->time_base) >
505 avctx->bit_rate_tolerance) {
506 av_log(avctx, AV_LOG_WARNING,
507 "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
508 avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
/* MPEG-1/2 CBR: warn when the VBV buffer is too large to express vbv_delay. */
511 if (avctx->rc_max_rate &&
512 avctx->rc_min_rate == avctx->rc_max_rate &&
513 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
514 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
515 90000LL * (avctx->rc_buffer_size - 1) >
516 avctx->rc_max_rate * 0xFFFFLL) {
517 av_log(avctx, AV_LOG_INFO,
518 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
519 "specified vbv buffer is too large for the given bitrate!\n");
/* ---- feature/codec compatibility checks ---- */
522 if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
523 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
524 s->codec_id != AV_CODEC_ID_FLV1) {
525 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
526 return AVERROR(EINVAL);
529 if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
530 av_log(avctx, AV_LOG_ERROR,
531 "OBMC is only supported with simple mb decision\n");
532 return AVERROR(EINVAL);
535 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
536 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
537 return AVERROR(EINVAL);
540 if (s->max_b_frames &&
541 s->codec_id != AV_CODEC_ID_MPEG4 &&
542 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
543 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
544 av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
545 return AVERROR(EINVAL);
547 if (s->max_b_frames < 0) {
548 av_log(avctx, AV_LOG_ERROR,
549 "max b frames must be 0 or positive for mpegvideo based encoders\n");
550 return AVERROR(EINVAL);
/* Sample aspect ratio: H.263/MPEG-4 headers store it in 8 bits each. */
553 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
554 s->codec_id == AV_CODEC_ID_H263 ||
555 s->codec_id == AV_CODEC_ID_H263P) &&
556 (avctx->sample_aspect_ratio.num > 255 ||
557 avctx->sample_aspect_ratio.den > 255)) {
558 av_log(avctx, AV_LOG_WARNING,
559 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
560 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
561 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
562 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
/* ---- per-codec resolution / alignment limits ---- */
565 if ((s->codec_id == AV_CODEC_ID_H263 ||
566 s->codec_id == AV_CODEC_ID_H263P) &&
567 (avctx->width > 2048 ||
568 avctx->height > 1152 )) {
569 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
570 return AVERROR(EINVAL);
572 if ((s->codec_id == AV_CODEC_ID_H263 ||
573 s->codec_id == AV_CODEC_ID_H263P) &&
574 ((avctx->width &3) ||
575 (avctx->height&3) )) {
576 av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
577 return AVERROR(EINVAL);
580 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
581 (avctx->width > 4095 ||
582 avctx->height > 4095 )) {
583 av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
584 return AVERROR(EINVAL);
587 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
588 (avctx->width > 16383 ||
589 avctx->height > 16383 )) {
590 av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
591 return AVERROR(EINVAL);
594 if (s->codec_id == AV_CODEC_ID_RV10 &&
596 avctx->height&15 )) {
597 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
598 return AVERROR(EINVAL);
601 if (s->codec_id == AV_CODEC_ID_RV20 &&
604 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
605 return AVERROR(EINVAL);
608 if ((s->codec_id == AV_CODEC_ID_WMV1 ||
609 s->codec_id == AV_CODEC_ID_WMV2) &&
611 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
612 return AVERROR(EINVAL);
615 if ((avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
616 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
617 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
618 return AVERROR(EINVAL);
/* ---- more deprecated option imports and RD-flag checks ---- */
621 #if FF_API_PRIVATE_OPT
622 FF_DISABLE_DEPRECATION_WARNINGS
623 if (avctx->mpeg_quant)
624 s->mpeg_quant = avctx->mpeg_quant;
625 FF_ENABLE_DEPRECATION_WARNINGS
628 // FIXME mpeg2 uses that too
629 if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
630 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
631 av_log(avctx, AV_LOG_ERROR,
632 "mpeg2 style quantization not supported by codec\n");
633 return AVERROR(EINVAL);
636 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
637 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
638 return AVERROR(EINVAL);
641 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
642 avctx->mb_decision != FF_MB_DECISION_RD) {
643 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
644 return AVERROR(EINVAL);
647 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
648 (s->codec_id == AV_CODEC_ID_AMV ||
649 s->codec_id == AV_CODEC_ID_MJPEG)) {
650 // Used to produce garbage with MJPEG.
651 av_log(avctx, AV_LOG_ERROR,
652 "QP RD is no longer compatible with MJPEG or AMV\n");
653 return AVERROR(EINVAL);
656 #if FF_API_PRIVATE_OPT
657 FF_DISABLE_DEPRECATION_WARNINGS
658 if (avctx->scenechange_threshold)
659 s->scenechange_threshold = avctx->scenechange_threshold;
660 FF_ENABLE_DEPRECATION_WARNINGS
663 if (s->scenechange_threshold < 1000000000 &&
664 (avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
665 av_log(avctx, AV_LOG_ERROR,
666 "closed gop with scene change detection are not supported yet, "
667 "set threshold to 1000000000\n");
668 return AVERROR_PATCHWELCOME;
671 if (avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
672 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
673 s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
674 av_log(avctx, AV_LOG_ERROR,
675 "low delay forcing is only available for mpeg2, "
676 "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
677 return AVERROR(EINVAL);
679 if (s->max_b_frames != 0) {
680 av_log(avctx, AV_LOG_ERROR,
681 "B-frames cannot be used with low delay\n");
682 return AVERROR(EINVAL);
686 if (s->q_scale_type == 1) {
687 if (avctx->qmax > 28) {
688 av_log(avctx, AV_LOG_ERROR,
689 "non linear quant only supports qmax <= 28 currently\n");
690 return AVERROR_PATCHWELCOME;
694 if (avctx->slices > 1 &&
695 (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
696 av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
697 return AVERROR(EINVAL);
700 if (avctx->thread_count > 1 &&
701 s->codec_id != AV_CODEC_ID_MPEG4 &&
702 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
703 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
704 s->codec_id != AV_CODEC_ID_MJPEG &&
705 (s->codec_id != AV_CODEC_ID_H263P)) {
706 av_log(avctx, AV_LOG_ERROR,
707 "multi threaded encoding not supported by codec\n");
708 return AVERROR_PATCHWELCOME;
711 if (avctx->thread_count < 1) {
712 av_log(avctx, AV_LOG_ERROR,
713 "automatic thread number detection not supported by codec, "
715 return AVERROR_PATCHWELCOME;
718 if (!avctx->time_base.den || !avctx->time_base.num) {
719 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
720 return AVERROR(EINVAL);
723 #if FF_API_PRIVATE_OPT
724 FF_DISABLE_DEPRECATION_WARNINGS
725 if (avctx->b_frame_strategy)
726 s->b_frame_strategy = avctx->b_frame_strategy;
727 if (avctx->b_sensitivity != 40)
728 s->b_sensitivity = avctx->b_sensitivity;
729 FF_ENABLE_DEPRECATION_WARNINGS
732 if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
733 av_log(avctx, AV_LOG_INFO,
734 "notice: b_frame_strategy only affects the first pass\n");
735 s->b_frame_strategy = 0;
/* Reduce the time base by its GCD so headers use the smallest values. */
738 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
740 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
741 avctx->time_base.den /= i;
742 avctx->time_base.num /= i;
/* ---- quantizer bias: MPEG-style rounding vs. H.263-style ---- */
746 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
747 // (a + x * 3 / 8) / x
748 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
749 s->inter_quant_bias = 0;
751 s->intra_quant_bias = 0;
753 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
756 if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
757 av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
758 return AVERROR(EINVAL);
761 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
/* MPEG-4 stores the time base denominator in 16 bits. */
763 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
764 avctx->time_base.den > (1 << 16) - 1) {
765 av_log(avctx, AV_LOG_ERROR,
766 "timebase %d/%d not supported by MPEG 4 standard, "
767 "the maximum admitted value for the timebase denominator "
768 "is %d\n", avctx->time_base.num, avctx->time_base.den,
770 return AVERROR(EINVAL);
772 s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
/* ---- per-codec output format and feature setup ---- */
774 switch (avctx->codec->id) {
775 case AV_CODEC_ID_MPEG1VIDEO:
776 s->out_format = FMT_MPEG1;
777 s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
778 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
780 case AV_CODEC_ID_MPEG2VIDEO:
781 s->out_format = FMT_MPEG1;
782 s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
783 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
786 case AV_CODEC_ID_MJPEG:
787 case AV_CODEC_ID_AMV:
788 s->out_format = FMT_MJPEG;
789 s->intra_only = 1; /* force intra only for jpeg */
790 if (!CONFIG_MJPEG_ENCODER)
791 return AVERROR_ENCODER_NOT_FOUND;
792 if ((ret = ff_mjpeg_encode_init(s)) < 0)
797 case AV_CODEC_ID_SPEEDHQ:
798 s->out_format = FMT_SPEEDHQ;
799 s->intra_only = 1; /* force intra only for SHQ */
800 if (!CONFIG_SPEEDHQ_ENCODER)
801 return AVERROR_ENCODER_NOT_FOUND;
802 if ((ret = ff_speedhq_encode_init(s)) < 0)
807 case AV_CODEC_ID_H261:
808 if (!CONFIG_H261_ENCODER)
809 return AVERROR_ENCODER_NOT_FOUND;
810 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
811 av_log(avctx, AV_LOG_ERROR,
812 "The specified picture size of %dx%d is not valid for the "
813 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
814 s->width, s->height);
815 return AVERROR(EINVAL);
817 s->out_format = FMT_H261;
820 s->rtp_mode = 0; /* Sliced encoding not supported */
822 case AV_CODEC_ID_H263:
823 if (!CONFIG_H263_ENCODER)
824 return AVERROR_ENCODER_NOT_FOUND;
825 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
826 s->width, s->height) == 8) {
827 av_log(avctx, AV_LOG_ERROR,
828 "The specified picture size of %dx%d is not valid for "
829 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
830 "352x288, 704x576, and 1408x1152. "
831 "Try H.263+.\n", s->width, s->height);
832 return AVERROR(EINVAL);
834 s->out_format = FMT_H263;
838 case AV_CODEC_ID_H263P:
839 s->out_format = FMT_H263;
842 s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
843 s->modified_quant = s->h263_aic;
844 s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
845 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
848 /* These are just to be sure */
852 case AV_CODEC_ID_FLV1:
853 s->out_format = FMT_H263;
854 s->h263_flv = 2; /* format = 1; 11-bit codes */
855 s->unrestricted_mv = 1;
856 s->rtp_mode = 0; /* don't allow GOB */
860 case AV_CODEC_ID_RV10:
861 s->out_format = FMT_H263;
865 case AV_CODEC_ID_RV20:
866 s->out_format = FMT_H263;
869 s->modified_quant = 1;
873 s->unrestricted_mv = 0;
875 case AV_CODEC_ID_MPEG4:
876 s->out_format = FMT_H263;
878 s->unrestricted_mv = 1;
879 s->low_delay = s->max_b_frames ? 0 : 1;
880 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
882 case AV_CODEC_ID_MSMPEG4V2:
883 s->out_format = FMT_H263;
885 s->unrestricted_mv = 1;
886 s->msmpeg4_version = 2;
890 case AV_CODEC_ID_MSMPEG4V3:
891 s->out_format = FMT_H263;
893 s->unrestricted_mv = 1;
894 s->msmpeg4_version = 3;
895 s->flipflop_rounding = 1;
899 case AV_CODEC_ID_WMV1:
900 s->out_format = FMT_H263;
902 s->unrestricted_mv = 1;
903 s->msmpeg4_version = 4;
904 s->flipflop_rounding = 1;
908 case AV_CODEC_ID_WMV2:
909 s->out_format = FMT_H263;
911 s->unrestricted_mv = 1;
912 s->msmpeg4_version = 5;
913 s->flipflop_rounding = 1;
918 return AVERROR(EINVAL);
921 #if FF_API_PRIVATE_OPT
922 FF_DISABLE_DEPRECATION_WARNINGS
923 if (avctx->noise_reduction)
924 s->noise_reduction = avctx->noise_reduction;
925 FF_ENABLE_DEPRECATION_WARNINGS
928 avctx->has_b_frames = !s->low_delay;
932 s->progressive_frame =
933 s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
934 AV_CODEC_FLAG_INTERLACED_ME) ||
/* ---- common state, DSP contexts, and table allocation ---- */
939 if ((ret = ff_mpv_common_init(s)) < 0)
942 ff_fdctdsp_init(&s->fdsp, avctx);
943 ff_me_cmp_init(&s->mecc, avctx);
944 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
945 ff_pixblockdsp_init(&s->pdsp, avctx);
946 ff_qpeldsp_init(&s->qdsp);
948 if (s->msmpeg4_version) {
949 int ac_stats_size = 2 * 2 * (MAX_LEVEL + 1) * (MAX_RUN + 1) * 2 * sizeof(int);
950 if (!(s->ac_stats = av_mallocz(ac_stats_size)))
951 return AVERROR(ENOMEM);
954 if (!(avctx->stats_out = av_mallocz(256)) ||
955 !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix, 32) ||
956 !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix, 32) ||
957 !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix, 32) ||
958 !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16, 32) ||
959 !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
960 !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16, 32) ||
961 !FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_PICTURE_COUNT) ||
962 !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_PICTURE_COUNT))
963 return AVERROR(ENOMEM);
965 if (s->noise_reduction) {
966 if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
967 return AVERROR(ENOMEM);
970 ff_dct_encode_init(s);
972 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
973 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
975 if (s->slice_context_count > 1) {
978 if (avctx->codec_id == AV_CODEC_ID_H263P)
979 s->h263_slice_structured = 1;
982 s->quant_precision = 5;
984 #if FF_API_PRIVATE_OPT
985 FF_DISABLE_DEPRECATION_WARNINGS
986 if (avctx->frame_skip_threshold)
987 s->frame_skip_threshold = avctx->frame_skip_threshold;
988 if (avctx->frame_skip_factor)
989 s->frame_skip_factor = avctx->frame_skip_factor;
990 if (avctx->frame_skip_exp)
991 s->frame_skip_exp = avctx->frame_skip_exp;
992 if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
993 s->frame_skip_cmp = avctx->frame_skip_cmp;
994 FF_ENABLE_DEPRECATION_WARNINGS
997 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, avctx->ildct_cmp);
998 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
/* ---- per-format encoder sub-initializers ---- */
1000 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
1001 ff_h261_encode_init(s);
1002 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
1003 ff_h263_encode_init(s);
1004 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
1005 if ((ret = ff_msmpeg4_encode_init(s)) < 0)
1007 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
1008 && s->out_format == FMT_MPEG1)
1009 ff_mpeg1_encode_init(s);
/* ---- choose default quantization matrices (permuted for the IDCT) ---- */
1012 for (i = 0; i < 64; i++) {
1013 int j = s->idsp.idct_permutation[i];
1014 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
1016 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
1017 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
1018 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1019 s->intra_matrix[j] =
1020 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1021 } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
1022 s->intra_matrix[j] =
1023 s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1026 s->chroma_intra_matrix[j] =
1027 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1028 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
/* User-supplied matrices override the defaults. */
1030 if (avctx->intra_matrix)
1031 s->intra_matrix[j] = avctx->intra_matrix[i];
1032 if (avctx->inter_matrix)
1033 s->inter_matrix[j] = avctx->inter_matrix[i];
1036 /* precompute matrix */
1037 /* for mjpeg, we do include qscale in the matrix */
1038 if (s->out_format != FMT_MJPEG) {
1039 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
1040 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1042 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
1043 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1047 if ((ret = ff_rate_control_init(s)) < 0)
1050 #if FF_API_PRIVATE_OPT
1051 FF_DISABLE_DEPRECATION_WARNINGS
1052 if (avctx->brd_scale)
1053 s->brd_scale = avctx->brd_scale;
1055 if (avctx->prediction_method)
1056 s->pred = avctx->prediction_method + 1;
1057 FF_ENABLE_DEPRECATION_WARNINGS
/* B-frame strategy 2 needs scratch frames (downscaled by brd_scale). */
1060 if (s->b_frame_strategy == 2) {
1061 for (i = 0; i < s->max_b_frames + 2; i++) {
1062 s->tmp_frames[i] = av_frame_alloc();
1063 if (!s->tmp_frames[i])
1064 return AVERROR(ENOMEM);
1066 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1067 s->tmp_frames[i]->width = s->width >> s->brd_scale;
1068 s->tmp_frames[i]->height = s->height >> s->brd_scale;
1070 ret = av_frame_get_buffer(s->tmp_frames[i], 0);
/* Export CPB (coded picture buffer) properties as side data. */
1076 cpb_props = ff_add_cpb_side_data(avctx);
1078 return AVERROR(ENOMEM);
1079 cpb_props->max_bitrate = avctx->rc_max_rate;
1080 cpb_props->min_bitrate = avctx->rc_min_rate;
1081 cpb_props->avg_bitrate = avctx->bit_rate;
1082 cpb_props->buffer_size = avctx->rc_buffer_size;
/**
 * Free all encoder state allocated by ff_mpv_encode_init(): rate control,
 * common mpegvideo state, MJPEG tables, scratch frames, quantization
 * matrices, picture arrays, and stats buffers.
 */
1087 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1089 MpegEncContext *s = avctx->priv_data;
1092 ff_rate_control_uninit(s);
1094 ff_mpv_common_end(s);
1095 if (CONFIG_MJPEG_ENCODER &&
1096 s->out_format == FMT_MJPEG)
1097 ff_mjpeg_encode_close(s);
1099 av_freep(&avctx->extradata);
1101 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1102 av_frame_free(&s->tmp_frames[i]);
1104 ff_free_picture_tables(&s->new_picture);
1105 ff_mpeg_unref_picture(avctx, &s->new_picture);
/* The chroma matrices may alias the luma ones; only free them when they
 * are separate allocations, then clear the pointers either way. */
1107 av_freep(&avctx->stats_out);
1108 av_freep(&s->ac_stats);
1110 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1111 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1112 s->q_chroma_intra_matrix= NULL;
1113 s->q_chroma_intra_matrix16= NULL;
1114 av_freep(&s->q_intra_matrix);
1115 av_freep(&s->q_inter_matrix);
1116 av_freep(&s->q_intra_matrix16);
1117 av_freep(&s->q_inter_matrix16);
1118 av_freep(&s->input_picture);
1119 av_freep(&s->reordered_input_picture);
1120 av_freep(&s->dct_offset);
/* Sum of absolute errors of a 16x16 block against a constant reference
 * value `ref` (typically the block mean — see get_intra_count()).
 * NOTE(review): accumulator declaration and return are in dropped lines. */
1125 static int get_sae(uint8_t *src, int ref, int stride)
1130 for (y = 0; y < 16; y++) {
1131 for (x = 0; x < 16; x++) {
/* Deviation of each pixel from the flat reference level. */
1132 acc += FFABS(src[x + y * stride] - ref);
/* Heuristic count of 16x16 macroblocks that look cheaper to code as intra:
 * for each block, compare inter cost (SAD vs. `ref`) against intra cost
 * (SAE vs. the block's own mean). Used for scene-change-style decisions
 * (b_frame_strategy == 1 scoring). */
1139 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1140 uint8_t *ref, int stride)
/* Round dimensions down to whole macroblocks. */
1146 h = s->height & ~15;
1148 for (y = 0; y < h; y += 16) {
1149 for (x = 0; x < w; x += 16) {
1150 int offset = x + y * stride;
1151 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
/* Block mean, rounded: pix_sum over 256 pixels, +128 for rounding. */
1153 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1154 int sae = get_sae(src + offset, mean, stride);
/* +500 bias: prefer inter unless intra is clearly better. */
1156 acc += sae + 500 < sad;
/* Thin wrapper around ff_alloc_picture() that forwards the encoder's
 * geometry/stride state; `shared` selects referencing the caller's buffers
 * instead of allocating new ones. */
1162 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1164 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1165 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1166 s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1167 &s->linesize, &s->uvlinesize);
/* Accept one user-supplied frame into the encoder's input queue:
 * validates/derives its pts, either references the user's buffers directly
 * ("direct" path, when strides/alignment match) or copies the pixels into a
 * freshly allocated Picture, then inserts it into s->input_picture[] with
 * the B-frame reordering delay applied.
 * NOTE(review): several lines (pts/direct declarations, some branches and
 * closing braces) are dropped by the extraction; comments below only cover
 * the visible statements. */
1170 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1172 Picture *pic = NULL;
1174 int i, display_picture_number = 0, ret;
/* With B-frames the encoder must buffer max_b_frames inputs; otherwise one
 * frame of delay unless low_delay is set. */
1175 int encoding_delay = s->max_b_frames ? s->max_b_frames
1176 : (s->low_delay ? 0 : 1);
1177 int flush_offset = 1;
1182 display_picture_number = s->input_picture_number++;
/* --- pts handling: enforce monotonicity, derive dts_delta, or guess a pts
 * when the caller provided none. --- */
1184 if (pts != AV_NOPTS_VALUE) {
1185 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1186 int64_t last = s->user_specified_pts;
1189 av_log(s->avctx, AV_LOG_ERROR,
1190 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1192 return AVERROR(EINVAL);
1195 if (!s->low_delay && display_picture_number == 1)
1196 s->dts_delta = pts - last;
1198 s->user_specified_pts = pts;
1200 if (s->user_specified_pts != AV_NOPTS_VALUE) {
/* No pts given: extrapolate from the previous one. */
1201 s->user_specified_pts =
1202 pts = s->user_specified_pts + 1;
1203 av_log(s->avctx, AV_LOG_INFO,
1204 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1207 pts = display_picture_number;
/* --- decide whether the user's buffers can be used in place ("direct"):
 * requires matching strides, mb-aligned dimensions and aligned pointers. --- */
1211 if (!pic_arg->buf[0] ||
1212 pic_arg->linesize[0] != s->linesize ||
1213 pic_arg->linesize[1] != s->uvlinesize ||
1214 pic_arg->linesize[2] != s->uvlinesize)
1216 if ((s->width & 15) || (s->height & 15))
1218 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1220 if (s->linesize & (STRIDE_ALIGN-1))
1223 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1224 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1226 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1230 pic = &s->picture[i];
/* Direct: just take a reference on the caller's frame. */
1234 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1237 ret = alloc_picture(s, pic, direct);
1242 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1243 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1244 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1247 int h_chroma_shift, v_chroma_shift;
1248 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
/* --- copy path: plane-by-plane copy into the newly allocated picture. --- */
1252 for (i = 0; i < 3; i++) {
1253 int src_stride = pic_arg->linesize[i];
1254 int dst_stride = i ? s->uvlinesize : s->linesize;
1255 int h_shift = i ? h_chroma_shift : 0;
1256 int v_shift = i ? v_chroma_shift : 0;
1257 int w = s->width >> h_shift;
1258 int h = s->height >> v_shift;
1259 uint8_t *src = pic_arg->data[i];
1260 uint8_t *dst = pic->f->data[i];
/* Interlaced MPEG-2 with heavy bottom padding is a special case here
 * (presumably affects the vertical padding amount — dropped lines). */
1263 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1264 && !s->progressive_sequence
1265 && FFALIGN(s->height, 32) - s->height > 16)
1268 if (!s->avctx->rc_buffer_size)
1269 dst += INPLACE_OFFSET;
/* Equal strides: one bulk memcpy; else row-by-row. */
1271 if (src_stride == dst_stride)
1272 memcpy(dst, src, src_stride * h);
1275 uint8_t *dst2 = dst;
1277 memcpy(dst2, src, w);
/* Pad the copied plane out to macroblock boundaries. */
1282 if ((s->width & 15) || (s->height & (vpad-1))) {
1283 s->mpvencdsp.draw_edges(dst, dst_stride,
1293 ret = av_frame_copy_props(pic->f, pic_arg);
1297 pic->f->display_picture_number = display_picture_number;
1298 pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1300 /* Flushing: When we have not received enough input frames,
1301 * ensure s->input_picture[0] contains the first picture */
1302 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1303 if (s->input_picture[flush_offset])
1306 if (flush_offset <= 1)
1309 encoding_delay = encoding_delay - flush_offset + 1;
1312 /* shift buffer entries */
1313 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1314 s->input_picture[i - flush_offset] = s->input_picture[i];
1316 s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether frame `p` is similar enough to reference `ref` to be
 * skipped entirely (frame-skip rate control). Accumulates a per-8x8-block
 * difference metric over all three planes; how the per-block values `v`
 * are combined is selected by |frame_skip_exp| (max, sum of |v|, sum of
 * powers 2..4). Returns skip/no-skip based on threshold and factor
 * (return statements are in dropped lines). */
1321 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1325 int64_t score64 = 0;
1327 for (plane = 0; plane < 3; plane++) {
1328 const int stride = p->f->linesize[plane];
/* Luma is iterated at 2x the macroblock grid (16x16 -> four 8x8 blocks),
 * chroma at 1x. */
1329 const int bw = plane ? 1 : 2;
1330 for (y = 0; y < s->mb_height * bw; y++) {
1331 for (x = 0; x < s->mb_width * bw; x++) {
/* Non-shared pictures carry the INPLACE offset of 16 bytes. */
1332 int off = p->shared ? 0 : 16;
1333 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1334 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1335 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1337 switch (FFABS(s->frame_skip_exp)) {
1338 case 0: score = FFMAX(score, v); break;
1339 case 1: score += FFABS(v); break;
1340 case 2: score64 += v * (int64_t)v; break;
1341 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1342 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
/* Negative exponent: normalize by frame area and undo the power. */
1351 if (s->frame_skip_exp < 0)
1352 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1353 -1.0/s->frame_skip_exp);
1355 if (score64 < s->frame_skip_threshold)
/* Second test scales the skip factor by the current lambda. */
1357 if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
/* Run one frame through a scratch encoder context `c` using the
 * send/receive API and (in dropped lines) accumulate the produced packet
 * size; used by estimate_best_b_count() to price candidate GOP layouts.
 * Errors other than EAGAIN/EOF from receive are propagated. */
1362 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1364 AVPacket pkt = { 0 };
1368 av_init_packet(&pkt);
1370 ret = avcodec_send_frame(c, frame);
/* Drain all packets the encoder has ready for this input. */
1375 ret = avcodec_receive_packet(c, &pkt);
1378 av_packet_unref(&pkt);
1379 } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1386 static int estimate_best_b_count(MpegEncContext *s)
1388 const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1389 const int scale = s->brd_scale;
1390 int width = s->width >> scale;
1391 int height = s->height >> scale;
1392 int i, j, out_size, p_lambda, b_lambda, lambda2;
1393 int64_t best_rd = INT64_MAX;
1394 int best_b_count = -1;
1397 av_assert0(scale >= 0 && scale <= 3);
1400 //s->next_picture_ptr->quality;
1401 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1402 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1403 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1404 if (!b_lambda) // FIXME we should do this somewhere else
1405 b_lambda = p_lambda;
1406 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1409 for (i = 0; i < s->max_b_frames + 2; i++) {
1410 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1411 s->next_picture_ptr;
1414 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1415 pre_input = *pre_input_ptr;
1416 memcpy(data, pre_input_ptr->f->data, sizeof(data));
1418 if (!pre_input.shared && i) {
1419 data[0] += INPLACE_OFFSET;
1420 data[1] += INPLACE_OFFSET;
1421 data[2] += INPLACE_OFFSET;
1424 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1425 s->tmp_frames[i]->linesize[0],
1427 pre_input.f->linesize[0],
1429 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1430 s->tmp_frames[i]->linesize[1],
1432 pre_input.f->linesize[1],
1433 width >> 1, height >> 1);
1434 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1435 s->tmp_frames[i]->linesize[2],
1437 pre_input.f->linesize[2],
1438 width >> 1, height >> 1);
1442 for (j = 0; j < s->max_b_frames + 1; j++) {
1446 if (!s->input_picture[j])
1449 c = avcodec_alloc_context3(NULL);
1451 return AVERROR(ENOMEM);
1455 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1456 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1457 c->mb_decision = s->avctx->mb_decision;
1458 c->me_cmp = s->avctx->me_cmp;
1459 c->mb_cmp = s->avctx->mb_cmp;
1460 c->me_sub_cmp = s->avctx->me_sub_cmp;
1461 c->pix_fmt = AV_PIX_FMT_YUV420P;
1462 c->time_base = s->avctx->time_base;
1463 c->max_b_frames = s->max_b_frames;
1465 ret = avcodec_open2(c, codec, NULL);
1469 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1470 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1472 out_size = encode_frame(c, s->tmp_frames[0]);
1478 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1480 for (i = 0; i < s->max_b_frames + 1; i++) {
1481 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1483 s->tmp_frames[i + 1]->pict_type = is_p ?
1484 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1485 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1487 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1493 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1496 /* get the delayed frames */
1497 out_size = encode_frame(c, NULL);
1502 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1504 rd += c->error[0] + c->error[1] + c->error[2];
1512 avcodec_free_context(&c);
1517 return best_b_count;
/* Pick the next picture to encode: decides its coding type (I/P/B),
 * applies frame-skipping and the configured B-frame strategy, reorders
 * s->input_picture[] into s->reordered_input_picture[] (coded order), and
 * publishes the chosen picture as s->new_picture / s->current_picture_ptr.
 * NOTE(review): dropped lines hide some declarations and branch bodies;
 * comments cover only the visible logic. */
1520 static int select_input_picture(MpegEncContext *s)
/* Shift the coded-order queue down by one. */
1524 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1525 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1526 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1528 /* set next picture type & ordering */
1529 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
/* Frame skipping: drop the input if it is close enough to the last
 * reference and we are inside the GOP. */
1530 if (s->frame_skip_threshold || s->frame_skip_factor) {
1531 if (s->picture_in_gop_number < s->gop_size &&
1532 s->next_picture_ptr &&
1533 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1534 // FIXME check that the gop check above is +-1 correct
1535 av_frame_unref(s->input_picture[0]->f);
1537 ff_vbv_update(s, 0);
/* Force an I-frame when there is no reference yet or intra-only coding. */
1543 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1544 !s->next_picture_ptr || s->intra_only) {
1545 s->reordered_input_picture[0] = s->input_picture[0];
1546 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1547 s->reordered_input_picture[0]->f->coded_picture_number =
1548 s->coded_picture_number++;
/* Two-pass: picture types come from the first-pass stats. */
1552 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1553 for (i = 0; i < s->max_b_frames + 1; i++) {
1554 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1556 if (pict_num >= s->rc_context.num_entries)
1558 if (!s->input_picture[i]) {
1559 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1563 s->input_picture[i]->f->pict_type =
1564 s->rc_context.entry[pict_num].new_pict_type;
/* --- choose the number of B-frames per the configured strategy. --- */
1568 if (s->b_frame_strategy == 0) {
1569 b_frames = s->max_b_frames;
1570 while (b_frames && !s->input_picture[b_frames])
1572 } else if (s->b_frame_strategy == 1) {
/* Strategy 1: score each queued frame by its intra-block count vs. the
 * previous frame; stop at the first frame too different to bridge. */
1573 for (i = 1; i < s->max_b_frames + 1; i++) {
1574 if (s->input_picture[i] &&
1575 s->input_picture[i]->b_frame_score == 0) {
1576 s->input_picture[i]->b_frame_score =
1578 s->input_picture[i ]->f->data[0],
1579 s->input_picture[i - 1]->f->data[0],
1583 for (i = 0; i < s->max_b_frames + 1; i++) {
1584 if (!s->input_picture[i] ||
1585 s->input_picture[i]->b_frame_score - 1 >
1586 s->mb_num / s->b_sensitivity)
1590 b_frames = FFMAX(0, i - 1);
/* Reset scores so they are recomputed next time. */
1593 for (i = 0; i < b_frames + 1; i++) {
1594 s->input_picture[i]->b_frame_score = 0;
1596 } else if (s->b_frame_strategy == 2) {
1597 b_frames = estimate_best_b_count(s);
/* Honor user-forced picture types inside the candidate B run. */
1604 for (i = b_frames - 1; i >= 0; i--) {
1605 int type = s->input_picture[i]->f->pict_type;
1606 if (type && type != AV_PICTURE_TYPE_B)
1609 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1610 b_frames == s->max_b_frames) {
1611 av_log(s->avctx, AV_LOG_ERROR,
1612 "warning, too many B-frames in a row\n");
/* GOP boundary: possibly shorten the B run and force an I-frame. */
1615 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1616 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1617 s->gop_size > s->picture_in_gop_number) {
1618 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1620 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1622 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1626 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1627 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
/* Coded order: the anchor (I/P) frame first, then its B-frames. */
1630 s->reordered_input_picture[0] = s->input_picture[b_frames];
1631 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1632 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1633 s->reordered_input_picture[0]->f->coded_picture_number =
1634 s->coded_picture_number++;
1635 for (i = 0; i < b_frames; i++) {
1636 s->reordered_input_picture[i + 1] = s->input_picture[i];
1637 s->reordered_input_picture[i + 1]->f->pict_type =
1639 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1640 s->coded_picture_number++;
1645 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1647 if (s->reordered_input_picture[0]) {
/* B-frames are not used as references (reference == 0). */
1648 s->reordered_input_picture[0]->reference =
1649 s->reordered_input_picture[0]->f->pict_type !=
1650 AV_PICTURE_TYPE_B ? 3 : 0;
1652 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1655 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1656 // input is a shared pix, so we can't modify it -> allocate a new
1657 // one & ensure that the shared one is reuseable
1660 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1663 pic = &s->picture[i];
1665 pic->reference = s->reordered_input_picture[0]->reference;
1666 if (alloc_picture(s, pic, 0) < 0) {
1670 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1674 /* mark us unused / free shared pic */
1675 av_frame_unref(s->reordered_input_picture[0]->f);
1676 s->reordered_input_picture[0]->shared = 0;
1678 s->current_picture_ptr = pic;
1680 // input is not a shared pix -> reuse buffer for current_pix
1681 s->current_picture_ptr = s->reordered_input_picture[0];
1682 for (i = 0; i < 4; i++) {
1683 s->new_picture.f->data[i] += INPLACE_OFFSET;
1686 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1687 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1688 s->current_picture_ptr)) < 0)
1691 s->picture_number = s->new_picture.f->display_picture_number;
/* Per-frame cleanup after encoding: pads reference-frame edges for
 * unrestricted motion vectors, records last-frame statistics, and fills
 * the deprecated coded_frame/error fields. */
1696 static void frame_end(MpegEncContext *s)
1698 if (s->unrestricted_mv &&
1699 s->current_picture.reference &&
1701 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1702 int hshift = desc->log2_chroma_w;
1703 int vshift = desc->log2_chroma_h;
/* Replicate border pixels into the EDGE_WIDTH margin for all 3 planes so
 * motion compensation may read outside the picture. */
1704 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1705 s->current_picture.f->linesize[0],
1706 s->h_edge_pos, s->v_edge_pos,
1707 EDGE_WIDTH, EDGE_WIDTH,
1708 EDGE_TOP | EDGE_BOTTOM);
1709 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1710 s->current_picture.f->linesize[1],
1711 s->h_edge_pos >> hshift,
1712 s->v_edge_pos >> vshift,
1713 EDGE_WIDTH >> hshift,
1714 EDGE_WIDTH >> vshift,
1715 EDGE_TOP | EDGE_BOTTOM);
1716 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1717 s->current_picture.f->linesize[2],
1718 s->h_edge_pos >> hshift,
1719 s->v_edge_pos >> vshift,
1720 EDGE_WIDTH >> hshift,
1721 EDGE_WIDTH >> vshift,
1722 EDGE_TOP | EDGE_BOTTOM);
/* Remember last picture type and lambda for the next frame's decisions. */
1727 s->last_pict_type = s->pict_type;
1728 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1729 if (s->pict_type!= AV_PICTURE_TYPE_B)
1730 s->last_non_b_pict_type = s->pict_type;
/* Legacy API mirrors, compiled only while the deprecations exist. */
1732 #if FF_API_CODED_FRAME
1733 FF_DISABLE_DEPRECATION_WARNINGS
1734 av_frame_unref(s->avctx->coded_frame);
1735 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1736 FF_ENABLE_DEPRECATION_WARNINGS
1738 #if FF_API_ERROR_FRAME
1739 FF_DISABLE_DEPRECATION_WARNINGS
1740 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1741 sizeof(s->current_picture.encoding_error));
1742 FF_ENABLE_DEPRECATION_WARNINGS
/* Refresh the per-coefficient DCT noise-reduction offsets from the running
 * error statistics, separately for intra and inter blocks. The counters
 * are halved once they exceed 2^16 so the statistics stay adaptive. */
1746 static void update_noise_reduction(MpegEncContext *s)
1750 for (intra = 0; intra < 2; intra++) {
1751 if (s->dct_count[intra] > (1 << 16)) {
1752 for (i = 0; i < 64; i++) {
1753 s->dct_error_sum[intra][i] >>= 1;
1755 s->dct_count[intra] >>= 1;
/* offset[i] = noise_reduction * count / error_sum[i], with rounding; a
 * larger accumulated error for a coefficient yields a smaller offset. */
1758 for (i = 0; i < 64; i++) {
1759 s->dct_offset[intra][i] = (s->noise_reduction *
1760 s->dct_count[intra] +
1761 s->dct_error_sum[intra][i] / 2) /
1762 (s->dct_error_sum[intra][i] + 1);
/* Per-frame setup before encoding: rotates last/next/current reference
 * pictures, adjusts plane pointers for field pictures, selects the
 * dequantizer functions for the target codec, and updates noise
 * reduction. Returns <0 on reference errors (visible paths). */
1767 static int frame_start(MpegEncContext *s)
1771 /* mark & release old frames */
1772 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1773 s->last_picture_ptr != s->next_picture_ptr &&
1774 s->last_picture_ptr->f->buf[0]) {
1775 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1778 s->current_picture_ptr->f->pict_type = s->pict_type;
1779 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1781 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1782 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1783 s->current_picture_ptr)) < 0)
/* Non-B frames become references: advance last <- next <- current. */
1786 if (s->pict_type != AV_PICTURE_TYPE_B) {
1787 s->last_picture_ptr = s->next_picture_ptr;
1789 s->next_picture_ptr = s->current_picture_ptr;
1792 if (s->last_picture_ptr) {
1793 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1794 if (s->last_picture_ptr->f->buf[0] &&
1795 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1796 s->last_picture_ptr)) < 0)
1799 if (s->next_picture_ptr) {
1800 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1801 if (s->next_picture_ptr->f->buf[0] &&
1802 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1803 s->next_picture_ptr)) < 0)
/* Field coding: offset to the bottom field and double the strides so the
 * code addresses a single field of the frame. */
1807 if (s->picture_structure!= PICT_FRAME) {
1809 for (i = 0; i < 4; i++) {
1810 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1811 s->current_picture.f->data[i] +=
1812 s->current_picture.f->linesize[i];
1814 s->current_picture.f->linesize[i] *= 2;
1815 s->last_picture.f->linesize[i] *= 2;
1816 s->next_picture.f->linesize[i] *= 2;
/* Pick the dequantizer matching the bitstream syntax being produced. */
1820 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1821 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1822 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1823 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1824 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1825 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1827 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1828 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1831 if (s->dct_error_sum) {
1832 av_assert2(s->noise_reduction && s->encoding);
1833 update_noise_reduction(s);
/* Main encode entry point: queues the input frame, selects/reorders the
 * picture to code, encodes it (re-encoding at higher QP if VBV would
 * overflow), appends stuffing bits, patches the MPEG-1/2 vbv_delay for
 * CBR, and fills the output packet (pts/dts, flags, side data).
 * NOTE(review): the extraction drops many lines (declarations, returns,
 * closing braces); comments describe the visible statements only. */
1839 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1840 const AVFrame *pic_arg, int *got_packet)
1842 MpegEncContext *s = avctx->priv_data;
1843 int i, stuffing_count, ret;
1844 int context_count = s->slice_context_count;
1846 s->vbv_ignore_qmax = 0;
1848 s->picture_in_gop_number++;
1850 if (load_input_picture(s, pic_arg) < 0)
1853 if (select_input_picture(s) < 0) {
/* A picture was selected for coding. */
1858 if (s->new_picture.f->data[0]) {
/* Single-slice encoders without a preallocated buffer can grow the
 * internal byte buffer instead of reserving worst-case space. */
1859 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1860 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1862 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1863 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
/* Optional H.263 macroblock-info side data. */
1866 s->mb_info_ptr = av_packet_new_side_data(pkt,
1867 AV_PKT_DATA_H263_MB_INFO,
1868 s->mb_width*s->mb_height*12);
1869 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* Give each slice thread a proportional window of the packet buffer. */
1872 for (i = 0; i < context_count; i++) {
1873 int start_y = s->thread_context[i]->start_mb_y;
1874 int end_y = s->thread_context[i]-> end_mb_y;
1875 int h = s->mb_height;
1876 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1877 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1879 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1882 s->pict_type = s->new_picture.f->pict_type;
1884 ret = frame_start(s);
1888 ret = encode_picture(s, s->picture_number);
1889 if (growing_buffer) {
1890 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1891 pkt->data = s->pb.buf;
1892 pkt->size = avctx->internal->byte_buffer_size;
/* Deprecated public statistics fields. */
1897 #if FF_API_STAT_BITS
1898 FF_DISABLE_DEPRECATION_WARNINGS
1899 avctx->header_bits = s->header_bits;
1900 avctx->mv_bits = s->mv_bits;
1901 avctx->misc_bits = s->misc_bits;
1902 avctx->i_tex_bits = s->i_tex_bits;
1903 avctx->p_tex_bits = s->p_tex_bits;
1904 avctx->i_count = s->i_count;
1905 // FIXME f/b_count in avctx
1906 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1907 avctx->skip_count = s->skip_count;
1908 FF_ENABLE_DEPRECATION_WARNINGS
1913 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1914 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
/* --- VBV check: if the coded frame is too large, raise lambda and
 * re-encode (loop structure partially in dropped lines). --- */
1916 if (avctx->rc_buffer_size) {
1917 RateControlContext *rcc = &s->rc_context;
1918 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1919 int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
1920 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1922 if (put_bits_count(&s->pb) > max_size &&
1923 s->lambda < s->lmax) {
1924 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1925 (s->qscale + 1) / s->qscale);
1926 if (s->adaptive_quant) {
1928 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1929 s->lambda_table[i] =
1930 FFMAX(s->lambda_table[i] + min_step,
1931 s->lambda_table[i] * (s->qscale + 1) /
1934 s->mb_skipped = 0; // done in frame_start()
1935 // done in encode_picture() so we must undo it
1936 if (s->pict_type == AV_PICTURE_TYPE_P) {
1937 if (s->flipflop_rounding ||
1938 s->codec_id == AV_CODEC_ID_H263P ||
1939 s->codec_id == AV_CODEC_ID_MPEG4)
1940 s->no_rounding ^= 1;
1942 if (s->pict_type != AV_PICTURE_TYPE_B) {
1943 s->time_base = s->last_time_base;
1944 s->last_non_b_time = s->time - s->pp_time;
/* Rewind all slice bitstream writers before re-encoding. */
1946 for (i = 0; i < context_count; i++) {
1947 PutBitContext *pb = &s->thread_context[i]->pb;
1948 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1950 s->vbv_ignore_qmax = 1;
1951 av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1955 av_assert0(avctx->rc_max_rate);
1958 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1959 ff_write_pass1_stats(s);
1961 for (i = 0; i < 4; i++) {
1962 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1963 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1965 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1966 s->current_picture_ptr->encoding_error,
1967 (avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1970 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1971 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1972 s->misc_bits + s->i_tex_bits +
1974 flush_put_bits(&s->pb);
1975 s->frame_bits = put_bits_count(&s->pb);
/* --- stuffing: pad the frame up to what VBV rate control demands. --- */
1977 stuffing_count = ff_vbv_update(s, s->frame_bits);
1978 s->stuffing_bits = 8*stuffing_count;
1979 if (stuffing_count) {
1980 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1981 stuffing_count + 50) {
1982 av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
1986 switch (s->codec_id) {
1987 case AV_CODEC_ID_MPEG1VIDEO:
1988 case AV_CODEC_ID_MPEG2VIDEO:
/* MPEG-1/2: zero bytes are legal stuffing. */
1989 while (stuffing_count--) {
1990 put_bits(&s->pb, 8, 0);
1993 case AV_CODEC_ID_MPEG4:
/* MPEG-4: stuffing via a filler startcode (0x000001C3) then 0xFF bytes. */
1994 put_bits(&s->pb, 16, 0);
1995 put_bits(&s->pb, 16, 0x1C3);
1996 stuffing_count -= 4;
1997 while (stuffing_count--) {
1998 put_bits(&s->pb, 8, 0xFF);
2002 av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2004 flush_put_bits(&s->pb);
2005 s->frame_bits = put_bits_count(&s->pb);
2008 /* update MPEG-1/2 vbv_delay for CBR */
2009 if (avctx->rc_max_rate &&
2010 avctx->rc_min_rate == avctx->rc_max_rate &&
2011 s->out_format == FMT_MPEG1 &&
2012 90000LL * (avctx->rc_buffer_size - 1) <=
2013 avctx->rc_max_rate * 0xFFFFLL) {
2014 AVCPBProperties *props;
2017 int vbv_delay, min_delay;
2018 double inbits = avctx->rc_max_rate *
2019 av_q2d(avctx->time_base);
2020 int minbits = s->frame_bits - 8 *
2021 (s->vbv_delay_ptr - s->pb.buf - 1);
2022 double bits = s->rc_context.buffer_index + minbits - inbits;
2025 av_log(avctx, AV_LOG_ERROR,
2026 "Internal error, negative bits\n");
2028 av_assert1(s->repeat_first_field == 0);
/* vbv_delay in 90 kHz units, clamped to a decodable minimum. */
2030 vbv_delay = bits * 90000 / avctx->rc_max_rate;
2031 min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
2034 vbv_delay = FFMAX(vbv_delay, min_delay);
2036 av_assert0(vbv_delay < 0xFFFF);
/* Patch the 16-bit vbv_delay field in place in the picture header
 * (it straddles byte boundaries, hence the mask/shift dance). */
2038 s->vbv_delay_ptr[0] &= 0xF8;
2039 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2040 s->vbv_delay_ptr[1] = vbv_delay >> 5;
2041 s->vbv_delay_ptr[2] &= 0x07;
2042 s->vbv_delay_ptr[2] |= vbv_delay << 3;
2044 props = av_cpb_properties_alloc(&props_size);
2046 return AVERROR(ENOMEM);
2047 props->vbv_delay = vbv_delay * 300;
2049 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
2050 (uint8_t*)props, props_size);
2056 #if FF_API_VBV_DELAY
2057 FF_DISABLE_DEPRECATION_WARNINGS
2058 avctx->vbv_delay = vbv_delay * 300;
2059 FF_ENABLE_DEPRECATION_WARNINGS
2062 s->total_bits += s->frame_bits;
2063 #if FF_API_STAT_BITS
2064 FF_DISABLE_DEPRECATION_WARNINGS
2065 avctx->frame_bits = s->frame_bits;
2066 FF_ENABLE_DEPRECATION_WARNINGS
/* --- packet timestamps: dts lags pts by one frame when B-frames delay
 * output (reordered_pts carries the previous pts forward). --- */
2070 pkt->pts = s->current_picture.f->pts;
2071 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2072 if (!s->current_picture.f->coded_picture_number)
2073 pkt->dts = pkt->pts - s->dts_delta;
2075 pkt->dts = s->reordered_pts;
2076 s->reordered_pts = pkt->pts;
2078 pkt->dts = pkt->pts;
2079 if (s->current_picture.f->key_frame)
2080 pkt->flags |= AV_PKT_FLAG_KEY;
2082 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2087 /* release non-reference frames */
2088 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2089 if (!s->picture[i].reference)
2090 ff_mpeg_unref_picture(avctx, &s->picture[i]);
2093 av_assert1((s->frame_bits & 7) == 0);
2095 pkt->size = s->frame_bits / 8;
2096 *got_packet = !!pkt->size;
/* Zero out block `n` when it contains only a few small, low-frequency
 * coefficients whose weighted score stays below `threshold` — coding such
 * a block costs more bits than the quality it adds. A negative threshold
 * means the DC coefficient is kept (skip_dc path, partly in dropped
 * lines). */
2100 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2101 int n, int threshold)
/* Per-scan-position weight: low frequencies count more toward the score. */
2103 static const char tab[64] = {
2104 3, 2, 2, 1, 1, 1, 1, 1,
2105 1, 1, 1, 1, 1, 1, 1, 1,
2106 1, 1, 1, 1, 1, 1, 1, 1,
2107 0, 0, 0, 0, 0, 0, 0, 0,
2108 0, 0, 0, 0, 0, 0, 0, 0,
2109 0, 0, 0, 0, 0, 0, 0, 0,
2110 0, 0, 0, 0, 0, 0, 0, 0,
2111 0, 0, 0, 0, 0, 0, 0, 0
2116 int16_t *block = s->block[n];
2117 const int last_index = s->block_last_index[n];
2120 if (threshold < 0) {
2122 threshold = -threshold;
2126 /* Are all we could set to zero already zero? */
2127 if (last_index <= skip_dc - 1)
/* Score the block; any coefficient with |level| > 1 disqualifies it. */
2130 for (i = 0; i <= last_index; i++) {
2131 const int j = s->intra_scantable.permutated[i];
2132 const int level = FFABS(block[j]);
2134 if (skip_dc && i == 0)
2138 } else if (level > 1) {
2144 if (score >= threshold)
/* Below threshold: clear all (non-DC) coefficients. */
2146 for (i = skip_dc; i <= last_index; i++) {
2147 const int j = s->intra_scantable.permutated[i];
2151 s->block_last_index[n] = 0;
2153 s->block_last_index[n] = -1;
/* Clamp quantized coefficients to the codec's representable range
 * [min_qcoeff, max_qcoeff], counting how many were clipped, and warn once
 * per block in simple MB-decision mode (RD mode handles overflow itself). */
2156 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2160 const int maxlevel = s->max_qcoeff;
2161 const int minlevel = s->min_qcoeff;
2165 i = 1; // skip clipping of intra dc
2169 for (; i <= last_index; i++) {
2170 const int j = s->intra_scantable.permutated[i];
2171 int level = block[j];
2173 if (level > maxlevel) {
2176 } else if (level < minlevel) {
2184 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2185 av_log(s->avctx, AV_LOG_INFO,
2186 "warning, clipping %d dct coefficients to %d..%d\n",
2187 overflow, minlevel, maxlevel);
/* Compute a perceptual weight for each pixel of an 8x8 block from the
 * local variance of its 3x3 neighborhood (clamped at block edges): flat
 * areas get low weights, busy areas high ones. Used by the trellis/refine
 * quantizer (dct_quantize_refine). */
2190 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2194 for (y = 0; y < 8; y++) {
2195 for (x = 0; x < 8; x++) {
/* Sum and sum-of-squares over the clamped 3x3 neighborhood. */
2201 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2202 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2203 int v = ptr[x2 + y2 * stride];
/* 36 * sqrt(count*sumsq - sum^2) / count ~ scaled local std deviation. */
2209 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2214 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2215 int motion_x, int motion_y,
2216 int mb_block_height,
2220 int16_t weight[12][64];
2221 int16_t orig[12][64];
2222 const int mb_x = s->mb_x;
2223 const int mb_y = s->mb_y;
2226 int dct_offset = s->linesize * 8; // default for progressive frames
2227 int uv_dct_offset = s->uvlinesize * 8;
2228 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2229 ptrdiff_t wrap_y, wrap_c;
2231 for (i = 0; i < mb_block_count; i++)
2232 skip_dct[i] = s->skipdct;
2234 if (s->adaptive_quant) {
2235 const int last_qp = s->qscale;
2236 const int mb_xy = mb_x + mb_y * s->mb_stride;
2238 s->lambda = s->lambda_table[mb_xy];
2241 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2242 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2243 s->dquant = s->qscale - last_qp;
2245 if (s->out_format == FMT_H263) {
2246 s->dquant = av_clip(s->dquant, -2, 2);
2248 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2250 if (s->pict_type == AV_PICTURE_TYPE_B) {
2251 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2254 if (s->mv_type == MV_TYPE_8X8)
2260 ff_set_qscale(s, last_qp + s->dquant);
2261 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2262 ff_set_qscale(s, s->qscale + s->dquant);
2264 wrap_y = s->linesize;
2265 wrap_c = s->uvlinesize;
2266 ptr_y = s->new_picture.f->data[0] +
2267 (mb_y * 16 * wrap_y) + mb_x * 16;
2268 ptr_cb = s->new_picture.f->data[1] +
2269 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2270 ptr_cr = s->new_picture.f->data[2] +
2271 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2273 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2274 uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2275 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2276 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2277 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2279 16, 16, mb_x * 16, mb_y * 16,
2280 s->width, s->height);
2282 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2284 mb_block_width, mb_block_height,
2285 mb_x * mb_block_width, mb_y * mb_block_height,
2287 ptr_cb = ebuf + 16 * wrap_y;
2288 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2290 mb_block_width, mb_block_height,
2291 mb_x * mb_block_width, mb_y * mb_block_height,
2293 ptr_cr = ebuf + 16 * wrap_y + 16;
2297 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2298 int progressive_score, interlaced_score;
2300 s->interlaced_dct = 0;
2301 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2302 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2303 NULL, wrap_y, 8) - 400;
2305 if (progressive_score > 0) {
2306 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2307 NULL, wrap_y * 2, 8) +
2308 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2309 NULL, wrap_y * 2, 8);
2310 if (progressive_score > interlaced_score) {
2311 s->interlaced_dct = 1;
2313 dct_offset = wrap_y;
2314 uv_dct_offset = wrap_c;
2316 if (s->chroma_format == CHROMA_422 ||
2317 s->chroma_format == CHROMA_444)
2323 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2324 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2325 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2326 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2328 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2332 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2333 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2334 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2335 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2336 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2337 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2338 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2339 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2340 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2341 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2342 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2343 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2347 op_pixels_func (*op_pix)[4];
2348 qpel_mc_func (*op_qpix)[16];
2349 uint8_t *dest_y, *dest_cb, *dest_cr;
2351 dest_y = s->dest[0];
2352 dest_cb = s->dest[1];
2353 dest_cr = s->dest[2];
2355 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2356 op_pix = s->hdsp.put_pixels_tab;
2357 op_qpix = s->qdsp.put_qpel_pixels_tab;
2359 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2360 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2363 if (s->mv_dir & MV_DIR_FORWARD) {
2364 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2365 s->last_picture.f->data,
2367 op_pix = s->hdsp.avg_pixels_tab;
2368 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2370 if (s->mv_dir & MV_DIR_BACKWARD) {
2371 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2372 s->next_picture.f->data,
2376 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2377 int progressive_score, interlaced_score;
2379 s->interlaced_dct = 0;
2380 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2381 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2385 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2386 progressive_score -= 400;
2388 if (progressive_score > 0) {
2389 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2391 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2395 if (progressive_score > interlaced_score) {
2396 s->interlaced_dct = 1;
2398 dct_offset = wrap_y;
2399 uv_dct_offset = wrap_c;
2401 if (s->chroma_format == CHROMA_422)
2407 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2408 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2409 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2410 dest_y + dct_offset, wrap_y);
2411 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2412 dest_y + dct_offset + 8, wrap_y);
2414 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2418 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2419 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2420 if (!s->chroma_y_shift) { /* 422 */
2421 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2422 dest_cb + uv_dct_offset, wrap_c);
2423 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2424 dest_cr + uv_dct_offset, wrap_c);
2427 /* pre quantization */
2428 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2429 2 * s->qscale * s->qscale) {
2431 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2433 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2435 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2436 wrap_y, 8) < 20 * s->qscale)
2438 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2439 wrap_y, 8) < 20 * s->qscale)
2441 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2443 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2445 if (!s->chroma_y_shift) { /* 422 */
2446 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2447 dest_cb + uv_dct_offset,
2448 wrap_c, 8) < 20 * s->qscale)
2450 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2451 dest_cr + uv_dct_offset,
2452 wrap_c, 8) < 20 * s->qscale)
2458 if (s->quantizer_noise_shaping) {
2460 get_visual_weight(weight[0], ptr_y , wrap_y);
2462 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2464 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2466 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2468 get_visual_weight(weight[4], ptr_cb , wrap_c);
2470 get_visual_weight(weight[5], ptr_cr , wrap_c);
2471 if (!s->chroma_y_shift) { /* 422 */
2473 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2476 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2479 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2482 /* DCT & quantize */
2483 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2485 for (i = 0; i < mb_block_count; i++) {
2488 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2489 // FIXME we could decide to change to quantizer instead of
2491 // JS: I don't think that would be a good idea it could lower
2492 // quality instead of improve it. Just INTRADC clipping
2493 // deserves changes in quantizer
2495 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2497 s->block_last_index[i] = -1;
2499 if (s->quantizer_noise_shaping) {
2500 for (i = 0; i < mb_block_count; i++) {
2502 s->block_last_index[i] =
2503 dct_quantize_refine(s, s->block[i], weight[i],
2504 orig[i], i, s->qscale);
2509 if (s->luma_elim_threshold && !s->mb_intra)
2510 for (i = 0; i < 4; i++)
2511 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2512 if (s->chroma_elim_threshold && !s->mb_intra)
2513 for (i = 4; i < mb_block_count; i++)
2514 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2516 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2517 for (i = 0; i < mb_block_count; i++) {
2518 if (s->block_last_index[i] == -1)
2519 s->coded_score[i] = INT_MAX / 256;
2524 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2525 s->block_last_index[4] =
2526 s->block_last_index[5] = 0;
2528 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2529 if (!s->chroma_y_shift) { /* 422 / 444 */
2530 for (i=6; i<12; i++) {
2531 s->block_last_index[i] = 0;
2532 s->block[i][0] = s->block[4][0];
2537 // non c quantize code returns incorrect block_last_index FIXME
2538 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2539 for (i = 0; i < mb_block_count; i++) {
2541 if (s->block_last_index[i] > 0) {
2542 for (j = 63; j > 0; j--) {
2543 if (s->block[i][s->intra_scantable.permutated[j]])
2546 s->block_last_index[i] = j;
2551 /* huffman encode */
2552 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2553 case AV_CODEC_ID_MPEG1VIDEO:
2554 case AV_CODEC_ID_MPEG2VIDEO:
2555 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2556 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2558 case AV_CODEC_ID_MPEG4:
2559 if (CONFIG_MPEG4_ENCODER)
2560 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2562 case AV_CODEC_ID_MSMPEG4V2:
2563 case AV_CODEC_ID_MSMPEG4V3:
2564 case AV_CODEC_ID_WMV1:
2565 if (CONFIG_MSMPEG4_ENCODER)
2566 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2568 case AV_CODEC_ID_WMV2:
2569 if (CONFIG_WMV2_ENCODER)
2570 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2572 case AV_CODEC_ID_H261:
2573 if (CONFIG_H261_ENCODER)
2574 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2576 case AV_CODEC_ID_H263:
2577 case AV_CODEC_ID_H263P:
2578 case AV_CODEC_ID_FLV1:
2579 case AV_CODEC_ID_RV10:
2580 case AV_CODEC_ID_RV20:
2581 if (CONFIG_H263_ENCODER)
2582 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2584 case AV_CODEC_ID_MJPEG:
2585 case AV_CODEC_ID_AMV:
2586 if (CONFIG_MJPEG_ENCODER)
2587 ff_mjpeg_encode_mb(s, s->block);
2589 case AV_CODEC_ID_SPEEDHQ:
2590 if (CONFIG_SPEEDHQ_ENCODER)
2591 ff_speedhq_encode_mb(s, s->block);
2598 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2600 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2601 else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2602 else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/**
 * Snapshot the encoder state that a trial macroblock encode will mutate,
 * copying from context *s into *d so the trial can later be rolled back.
 * @param type candidate MB type being tried (-1 = generic backup)
 * NOTE(review): this chunk appears sampled -- braces/loop headers (e.g.
 * around the last_dc[] copy) are not visible; comments cover visible lines.
 */
2605 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* MV predictors: 2 directions x 2 fields x (x,y) */
2608 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
/* MPEG-1/2 style macroblock skip-run state */
2611 d->mb_skip_run= s->mb_skip_run;
/* DC predictors -- presumably inside a per-component loop; loop header not visible */
2613 d->last_dc[i] = s->last_dc[i];
/* bit-usage / statistics counters used by rate control and pass-1 stats */
2616 d->mv_bits= s->mv_bits;
2617 d->i_tex_bits= s->i_tex_bits;
2618 d->p_tex_bits= s->p_tex_bits;
2619 d->i_count= s->i_count;
2620 d->f_count= s->f_count;
2621 d->b_count= s->b_count;
2622 d->skip_count= s->skip_count;
2623 d->misc_bits= s->misc_bits;
/* quantizer state */
2627 d->qscale= s->qscale;
2628 d->dquant= s->dquant;
/* MPEG-4 escape-3 VLC coding state */
2630 d->esc3_level_length= s->esc3_level_length;
/**
 * Propagate the state of a finished (winning) trial encode from *s into *d.
 * Mirrors copy_context_before_encode() but additionally carries over the
 * decisions made while encoding (mb_intra, mv_type, block_last_index, ...).
 * @param type candidate MB type that was tried (-1 = generic copy)
 * NOTE(review): sampled chunk -- loop headers/braces around last_dc[] and
 * block_last_index[] copies are not visible; comments cover visible lines.
 */
2633 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* chosen motion vectors and MV predictors */
2636 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2637 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2640 d->mb_skip_run= s->mb_skip_run;
/* DC predictors -- presumably inside a per-component loop (header not visible) */
2642 d->last_dc[i] = s->last_dc[i];
/* bit-usage / statistics counters */
2645 d->mv_bits= s->mv_bits;
2646 d->i_tex_bits= s->i_tex_bits;
2647 d->p_tex_bits= s->p_tex_bits;
2648 d->i_count= s->i_count;
2649 d->f_count= s->f_count;
2650 d->b_count= s->b_count;
2651 d->skip_count= s->skip_count;
2652 d->misc_bits= s->misc_bits;
/* coding decisions taken by the trial */
2654 d->mb_intra= s->mb_intra;
2655 d->mb_skipped= s->mb_skipped;
2656 d->mv_type= s->mv_type;
2657 d->mv_dir= s->mv_dir;
/* with data partitioning, the separate partition bitstreams travel too */
2659 if(s->data_partitioning){
2661 d->tex_pb= s->tex_pb;
/* per-block last nonzero coefficient index -- loop header not visible */
2665 d->block_last_index[i]= s->block_last_index[i];
2666 d->interlaced_dct= s->interlaced_dct;
2667 d->qscale= s->qscale;
2669 d->esc3_level_length= s->esc3_level_length;
/**
 * Trial-encode one macroblock as candidate type 'type' and keep it if it
 * scores better than the current best. Uses two ping-pong bit buffers
 * (pb/pb2/tex_pb indexed by *next_block) so the losing trial can be thrown
 * away, and redirects s->dest to a scratchpad so reconstruction does not
 * clobber the real output picture.
 * NOTE(review): sampled chunk -- the declaration of 'score', the *dmin
 * comparison and the best-state bookkeeping are not visible here.
 */
2672 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2673 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2674 int *dmin, int *next_block, int motion_x, int motion_y)
2677 uint8_t *dest_backup[3];
/* start each trial from the same pre-encode state */
2679 copy_context_before_encode(s, backup, type);
/* select the ping-pong coefficient/bitstream buffers for this trial */
2681 s->block= s->blocks[*next_block];
2682 s->pb= pb[*next_block];
2683 if(s->data_partitioning){
2684 s->pb2 = pb2 [*next_block];
2685 s->tex_pb= tex_pb[*next_block];
/* reconstruct into the RD scratchpad, not the real picture */
2689 memcpy(dest_backup, s->dest, sizeof(s->dest));
2690 s->dest[0] = s->sc.rd_scratchpad;
2691 s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2692 s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2693 av_assert0(s->linesize >= 32); //FIXME
2696 encode_mb(s, motion_x, motion_y);
/* base score: bits spent (across all partitions when partitioning) */
2698 score= put_bits_count(&s->pb);
2699 if(s->data_partitioning){
2700 score+= put_bits_count(&s->pb2);
2701 score+= put_bits_count(&s->tex_pb);
/* full RD mode: score = lambda2*bits + distortion (SSE vs. source) */
2704 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2705 ff_mpv_reconstruct_mb(s, s->block);
2707 score *= s->lambda2;
2708 score += sse_mb(s) << FF_LAMBDA_SHIFT;
/* restore real destination pointers */
2712 memcpy(s->dest, dest_backup, sizeof(s->dest));
/* record the winning trial's state into *best */
2719 copy_context_after_encode(best, s, type);
/**
 * Sum of squared errors between two w x h pixel blocks.
 * Fast paths use the DSP 16x16 / 8x8 SSE comparators; the generic path
 * accumulates squared differences via ff_square_tab.
 * NOTE(review): sampled chunk -- the 16x16 'if', the generic loops, the
 * accumulator declaration and the final return are not visible here.
 */
2723 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
/* +256 so that negative pixel differences index the squared-value LUT correctly */
2724 const uint32_t *sq = ff_square_tab + 256;
2729 return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2730 else if(w==8 && h==8)
2731 return s->mecc.sse[1](NULL, src1, src2, stride, 8);
/* generic (edge-sized) path: scalar accumulation over all pixels */
2735 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/**
 * Distortion of the current macroblock: reconstructed (s->dest) vs. source
 * (s->new_picture), luma plus both chroma planes.
 * Full 16x16 MBs use the DSP NSSE/SSE comparators; MBs clipped at the
 * right/bottom picture edge fall back to the scalar sse() with reduced w/h.
 * NOTE(review): sampled chunk -- the w/h declarations and the surrounding
 * if(w==16 && h==16) are not visible here.
 */
2744 static int sse_mb(MpegEncContext *s){
/* clip the MB extent at the picture border */
2748 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2749 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
/* noise-preserving SSE when the mb comparison function asks for it */
2752 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2753 return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2754 s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2755 s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2757 return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2758 s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2759 s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
/* edge MB: scalar path, chroma dimensions halved (assumes 4:2:0 here) */
2762 return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2763 +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2764 +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/**
 * Slice-thread worker: pre-pass P-frame motion estimation over this slice's
 * macroblock rows, scanned in reverse order (bottom-up, right-to-left).
 * Uses the dedicated pre-pass diamond size (avctx->pre_dia_size).
 * NOTE(review): sampled chunk -- closing braces and the return statement
 * are not visible here.
 */
2767 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2768 MpegEncContext *s= *(void**)arg;
2772 s->me.dia_size= s->avctx->pre_dia_size;
2773 s->first_slice_line=1;
/* reverse scan: last MB row of the slice down to the first */
2774 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2775 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2776 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
/* after the first processed row, neighbours above are available */
2778 s->first_slice_line=0;
/**
 * Slice-thread worker: main motion-estimation pass over this slice's MB rows.
 * Computes the motion vectors and MB type for each macroblock and stores them
 * in the context (B-frames via ff_estimate_b_frame_motion, otherwise
 * ff_estimate_p_frame_motion).
 * NOTE(review): sampled chunk -- closing braces and the return statement
 * are not visible here.
 */
2786 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2787 MpegEncContext *s= *(void**)arg;
2789 s->me.dia_size= s->avctx->dia_size;
2790 s->first_slice_line=1;
2791 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2792 s->mb_x=0; //for block init below
2793 ff_init_block_index(s);
2794 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
/* advance the 4 luma block indices by one MB (2 blocks horizontally) */
2795 s->block_index[0]+=2;
2796 s->block_index[1]+=2;
2797 s->block_index[2]+=2;
2798 s->block_index[3]+=2;
2800 /* compute motion vector & mb_type and store in context */
2801 if(s->pict_type==AV_PICTURE_TYPE_B)
2802 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2804 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2806 s->first_slice_line=0;
/**
 * Slice-thread worker: per-macroblock luma variance and mean for the source
 * picture, used by rate control / scene analysis. Accumulates the slice's
 * variance sum into s->me.mb_var_sum_temp.
 * NOTE(review): sampled chunk -- the xx/yy declarations, closing braces and
 * the return statement are not visible here.
 */
2811 static int mb_var_thread(AVCodecContext *c, void *arg){
2812 MpegEncContext *s= *(void**)arg;
2815 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2816 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
/* top-left pixel of this MB in the source luma plane */
2819 uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2821 int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
/* variance = E[x^2] - E[x]^2 over the 256 luma pixels, with rounding */
2823 varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2824 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2826 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2827 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2828 s->me.mb_var_sum_temp += varc;
/**
 * Finish the current slice: emit codec-specific trailing data (MPEG-4
 * partition merge + stuffing, MJPEG stuffing, SpeedHQ end-of-slice) and
 * byte-align the bitstream. Under two-pass encoding the alignment bits are
 * accounted as misc_bits.
 * NOTE(review): sampled chunk -- some closing braces are not visible here.
 */
2834 static void write_slice_end(MpegEncContext *s){
2835 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
/* merge the separately coded partitions back into the main bitstream */
2836 if(s->partitioned_frame){
2837 ff_mpeg4_merge_partitions(s);
2840 ff_mpeg4_stuffing(&s->pb);
2841 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2842 ff_mjpeg_encode_stuffing(s);
2843 } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
2844 ff_speedhq_end_slice(s);
/* byte-align */
2847 flush_put_bits(&s->pb);
2849 if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2850 s->misc_bits+= get_bits_diff(s);
/**
 * Fill the most recently reserved 12-byte mb_info record (H.263 RFC 2190
 * style side data): bit offset of the MB, quantizer, GOB number, MB address
 * and the predicted motion vector. The 4MV second-vector fields are always 0.
 * NOTE(review): sampled chunk -- the pred_x/pred_y declarations are not
 * visible here.
 */
2853 static void write_mb_info(MpegEncContext *s)
/* point at the start of the last reserved 12-byte slot */
2855 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2856 int offset = put_bits_count(&s->pb);
/* MB address within its GOB, and the GOB number */
2857 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2858 int gobn = s->mb_y / s->gob_index;
2860 if (CONFIG_H263_ENCODER)
2861 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2862 bytestream_put_le32(&ptr, offset);
2863 bytestream_put_byte(&ptr, s->qscale);
2864 bytestream_put_byte(&ptr, gobn);
2865 bytestream_put_le16(&ptr, mba);
2866 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2867 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2868 /* 4MV not implemented */
2869 bytestream_put_byte(&ptr, 0); /* hmv2 */
2870 bytestream_put_byte(&ptr, 0); /* vmv2 */
/**
 * Book-keeping for the mb_info side-data records: once at least s->mb_info
 * bits have been written since the last record, reserve a new 12-byte slot;
 * the slot is actually filled by write_mb_info(). 'startcode' marks the call
 * made right after a resync/start code was emitted.
 * NOTE(review): sampled chunk -- several guards/branches (e.g. the !s->mb_info
 * early-out and the write_mb_info() call) are not visible here, so the exact
 * control flow cannot be confirmed from this view.
 */
2873 static void update_mb_info(MpegEncContext *s, int startcode)
/* enough bits since the previous record? reserve the next slot */
2877 if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2878 s->mb_info_size += 12;
2879 s->prev_mb_info = s->last_mb_info;
2882 s->prev_mb_info = put_bits_count(&s->pb)/8;
2883 /* This might have incremented mb_info_size above, and we return without
2884 * actually writing any info into that slot yet. But in that case,
2885 * this will be called again at the start of the after writing the
2886 * start code, actually writing the mb info. */
2890 s->last_mb_info = put_bits_count(&s->pb)/8;
2891 if (!s->mb_info_size)
2892 s->mb_info_size += 12;
/**
 * Grow the bitstream output buffer when fewer than 'threshold' bytes remain.
 * Only possible with a single slice context and when the PutBitContext is
 * backed by avctx's internal byte_buffer; the derived pointers ptr_lastgob
 * and vbv_delay_ptr are rebased onto the new buffer.
 * @param threshold     minimum free bytes required after the call
 * @param size_increase how many extra bytes to allocate
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure/overflow,
 *         AVERROR(EINVAL) if the buffer could not be grown enough.
 * NOTE(review): sampled chunk -- the emms_c()/flush handling and the final
 * 'return 0' are not visible here.
 */
2896 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2898 if ( s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2899 && s->slice_context_count == 1
2900 && s->pb.buf == s->avctx->internal->byte_buffer) {
/* remember derived pointers as offsets so they survive the realloc */
2901 int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2902 int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2904 uint8_t *new_buffer = NULL;
2905 int new_buffer_size = 0;
/* guard against integer overflow of the grown size */
2907 if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2908 av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2909 return AVERROR(ENOMEM);
2914 av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2915 s->avctx->internal->byte_buffer_size + size_increase);
2917 return AVERROR(ENOMEM);
/* copy old contents, swap the buffer, and rebase all pointers */
2919 memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2920 av_free(s->avctx->internal->byte_buffer);
2921 s->avctx->internal->byte_buffer = new_buffer;
2922 s->avctx->internal->byte_buffer_size = new_buffer_size;
2923 rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2924 s->ptr_lastgob = s->pb.buf + lastgob_pos;
2925 s->vbv_delay_ptr = s->pb.buf + vbv_pos;
/* still not enough room -> hard failure */
2927 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2928 return AVERROR(EINVAL);
2932 static int encode_thread(AVCodecContext *c, void *arg){
2933 MpegEncContext *s= *(void**)arg;
2934 int mb_x, mb_y, mb_y_order;
2935 int chr_h= 16>>s->chroma_y_shift;
2937 MpegEncContext best_s = { 0 }, backup_s;
2938 uint8_t bit_buf[2][MAX_MB_BYTES];
2939 uint8_t bit_buf2[2][MAX_MB_BYTES];
2940 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2941 PutBitContext pb[2], pb2[2], tex_pb[2];
2944 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2945 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2946 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2949 s->last_bits= put_bits_count(&s->pb);
2960 /* init last dc values */
2961 /* note: quant matrix value (8) is implied here */
2962 s->last_dc[i] = 128 << s->intra_dc_precision;
2964 s->current_picture.encoding_error[i] = 0;
2966 if(s->codec_id==AV_CODEC_ID_AMV){
2967 s->last_dc[0] = 128*8/13;
2968 s->last_dc[1] = 128*8/14;
2969 s->last_dc[2] = 128*8/14;
2972 memset(s->last_mv, 0, sizeof(s->last_mv));
2976 switch(s->codec_id){
2977 case AV_CODEC_ID_H263:
2978 case AV_CODEC_ID_H263P:
2979 case AV_CODEC_ID_FLV1:
2980 if (CONFIG_H263_ENCODER)
2981 s->gob_index = H263_GOB_HEIGHT(s->height);
2983 case AV_CODEC_ID_MPEG4:
2984 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2985 ff_mpeg4_init_partitions(s);
2991 s->first_slice_line = 1;
2992 s->ptr_lastgob = s->pb.buf;
2993 for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
2994 if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
2996 mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
2997 if (first_in_slice && mb_y_order != s->start_mb_y)
2998 ff_speedhq_end_slice(s);
2999 s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
3006 ff_set_qscale(s, s->qscale);
3007 ff_init_block_index(s);
3009 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3010 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
3011 int mb_type= s->mb_type[xy];
3015 int size_increase = s->avctx->internal->byte_buffer_size/4
3016 + s->mb_width*MAX_MB_BYTES;
3018 ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
3019 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
3020 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
3023 if(s->data_partitioning){
3024 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
3025 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
3026 av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3032 s->mb_y = mb_y; // moved into loop, can get changed by H.261
3033 ff_update_block_index(s);
3035 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3036 ff_h261_reorder_mb_index(s);
3037 xy= s->mb_y*s->mb_stride + s->mb_x;
3038 mb_type= s->mb_type[xy];
3041 /* write gob / video packet header */
3043 int current_packet_size, is_gob_start;
3045 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
3047 is_gob_start = s->rtp_payload_size &&
3048 current_packet_size >= s->rtp_payload_size &&
3051 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3053 switch(s->codec_id){
3054 case AV_CODEC_ID_H263:
3055 case AV_CODEC_ID_H263P:
3056 if(!s->h263_slice_structured)
3057 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3059 case AV_CODEC_ID_MPEG2VIDEO:
3060 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3061 case AV_CODEC_ID_MPEG1VIDEO:
3062 if(s->mb_skip_run) is_gob_start=0;
3064 case AV_CODEC_ID_MJPEG:
3065 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3070 if(s->start_mb_y != mb_y || mb_x!=0){
3073 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3074 ff_mpeg4_init_partitions(s);
3078 av_assert2((put_bits_count(&s->pb)&7) == 0);
3079 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3081 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3082 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3083 int d = 100 / s->error_rate;
3085 current_packet_size=0;
3086 s->pb.buf_ptr= s->ptr_lastgob;
3087 av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3091 #if FF_API_RTP_CALLBACK
3092 FF_DISABLE_DEPRECATION_WARNINGS
3093 if (s->avctx->rtp_callback){
3094 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3095 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3097 FF_ENABLE_DEPRECATION_WARNINGS
3099 update_mb_info(s, 1);
3101 switch(s->codec_id){
3102 case AV_CODEC_ID_MPEG4:
3103 if (CONFIG_MPEG4_ENCODER) {
3104 ff_mpeg4_encode_video_packet_header(s);
3105 ff_mpeg4_clean_buffers(s);
3108 case AV_CODEC_ID_MPEG1VIDEO:
3109 case AV_CODEC_ID_MPEG2VIDEO:
3110 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3111 ff_mpeg1_encode_slice_header(s);
3112 ff_mpeg1_clean_buffers(s);
3115 case AV_CODEC_ID_H263:
3116 case AV_CODEC_ID_H263P:
3117 if (CONFIG_H263_ENCODER)
3118 ff_h263_encode_gob_header(s, mb_y);
3122 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3123 int bits= put_bits_count(&s->pb);
3124 s->misc_bits+= bits - s->last_bits;
3128 s->ptr_lastgob += current_packet_size;
3129 s->first_slice_line=1;
3130 s->resync_mb_x=mb_x;
3131 s->resync_mb_y=mb_y;
3135 if( (s->resync_mb_x == s->mb_x)
3136 && s->resync_mb_y+1 == s->mb_y){
3137 s->first_slice_line=0;
3141 s->dquant=0; //only for QP_RD
3143 update_mb_info(s, 0);
3145 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3147 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3149 copy_context_before_encode(&backup_s, s, -1);
3151 best_s.data_partitioning= s->data_partitioning;
3152 best_s.partitioned_frame= s->partitioned_frame;
3153 if(s->data_partitioning){
3154 backup_s.pb2= s->pb2;
3155 backup_s.tex_pb= s->tex_pb;
3158 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3159 s->mv_dir = MV_DIR_FORWARD;
3160 s->mv_type = MV_TYPE_16X16;
3162 s->mv[0][0][0] = s->p_mv_table[xy][0];
3163 s->mv[0][0][1] = s->p_mv_table[xy][1];
3164 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3165 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3167 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3168 s->mv_dir = MV_DIR_FORWARD;
3169 s->mv_type = MV_TYPE_FIELD;
3172 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3173 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3174 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3176 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3177 &dmin, &next_block, 0, 0);
3179 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3180 s->mv_dir = MV_DIR_FORWARD;
3181 s->mv_type = MV_TYPE_16X16;
3185 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3186 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3188 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3189 s->mv_dir = MV_DIR_FORWARD;
3190 s->mv_type = MV_TYPE_8X8;
3193 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3194 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3196 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3197 &dmin, &next_block, 0, 0);
3199 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3200 s->mv_dir = MV_DIR_FORWARD;
3201 s->mv_type = MV_TYPE_16X16;
3203 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3204 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3205 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3206 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3208 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3209 s->mv_dir = MV_DIR_BACKWARD;
3210 s->mv_type = MV_TYPE_16X16;
3212 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3213 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3214 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3215 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3217 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3218 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3219 s->mv_type = MV_TYPE_16X16;
3221 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3222 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3223 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3224 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3225 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3226 &dmin, &next_block, 0, 0);
3228 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3229 s->mv_dir = MV_DIR_FORWARD;
3230 s->mv_type = MV_TYPE_FIELD;
3233 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3234 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3235 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3237 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3238 &dmin, &next_block, 0, 0);
3240 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3241 s->mv_dir = MV_DIR_BACKWARD;
3242 s->mv_type = MV_TYPE_FIELD;
3245 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3246 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3247 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3249 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3250 &dmin, &next_block, 0, 0);
3252 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3253 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3254 s->mv_type = MV_TYPE_FIELD;
3256 for(dir=0; dir<2; dir++){
3258 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3259 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3260 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3263 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3264 &dmin, &next_block, 0, 0);
3266 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3268 s->mv_type = MV_TYPE_16X16;
3272 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3273 &dmin, &next_block, 0, 0);
3274 if(s->h263_pred || s->h263_aic){
3276 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3278 ff_clean_intra_table_entries(s); //old mode?
3282 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3283 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3284 const int last_qp= backup_s.qscale;
3287 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3288 static const int dquant_tab[4]={-1,1,-2,2};
3289 int storecoefs = s->mb_intra && s->dc_val[0];
3291 av_assert2(backup_s.dquant == 0);
3294 s->mv_dir= best_s.mv_dir;
3295 s->mv_type = MV_TYPE_16X16;
3296 s->mb_intra= best_s.mb_intra;
3297 s->mv[0][0][0] = best_s.mv[0][0][0];
3298 s->mv[0][0][1] = best_s.mv[0][0][1];
3299 s->mv[1][0][0] = best_s.mv[1][0][0];
3300 s->mv[1][0][1] = best_s.mv[1][0][1];
3302 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3303 for(; qpi<4; qpi++){
3304 int dquant= dquant_tab[qpi];
3305 qp= last_qp + dquant;
3306 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3308 backup_s.dquant= dquant;
3311 dc[i]= s->dc_val[0][ s->block_index[i] ];
3312 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3316 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3317 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3318 if(best_s.qscale != qp){
3321 s->dc_val[0][ s->block_index[i] ]= dc[i];
3322 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3329 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3330 int mx= s->b_direct_mv_table[xy][0];
3331 int my= s->b_direct_mv_table[xy][1];
3333 backup_s.dquant = 0;
3334 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3336 ff_mpeg4_set_direct_mv(s, mx, my);
3337 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3338 &dmin, &next_block, mx, my);
3340 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3341 backup_s.dquant = 0;
3342 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3344 ff_mpeg4_set_direct_mv(s, 0, 0);
3345 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3346 &dmin, &next_block, 0, 0);
3348 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3351 coded |= s->block_last_index[i];
3354 memcpy(s->mv, best_s.mv, sizeof(s->mv));
3355 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3356 mx=my=0; //FIXME find the one we actually used
3357 ff_mpeg4_set_direct_mv(s, mx, my);
3358 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3366 s->mv_dir= best_s.mv_dir;
3367 s->mv_type = best_s.mv_type;
3369 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3370 s->mv[0][0][1] = best_s.mv[0][0][1];
3371 s->mv[1][0][0] = best_s.mv[1][0][0];
3372 s->mv[1][0][1] = best_s.mv[1][0][1];*/
3375 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3376 &dmin, &next_block, mx, my);
3381 s->current_picture.qscale_table[xy] = best_s.qscale;
3383 copy_context_after_encode(s, &best_s, -1);
3385 pb_bits_count= put_bits_count(&s->pb);
3386 flush_put_bits(&s->pb);
3387 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3390 if(s->data_partitioning){
3391 pb2_bits_count= put_bits_count(&s->pb2);
3392 flush_put_bits(&s->pb2);
3393 ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3394 s->pb2= backup_s.pb2;
3396 tex_pb_bits_count= put_bits_count(&s->tex_pb);
3397 flush_put_bits(&s->tex_pb);
3398 ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3399 s->tex_pb= backup_s.tex_pb;
3401 s->last_bits= put_bits_count(&s->pb);
3403 if (CONFIG_H263_ENCODER &&
3404 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3405 ff_h263_update_motion_val(s);
3407 if(next_block==0){ //FIXME 16 vs linesize16
3408 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3409 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3410 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3413 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3414 ff_mpv_reconstruct_mb(s, s->block);
3416 int motion_x = 0, motion_y = 0;
3417 s->mv_type=MV_TYPE_16X16;
3418 // only one MB-Type possible
3421 case CANDIDATE_MB_TYPE_INTRA:
3424 motion_x= s->mv[0][0][0] = 0;
3425 motion_y= s->mv[0][0][1] = 0;
3427 case CANDIDATE_MB_TYPE_INTER:
3428 s->mv_dir = MV_DIR_FORWARD;
3430 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3431 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3433 case CANDIDATE_MB_TYPE_INTER_I:
3434 s->mv_dir = MV_DIR_FORWARD;
3435 s->mv_type = MV_TYPE_FIELD;
3438 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3439 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3440 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3443 case CANDIDATE_MB_TYPE_INTER4V:
3444 s->mv_dir = MV_DIR_FORWARD;
3445 s->mv_type = MV_TYPE_8X8;
3448 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3449 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3452 case CANDIDATE_MB_TYPE_DIRECT:
3453 if (CONFIG_MPEG4_ENCODER) {
3454 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3456 motion_x=s->b_direct_mv_table[xy][0];
3457 motion_y=s->b_direct_mv_table[xy][1];
3458 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3461 case CANDIDATE_MB_TYPE_DIRECT0:
3462 if (CONFIG_MPEG4_ENCODER) {
3463 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3465 ff_mpeg4_set_direct_mv(s, 0, 0);
3468 case CANDIDATE_MB_TYPE_BIDIR:
3469 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3471 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3472 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3473 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3474 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3476 case CANDIDATE_MB_TYPE_BACKWARD:
3477 s->mv_dir = MV_DIR_BACKWARD;
3479 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3480 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3482 case CANDIDATE_MB_TYPE_FORWARD:
3483 s->mv_dir = MV_DIR_FORWARD;
3485 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3486 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3488 case CANDIDATE_MB_TYPE_FORWARD_I:
3489 s->mv_dir = MV_DIR_FORWARD;
3490 s->mv_type = MV_TYPE_FIELD;
3493 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3494 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3495 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3498 case CANDIDATE_MB_TYPE_BACKWARD_I:
3499 s->mv_dir = MV_DIR_BACKWARD;
3500 s->mv_type = MV_TYPE_FIELD;
3503 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3504 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3505 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3508 case CANDIDATE_MB_TYPE_BIDIR_I:
3509 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3510 s->mv_type = MV_TYPE_FIELD;
3512 for(dir=0; dir<2; dir++){
3514 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3515 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3516 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3521 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3524 encode_mb(s, motion_x, motion_y);
3526 // RAL: Update last macroblock type
3527 s->last_mv_dir = s->mv_dir;
3529 if (CONFIG_H263_ENCODER &&
3530 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3531 ff_h263_update_motion_val(s);
3533 ff_mpv_reconstruct_mb(s, s->block);
3536 /* clean the MV table in IPS frames for direct mode in B-frames */
3537 if(s->mb_intra /* && I,P,S_TYPE */){
3538 s->p_mv_table[xy][0]=0;
3539 s->p_mv_table[xy][1]=0;
3542 if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3546 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3547 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3549 s->current_picture.encoding_error[0] += sse(
3550 s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3551 s->dest[0], w, h, s->linesize);
3552 s->current_picture.encoding_error[1] += sse(
3553 s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3554 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3555 s->current_picture.encoding_error[2] += sse(
3556 s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3557 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3560 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3561 ff_h263_loop_filter(s);
3563 ff_dlog(s->avctx, "MB %d %d bits\n",
3564 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3568 //not beautiful here but we must write it before flushing so it has to be here
3569 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3570 ff_msmpeg4_encode_ext_header(s);
3574 #if FF_API_RTP_CALLBACK
3575 FF_DISABLE_DEPRECATION_WARNINGS
3576 /* Send the last GOB if RTP */
3577 if (s->avctx->rtp_callback) {
3578 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3579 int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3580 /* Call the RTP callback to send the last GOB */
3582 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3584 FF_ENABLE_DEPRECATION_WARNINGS
3590 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold per-slice motion-estimation statistics from a worker slice context
 * (src) into the main context (dst). MERGE(f) adds src->f into dst->f and
 * zeroes src->f, so repeated merging is safe.
 * NOTE(review): the closing brace of this function is not visible — this
 * chunk appears to have interior lines missing throughout. */
3591 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3592 MERGE(me.scene_change_score);
3593 MERGE(me.mc_mb_var_sum_temp);
3594 MERGE(me.mb_var_sum_temp);
/* Merge a worker slice context back into the main context after encoding:
 * accumulate statistics (DCT counts, error counters, PSNR error sums,
 * optional noise-reduction error sums) and append the slice's bitstream
 * onto the main PutBitContext.
 * NOTE(review): several statement lines and closing braces are missing from
 * this view (source numbering jumps, e.g. 3601 -> 3610); code is reproduced
 * byte-identical as seen. */
3597 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3600 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3601 MERGE(dct_count[1]);
3610 MERGE(er.error_count);
3611 MERGE(padding_bug_score);
/* per-plane squared-error accumulators used for PSNR reporting */
3612 MERGE(current_picture.encoding_error[0]);
3613 MERGE(current_picture.encoding_error[1]);
3614 MERGE(current_picture.encoding_error[2]);
/* noise reduction keeps running DCT error sums per coefficient */
3616 if (dst->noise_reduction){
3617 for(i=0; i<64; i++){
3618 MERGE(dct_error_sum[0][i]);
3619 MERGE(dct_error_sum[1][i]);
/* slice bitstreams must be byte-aligned before concatenation */
3623 av_assert1(put_bits_count(&src->pb) % 8 ==0);
3624 av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3625 ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3626 flush_put_bits(&dst->pb);
/* Pick the quantizer (lambda/qscale) for the current picture.
 * Priority: an explicitly queued next_lambda, else rate control
 * (ff_rate_estimate_qscale) unless fixed qscale is in use. With adaptive
 * quantization the per-MB qscale tables are cleaned up codec-specifically
 * and lambda comes from lambda_table[0].
 * @param dry_run nonzero = probe only; next_lambda is not consumed.
 * NOTE(review): error-return lines and closing braces are missing from this
 * view (e.g. after the quality<0 check at 3638). */
3629 static int estimate_qp(MpegEncContext *s, int dry_run){
3630 if (s->next_lambda){
3631 s->current_picture_ptr->f->quality =
3632 s->current_picture.f->quality = s->next_lambda;
3633 if(!dry_run) s->next_lambda= 0;
3634 } else if (!s->fixed_qscale) {
3635 int quality = ff_rate_estimate_qscale(s, dry_run);
3636 s->current_picture_ptr->f->quality =
3637 s->current_picture.f->quality = quality;
/* presumably a negative quality propagates an error — TODO confirm,
 * the handling line is not visible here */
3638 if (s->current_picture.f->quality < 0)
3642 if(s->adaptive_quant){
3643 switch(s->codec_id){
3644 case AV_CODEC_ID_MPEG4:
3645 if (CONFIG_MPEG4_ENCODER)
3646 ff_clean_mpeg4_qscales(s);
3648 case AV_CODEC_ID_H263:
3649 case AV_CODEC_ID_H263P:
3650 case AV_CODEC_ID_FLV1:
3651 if (CONFIG_H263_ENCODER)
3652 ff_clean_h263_qscales(s);
3655 ff_init_qscale_tab(s);
3658 s->lambda= s->lambda_table[0];
3661 s->lambda = s->current_picture.f->quality;
3666 /* must be called before writing the header */
/* Update temporal distances used by B-frame prediction:
 * pp_time = distance between the two surrounding reference frames,
 * pb_time = distance from the previous reference to this B frame.
 * Derived from the frame pts scaled by time_base.num. */
3667 static void set_frame_distances(MpegEncContext * s){
3668 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3669 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3671 if(s->pict_type==AV_PICTURE_TYPE_B){
3672 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3673 av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
/* non-B path: remember this frame as the latest reference */
3675 s->pp_time= s->time - s->last_non_b_time;
3676 s->last_non_b_time= s->time;
3677 av_assert1(s->picture_number==0 || s->pp_time > 0);
/* Encode one picture: motion estimation across slice threads, f_code/b_code
 * selection, scene-change handling, QP estimation, quant-matrix setup for
 * MJPEG/AMV/SpeedHQ, per-format picture-header writing, then the threaded
 * slice encode and the merge of all slice contexts.
 * NOTE(review): this chunk is missing many interior lines (declarations,
 * returns, braces — the embedded numbering jumps repeatedly). Code is kept
 * byte-identical as visible; comments only mark the intact sections. */
3681 static int encode_picture(MpegEncContext *s, int picture_number)
3685 int context_count = s->slice_context_count;
3687 s->picture_number = picture_number;
3689 /* Reset the average MB variance */
3690 s->me.mb_var_sum_temp =
3691 s->me.mc_mb_var_sum_temp = 0;
3693 /* we need to initialize some time vars before we can encode B-frames */
3694 // RAL: Condition added for MPEG1VIDEO
3695 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3696 set_frame_distances(s);
3697 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3698 ff_set_mpeg4_time(s);
3700 s->me.scene_change_score=0;
3702 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* rounding mode: I frames reset it, P/S frames may toggle it (flipflop) */
3704 if(s->pict_type==AV_PICTURE_TYPE_I){
3705 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3706 else s->no_rounding=0;
3707 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3708 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3709 s->no_rounding ^= 1;
/* 2nd pass of 2-pass rate control, or lambda carried over from the last
 * frame of the same type when no explicit qscale was requested */
3712 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3713 if (estimate_qp(s,1) < 0)
3715 ff_get_2pass_fcode(s);
3716 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3717 if(s->pict_type==AV_PICTURE_TYPE_B)
3718 s->lambda= s->last_lambda_for[s->pict_type];
3720 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* non-(A)MJPEG codecs share one intra matrix for luma and chroma */
3724 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3725 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3726 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3727 s->q_chroma_intra_matrix = s->q_intra_matrix;
3728 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3731 s->mb_intra=0; //for the rate distortion & bit compare functions
3732 for(i=1; i<context_count; i++){
3733 ret = ff_update_duplicate_context(s->thread_context[i], s);
3741 /* Estimate motion for every MB */
3742 if(s->pict_type != AV_PICTURE_TYPE_I){
/* bias lambda by me_penalty_compensation for the ME cost functions */
3743 s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3744 s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3745 if (s->pict_type != AV_PICTURE_TYPE_B) {
3746 if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3748 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3752 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3753 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
/* I-frame: mark every MB intra */
3755 for(i=0; i<s->mb_stride*s->mb_height; i++)
3756 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3758 if(!s->fixed_qscale){
3759 /* finding spatial complexity for I-frame rate control */
3760 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3763 for(i=1; i<context_count; i++){
3764 merge_context_after_me(s, s->thread_context[i]);
3766 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3767 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* scene change: force the P frame to be encoded as I */
3770 if (s->me.scene_change_score > s->scenechange_threshold &&
3771 s->pict_type == AV_PICTURE_TYPE_P) {
3772 s->pict_type= AV_PICTURE_TYPE_I;
3773 for(i=0; i<s->mb_stride*s->mb_height; i++)
3774 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3775 if(s->msmpeg4_version >= 3)
3777 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3778 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* choose f_code from the P MV table (and field MVs when interlaced ME),
 * then clip/clean motion vectors that exceed the chosen range */
3782 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3783 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3785 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3787 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3788 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3789 s->f_code= FFMAX3(s->f_code, a, b);
3792 ff_fix_long_p_mvs(s, s->intra_penalty ? CANDIDATE_MB_TYPE_INTER : CANDIDATE_MB_TYPE_INTRA);
3793 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3794 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3798 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3799 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
/* B frames: pick forward (f_code) and backward (b_code) ranges from the
 * respective MV tables, then clip all four table kinds */
3804 if(s->pict_type==AV_PICTURE_TYPE_B){
3807 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3808 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3809 s->f_code = FFMAX(a, b);
3811 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3812 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3813 s->b_code = FFMAX(a, b);
3815 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3816 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3817 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3818 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3819 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3821 for(dir=0; dir<2; dir++){
3824 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3825 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3826 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3827 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3835 if (estimate_qp(s, 0) < 0)
/* very low QPs on I frames can clip with small max_qcoeff */
3838 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3839 s->pict_type == AV_PICTURE_TYPE_I &&
3840 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3841 s->qscale= 3; //reduce clipping problems
/* MJPEG: bake qscale into the (possibly user-supplied) intra matrices */
3843 if (s->out_format == FMT_MJPEG) {
3844 const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3845 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3847 if (s->avctx->intra_matrix) {
3849 luma_matrix = s->avctx->intra_matrix;
3851 if (s->avctx->chroma_intra_matrix)
3852 chroma_matrix = s->avctx->chroma_intra_matrix;
3854 /* for mjpeg, we do include qscale in the matrix */
3856 int j = s->idsp.idct_permutation[i];
3858 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3859 s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3861 s->y_dc_scale_table=
3862 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3863 s->chroma_intra_matrix[0] =
3864 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3865 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3866 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3867 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3868 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV: fixed DC scales (13/14) and sp5x quant tables */
3871 if(s->codec_id == AV_CODEC_ID_AMV){
3872 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3873 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3875 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3877 s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3878 s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3880 s->y_dc_scale_table= y;
3881 s->c_dc_scale_table= c;
3882 s->intra_matrix[0] = 13;
3883 s->chroma_intra_matrix[0] = 14;
3884 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3885 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3886 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3887 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3891 if (s->out_format == FMT_SPEEDHQ) {
3892 s->y_dc_scale_table=
3893 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[3];
3896 //FIXME var duplication
3897 s->current_picture_ptr->f->key_frame =
3898 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3899 s->current_picture_ptr->f->pict_type =
3900 s->current_picture.f->pict_type = s->pict_type;
3902 if (s->current_picture.f->key_frame)
3903 s->picture_in_gop_number=0;
3905 s->mb_x = s->mb_y = 0;
3906 s->last_bits= put_bits_count(&s->pb);
/* write the per-format picture header */
3907 switch(s->out_format) {
3909 if (CONFIG_MJPEG_ENCODER && s->huffman != HUFFMAN_TABLE_OPTIMAL)
3910 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3911 s->pred, s->intra_matrix, s->chroma_intra_matrix);
3914 if (CONFIG_SPEEDHQ_ENCODER)
3915 ff_speedhq_encode_picture_header(s);
3918 if (CONFIG_H261_ENCODER)
3919 ff_h261_encode_picture_header(s, picture_number);
3922 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3923 ff_wmv2_encode_picture_header(s, picture_number);
3924 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3925 ff_msmpeg4_encode_picture_header(s, picture_number);
3926 else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3927 ret = ff_mpeg4_encode_picture_header(s, picture_number);
3930 } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3931 ret = ff_rv10_encode_picture_header(s, picture_number);
3935 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3936 ff_rv20_encode_picture_header(s, picture_number);
3937 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3938 ff_flv_encode_picture_header(s, picture_number);
3939 else if (CONFIG_H263_ENCODER)
3940 ff_h263_encode_picture_header(s, picture_number);
3943 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3944 ff_mpeg1_encode_picture_header(s, picture_number);
3949 bits= put_bits_count(&s->pb);
3950 s->header_bits= bits - s->last_bits;
/* run the slice encoders and merge their contexts/bitstreams back */
3952 for(i=1; i<context_count; i++){
3953 update_duplicate_context_after_me(s->thread_context[i], s);
3955 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3956 for(i=1; i<context_count; i++){
3957 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3958 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3959 merge_context_after_encode(s, s->thread_context[i]);
/* DCT-domain noise reduction: accumulate per-coefficient error statistics in
 * dct_error_sum[intra] and shrink each coefficient toward zero by the
 * learned dct_offset[intra], clamping so a coefficient never changes sign.
 * NOTE(review): the positive/negative branch structure is partially missing
 * from this view (numbering gaps at 3972->3976 and after 3982). */
3965 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3966 const int intra= s->mb_intra;
3969 s->dct_count[intra]++;
3971 for(i=0; i<64; i++){
3972 int level= block[i];
/* positive coefficient: record error, subtract offset, floor at 0 */
3976 s->dct_error_sum[intra][i] += level;
3977 level -= s->dct_offset[intra][i];
3978 if(level<0) level=0;
/* negative coefficient: mirror of the above, ceiling at 0 */
3980 s->dct_error_sum[intra][i] -= level;
3981 level += s->dct_offset[intra][i];
3982 if(level>0) level=0;
/* Trellis (rate-distortion optimal) quantization of one 8x8 block.
 * Forward-DCTs the block, computes per-coefficient candidate levels, then
 * runs a Viterbi-style search over (run, level) codes using the codec's VLC
 * length tables and lambda to minimize distortion + lambda*rate. Returns the
 * index of the last nonzero coefficient and sets *overflow.
 * NOTE(review): many interior lines (declarations, loop headers, braces) are
 * missing from this view; code reproduced byte-identical as visible. */
3989 static int dct_quantize_trellis_c(MpegEncContext *s,
3990 int16_t *block, int n,
3991 int qscale, int *overflow){
3993 const uint16_t *matrix;
3994 const uint8_t *scantable;
3995 const uint8_t *perm_scantable;
3997 unsigned int threshold1, threshold2;
4009 int coeff_count[64];
4010 int qmul, qadd, start_i, last_non_zero, i, dc;
4011 const int esc_length= s->ac_esc_length;
4013 uint8_t * last_length;
4014 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
4017 s->fdsp.fdct(block);
4019 if(s->dct_error_sum)
4020 s->denoise_dct(s, block);
4022 qadd= ((qscale-1)|1)*8;
/* MPEG-2 may use the non-linear quantizer scale table */
4024 if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
4025 else mpeg2_qscale = qscale << 1;
/* intra path: intra scantables and matrices, special-cased DC */
4029 scantable= s->intra_scantable.scantable;
4030 perm_scantable= s->intra_scantable.permutated;
4038 /* For AIC we skip quant/dequant of INTRADC */
4043 /* note: block[0] is assumed to be positive */
4044 block[0] = (block[0] + (q >> 1)) / q;
4047 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4048 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4049 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4050 bias= 1<<(QMAT_SHIFT-1);
4052 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4053 length = s->intra_chroma_ac_vlc_length;
4054 last_length= s->intra_chroma_ac_vlc_last_length;
4056 length = s->intra_ac_vlc_length;
4057 last_length= s->intra_ac_vlc_last_length;
/* inter path */
4060 scantable= s->inter_scantable.scantable;
4061 perm_scantable= s->inter_scantable.permutated;
4064 qmat = s->q_inter_matrix[qscale];
4065 matrix = s->inter_matrix;
4066 length = s->inter_ac_vlc_length;
4067 last_length= s->inter_ac_vlc_last_length;
4071 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4072 threshold2= (threshold1<<1);
/* find the last coefficient that survives quantization */
4074 for(i=63; i>=start_i; i--) {
4075 const int j = scantable[i];
4076 int level = block[j] * qmat[j];
4078 if(((unsigned)(level+threshold1))>threshold2){
/* build up to two candidate levels (level, level-1) per coefficient */
4084 for(i=start_i; i<=last_non_zero; i++) {
4085 const int j = scantable[i];
4086 int level = block[j] * qmat[j];
4088 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4089 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4090 if(((unsigned)(level+threshold1))>threshold2){
4092 level= (bias + level)>>QMAT_SHIFT;
4094 coeff[1][i]= level-1;
4095 // coeff[2][k]= level-2;
4097 level= (bias - level)>>QMAT_SHIFT;
4098 coeff[0][i]= -level;
4099 coeff[1][i]= -level+1;
4100 // coeff[2][k]= -level+2;
4102 coeff_count[i]= FFMIN(level, 2);
4103 av_assert2(coeff_count[i]);
4106 coeff[0][i]= (level>>31)|1;
4111 *overflow= s->max_qcoeff < max; //overflow might have happened
4113 if(last_non_zero < start_i){
4114 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4115 return last_non_zero;
4118 score_tab[start_i]= 0;
4119 survivor[0]= start_i;
/* Viterbi search: for each position, try each candidate level against every
 * surviving run start and keep the cheapest (distortion + rate) path */
4122 for(i=start_i; i<=last_non_zero; i++){
4123 int level_index, j, zero_distortion;
4124 int dct_coeff= FFABS(block[ scantable[i] ]);
4125 int best_score=256*256*256*120;
/* ifast fdct output is AAN-scaled; undo the scale for distortion math */
4127 if (s->fdsp.fdct == ff_fdct_ifast)
4128 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4129 zero_distortion= dct_coeff*dct_coeff;
4131 for(level_index=0; level_index < coeff_count[i]; level_index++){
4133 int level= coeff[level_index][i];
4134 const int alevel= FFABS(level);
/* reconstruct the dequantized value per output format */
4139 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4140 unquant_coeff= alevel*qmul + qadd;
4141 } else if(s->out_format == FMT_MJPEG) {
4142 j = s->idsp.idct_permutation[scantable[i]];
4143 unquant_coeff = alevel * matrix[j] * 8;
4145 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4147 unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4148 unquant_coeff = (unquant_coeff - 1) | 1;
4150 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4151 unquant_coeff = (unquant_coeff - 1) | 1;
4156 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* levels representable without escape: use the VLC length tables */
4158 if((level&(~127)) == 0){
4159 for(j=survivor_count-1; j>=0; j--){
4160 int run= i - survivor[j];
4161 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4162 score += score_tab[i-run];
4164 if(score < best_score){
4167 level_tab[i+1]= level-64;
/* H.263/H.261 use a distinct "last" VLC table */
4171 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4172 for(j=survivor_count-1; j>=0; j--){
4173 int run= i - survivor[j];
4174 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4175 score += score_tab[i-run];
4176 if(score < last_score){
4179 last_level= level-64;
/* escape-coded levels pay a fixed esc_length rate */
4185 distortion += esc_length*lambda;
4186 for(j=survivor_count-1; j>=0; j--){
4187 int run= i - survivor[j];
4188 int score= distortion + score_tab[i-run];
4190 if(score < best_score){
4193 level_tab[i+1]= level-64;
4197 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4198 for(j=survivor_count-1; j>=0; j--){
4199 int run= i - survivor[j];
4200 int score= distortion + score_tab[i-run];
4201 if(score < last_score){
4204 last_level= level-64;
4212 score_tab[i+1]= best_score;
4214 // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4215 if(last_non_zero <= 27){
4216 for(; survivor_count; survivor_count--){
4217 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4221 for(; survivor_count; survivor_count--){
4222 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4227 survivor[ survivor_count++ ]= i+1;
/* non-H.263 formats: pick the best stopping point including EOB cost */
4230 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4231 last_score= 256*256*256*120;
4232 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4233 int score= score_tab[i];
4235 score += lambda * 2; // FIXME more exact?
4237 if(score < last_score){
4240 last_level= level_tab[i];
4241 last_run= run_tab[i];
4246 s->coded_score[n] = last_score;
4248 dc= FFABS(block[0]);
4249 last_non_zero= last_i - 1;
4250 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4252 if(last_non_zero < start_i)
4253 return last_non_zero;
/* DC-only special case: choose the best single coefficient (or none) */
4255 if(last_non_zero == 0 && start_i == 0){
4257 int best_score= dc * dc;
4259 for(i=0; i<coeff_count[0]; i++){
4260 int level= coeff[i][0];
4261 int alevel= FFABS(level);
4262 int unquant_coeff, score, distortion;
4264 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4265 unquant_coeff= (alevel*qmul + qadd)>>3;
4267 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4268 unquant_coeff = (unquant_coeff - 1) | 1;
4270 unquant_coeff = (unquant_coeff + 4) >> 3;
4271 unquant_coeff<<= 3 + 3;
4273 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4275 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4276 else score= distortion + esc_length*lambda;
4278 if(score < best_score){
4280 best_level= level - 64;
4283 block[0]= best_level;
4284 s->coded_score[n] = best_score - dc*dc;
4285 if(best_level == 0) return -1;
4286 else return last_non_zero;
/* backtrack the winning path into the output block */
4290 av_assert2(last_level);
4292 block[ perm_scantable[last_non_zero] ]= last_level;
4295 for(; i>start_i; i -= run_tab[i] + 1){
4296 block[ perm_scantable[i-1] ]= level_tab[i];
4299 return last_non_zero;
/* 64x64 table of fixed-point 8x8 DCT basis functions, indexed by permuted
 * coefficient index; lazily filled by build_basis(). */
4302 static int16_t basis[64][64];
/* Build the DCT basis table, applying the IDCT coefficient permutation and
 * the 1/sqrt(2) normalization of the first row/column.
 * NOTE(review): the loop headers for i/j/x/y are missing from this view. */
4304 static void build_basis(uint8_t *perm){
4311 double s= 0.25*(1<<BASIS_SHIFT);
4313 int perm_index= perm[index];
4314 if(i==0) s*= sqrt(0.5);
4315 if(j==0) s*= sqrt(0.5);
4316 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Iterative refinement of an already-quantized block (quantizer noise
 * shaping): repeatedly tries +/-1 changes to individual coefficients,
 * scoring each change as basis-domain distortion (try_8x8basis) plus the
 * VLC rate delta, and applies the best change until none improves. Returns
 * the updated last-nonzero index.
 * NOTE(review): substantial interior lines are missing from this view
 * (declarations, loop headers, braces); code kept byte-identical. */
4323 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4324 int16_t *block, int16_t *weight, int16_t *orig,
4327 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4328 const uint8_t *scantable;
4329 const uint8_t *perm_scantable;
4330 // unsigned int threshold1, threshold2;
4335 int qmul, qadd, start_i, last_non_zero, i, dc;
4337 uint8_t * last_length;
4339 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* build the DCT basis table on first use */
4341 if(basis[0][0] == 0)
4342 build_basis(s->idsp.idct_permutation);
4347 scantable= s->intra_scantable.scantable;
4348 perm_scantable= s->intra_scantable.permutated;
4355 /* For AIC we skip quant/dequant of INTRADC */
4359 q <<= RECON_SHIFT-3;
4360 /* note: block[0] is assumed to be positive */
4362 // block[0] = (block[0] + (q >> 1)) / q;
4364 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4365 // bias= 1<<(QMAT_SHIFT-1);
4366 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4367 length = s->intra_chroma_ac_vlc_length;
4368 last_length= s->intra_chroma_ac_vlc_last_length;
4370 length = s->intra_ac_vlc_length;
4371 last_length= s->intra_ac_vlc_last_length;
4374 scantable= s->inter_scantable.scantable;
4375 perm_scantable= s->inter_scantable.permutated;
4378 length = s->inter_ac_vlc_length;
4379 last_length= s->inter_ac_vlc_last_length;
4381 last_non_zero = s->block_last_index[n];
/* rem[] = reconstruction residual in RECON_SHIFT fixed point */
4383 dc += (1<<(RECON_SHIFT-1));
4384 for(i=0; i<64; i++){
4385 rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
/* derive 6-bit perceptual weights from weight[] and qns strength */
4389 for(i=0; i<64; i++){
4394 w= FFABS(weight[i]) + qns*one;
4395 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4398 // w=weight[i] = (63*qns + (w/2)) / w;
4401 av_assert2(w<(1<<6));
4404 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* add the currently-coded coefficients into rem[] and record the RLE runs */
4408 for(i=start_i; i<=last_non_zero; i++){
4409 int j= perm_scantable[i];
4410 const int level= block[j];
4414 if(level<0) coeff= qmul*level - qadd;
4415 else coeff= qmul*level + qadd;
4416 run_tab[rle_index++]=run;
4419 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
/* main refinement loop: find the single +/-1 change with the best score */
4426 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4429 int run2, best_unquant_change=0, analyze_gradient;
4430 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
4432 if(analyze_gradient){
4433 for(i=0; i<64; i++){
4436 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
/* intra DC gets its own candidate changes */
4442 const int level= block[0];
4443 int change, old_coeff;
4445 av_assert2(s->mb_intra);
4449 for(change=-1; change<=1; change+=2){
4450 int new_level= level + change;
4451 int score, new_coeff;
4453 new_coeff= q*new_level;
4454 if(new_coeff >= 2048 || new_coeff < 0)
4457 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4458 new_coeff - old_coeff);
4459 if(score<best_score){
4462 best_change= change;
4463 best_unquant_change= new_coeff - old_coeff;
4470 run2= run_tab[rle_index++];
/* AC coefficients: try +/-1 on each position, accounting for the VLC rate
 * delta of changing run/level (and of creating/removing a coefficient) */
4474 for(i=start_i; i<64; i++){
4475 int j= perm_scantable[i];
4476 const int level= block[j];
4477 int change, old_coeff;
4479 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4483 if(level<0) old_coeff= qmul*level - qadd;
4484 else old_coeff= qmul*level + qadd;
4485 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4489 av_assert2(run2>=0 || i >= last_non_zero );
4492 for(change=-1; change<=1; change+=2){
4493 int new_level= level + change;
4494 int score, new_coeff, unquant_change;
4497 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4501 if(new_level<0) new_coeff= qmul*new_level - qadd;
4502 else new_coeff= qmul*new_level + qadd;
4503 if(new_coeff >= 2048 || new_coeff <= -2048)
4505 //FIXME check for overflow
4508 if(level < 63 && level > -63){
4509 if(i < last_non_zero)
4510 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4511 - length[UNI_AC_ENC_INDEX(run, level+64)];
4513 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4514 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
4517 av_assert2(FFABS(new_level)==1);
4519 if(analyze_gradient){
4520 int g= d1[ scantable[i] ];
4521 if(g && (g^new_level) >= 0)
/* creating a coefficient splits a run into two codes */
4525 if(i < last_non_zero){
4526 int next_i= i + run2 + 1;
4527 int next_level= block[ perm_scantable[next_i] ] + 64;
4529 if(next_level&(~127))
4532 if(next_i < last_non_zero)
4533 score += length[UNI_AC_ENC_INDEX(run, 65)]
4534 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4535 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4537 score += length[UNI_AC_ENC_INDEX(run, 65)]
4538 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4539 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4541 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4543 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4544 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* removing a coefficient merges two runs into one code */
4550 av_assert2(FFABS(level)==1);
4552 if(i < last_non_zero){
4553 int next_i= i + run2 + 1;
4554 int next_level= block[ perm_scantable[next_i] ] + 64;
4556 if(next_level&(~127))
4559 if(next_i < last_non_zero)
4560 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4561 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4562 - length[UNI_AC_ENC_INDEX(run, 65)];
4564 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4565 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4566 - length[UNI_AC_ENC_INDEX(run, 65)];
4568 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4570 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4571 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4578 unquant_change= new_coeff - old_coeff;
4579 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4581 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4583 if(score<best_score){
4586 best_change= change;
4587 best_unquant_change= unquant_change;
4591 prev_level= level + 64;
4592 if(prev_level&(~127))
/* apply the winning change and update last_non_zero and run_tab */
4602 int j= perm_scantable[ best_coeff ];
4604 block[j] += best_change;
4606 if(best_coeff > last_non_zero){
4607 last_non_zero= best_coeff;
4608 av_assert2(block[j]);
4610 for(; last_non_zero>=start_i; last_non_zero--){
4611 if(block[perm_scantable[last_non_zero]])
4618 for(i=start_i; i<=last_non_zero; i++){
4619 int j= perm_scantable[i];
4620 const int level= block[j];
4623 run_tab[rle_index++]=run;
4630 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4636 return last_non_zero;
4640 * Permute an 8x8 block according to permutation.
4641 * @param block the block which will be permuted according to
4642 * the given permutation vector
4643 * @param permutation the permutation vector
4644 * @param last the last non zero coefficient in scantable order, used to
4645 * speed the permutation up
4646 * @param scantable the used scantable, this is only used to speed the
4647 * permutation up, the block is not (inverse) permutated
4648 * to scantable order!
/* NOTE(review): the copy-to-temp loop body and surrounding braces are only
 * partially visible here; the visible code copies the first `last+1`
 * scantable positions into temp[] and writes them back permuted. */
4650 void ff_block_permute(int16_t *block, uint8_t *permutation,
4651 const uint8_t *scantable, int last)
4658 //FIXME it is ok but not clean and might fail for some permutations
4659 // if (permutation[1] == 1)
/* stage 1: stash the nonzero span into temp[] */
4662 for (i = 0; i <= last; i++) {
4663 const int j = scantable[i];
/* stage 2: write back through the permutation */
4668 for (i = 0; i <= last; i++) {
4669 const int j = scantable[i];
4670 const int perm_j = permutation[j];
4671 block[perm_j] = temp[j];
/* Plain (non-trellis) quantization of one 8x8 block: forward DCT, optional
 * DCT-domain denoising, then threshold-and-scale each coefficient with the
 * appropriate quant matrix and bias. Finally permutes the nonzero
 * coefficients to IDCT order when the IDCT uses a permutation.
 * Returns the last nonzero index (scantable order) and sets *overflow.
 * NOTE(review): interior lines are missing in this view (intra DC setup,
 * level clamping, max tracking, closing braces). */
4675 int ff_dct_quantize_c(MpegEncContext *s,
4676 int16_t *block, int n,
4677 int qscale, int *overflow)
4679 int i, j, level, last_non_zero, q, start_i;
4681 const uint8_t *scantable;
4684 unsigned int threshold1, threshold2;
4686 s->fdsp.fdct(block);
4688 if(s->dct_error_sum)
4689 s->denoise_dct(s, block);
/* intra: quantize DC separately, chroma may use its own matrix */
4692 scantable= s->intra_scantable.scantable;
4700 /* For AIC we skip quant/dequant of INTRADC */
4703 /* note: block[0] is assumed to be positive */
4704 block[0] = (block[0] + (q >> 1)) / q;
4707 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4708 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4710 scantable= s->inter_scantable.scantable;
4713 qmat = s->q_inter_matrix[qscale];
4714 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4716 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4717 threshold2= (threshold1<<1);
/* scan backwards to find the last coefficient above threshold */
4718 for(i=63;i>=start_i;i--) {
4720 level = block[j] * qmat[j];
4722 if(((unsigned)(level+threshold1))>threshold2){
/* forward pass: quantize everything up to last_non_zero */
4729 for(i=start_i; i<=last_non_zero; i++) {
4731 level = block[j] * qmat[j];
4733 // if( bias+level >= (1<<QMAT_SHIFT)
4734 // || bias-level >= (1<<QMAT_SHIFT)){
4735 if(((unsigned)(level+threshold1))>threshold2){
4737 level= (bias + level)>>QMAT_SHIFT;
4740 level= (bias - level)>>QMAT_SHIFT;
4748 *overflow= s->max_qcoeff < max; //overflow might have happened
4750 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4751 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4752 ff_block_permute(block, s->idsp.idct_permutation,
4753 scantable, last_non_zero);
4755 return last_non_zero;
/* Helpers for the AVOption tables below: OFFSET maps an option to its field
 * inside MpegEncContext; VE marks options as video encoding parameters. */
4758 #define OFFSET(x) offsetof(MpegEncContext, x)
4759 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options of the H.263 encoder (terminating entry not visible in
 * this chunk). */
4760 static const AVOption h263_options[] = {
4761 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4762 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass exposing h263_options through the AVOptions API. */
4767 static const AVClass h263_class = {
4768 .class_name = "H.263 encoder",
4769 .item_name = av_default_item_name,
4770 .option = h263_options,
4771 .version = LIBAVUTIL_VERSION_INT,
/* H.263 encoder registration: generic mpegvideo init/encode/close entry
 * points; YUV420P input only. */
4774 AVCodec ff_h263_encoder = {
4776 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4777 .type = AVMEDIA_TYPE_VIDEO,
4778 .id = AV_CODEC_ID_H263,
4779 .priv_data_size = sizeof(MpegEncContext),
4780 .init = ff_mpv_encode_init,
4781 .encode2 = ff_mpv_encode_picture,
4782 .close = ff_mpv_encode_end,
4783 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4784 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4785 .priv_class = &h263_class,
/* Private options of the H.263+ encoder (terminating entry not visible in
 * this chunk). */
4788 static const AVOption h263p_options[] = {
4789 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4790 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4791 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4792 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
/* AVClass exposing h263p_options through the AVOptions API. */
4796 static const AVClass h263p_class = {
4797 .class_name = "H.263p encoder",
4798 .item_name = av_default_item_name,
4799 .option = h263p_options,
4800 .version = LIBAVUTIL_VERSION_INT,
/* H.263+ (H.263 version 2) encoder registration; unlike plain H.263 it
 * advertises slice-threading capability. */
4803 AVCodec ff_h263p_encoder = {
4805 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4806 .type = AVMEDIA_TYPE_VIDEO,
4807 .id = AV_CODEC_ID_H263P,
4808 .priv_data_size = sizeof(MpegEncContext),
4809 .init = ff_mpv_encode_init,
4810 .encode2 = ff_mpv_encode_picture,
4811 .close = ff_mpv_encode_end,
4812 .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4813 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4814 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4815 .priv_class = &h263p_class,
/* AVClass for the MSMPEG4v2 encoder; uses the shared mpegvideo options. */
4818 static const AVClass msmpeg4v2_class = {
4819 .class_name = "msmpeg4v2 encoder",
4820 .item_name = av_default_item_name,
4821 .option = ff_mpv_generic_options,
4822 .version = LIBAVUTIL_VERSION_INT,
/* MSMPEG4v2 encoder registration: same generic mpegvideo entry points. */
4825 AVCodec ff_msmpeg4v2_encoder = {
4826 .name = "msmpeg4v2",
4827 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4828 .type = AVMEDIA_TYPE_VIDEO,
4829 .id = AV_CODEC_ID_MSMPEG4V2,
4830 .priv_data_size = sizeof(MpegEncContext),
4831 .init = ff_mpv_encode_init,
4832 .encode2 = ff_mpv_encode_picture,
4833 .close = ff_mpv_encode_end,
4834 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4835 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4836 .priv_class = &msmpeg4v2_class,
/* AVClass for the MSMPEG4v3 encoder; uses the shared mpegvideo options. */
4839 static const AVClass msmpeg4v3_class = {
4840 .class_name = "msmpeg4v3 encoder",
4841 .item_name = av_default_item_name,
4842 .option = ff_mpv_generic_options,
4843 .version = LIBAVUTIL_VERSION_INT,
/* MSMPEG4v3 encoder registration: same generic mpegvideo entry points. */
4846 AVCodec ff_msmpeg4v3_encoder = {
4848 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4849 .type = AVMEDIA_TYPE_VIDEO,
4850 .id = AV_CODEC_ID_MSMPEG4V3,
4851 .priv_data_size = sizeof(MpegEncContext),
4852 .init = ff_mpv_encode_init,
4853 .encode2 = ff_mpv_encode_picture,
4854 .close = ff_mpv_encode_end,
4855 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4856 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4857 .priv_class = &msmpeg4v3_class,
/* AVClass for the WMV1 encoder; uses the shared mpegvideo options. */
4860 static const AVClass wmv1_class = {
4861 .class_name = "wmv1 encoder",
4862 .item_name = av_default_item_name,
4863 .option = ff_mpv_generic_options,
4864 .version = LIBAVUTIL_VERSION_INT,
4867 AVCodec ff_wmv1_encoder = {
4869 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4870 .type = AVMEDIA_TYPE_VIDEO,
4871 .id = AV_CODEC_ID_WMV1,
4872 .priv_data_size = sizeof(MpegEncContext),
4873 .init = ff_mpv_encode_init,
4874 .encode2 = ff_mpv_encode_picture,
4875 .close = ff_mpv_encode_end,
4876 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4877 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4878 .priv_class = &wmv1_class,