2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/mem_internal.h"
40 #include "libavutil/pixdesc.h"
41 #include "libavutil/opt.h"
46 #include "mpegvideo.h"
47 #include "mpegvideodata.h"
51 #include "mjpegenc_common.h"
53 #include "mpegutils.h"
55 #include "speedhqenc.h"
57 #include "pixblockdsp.h"
61 #include "aandcttab.h"
63 #include "mpeg4video.h"
65 #include "bytestream.h"
68 #include "packet_internal.h"
/* Fixed-point precision (in bits) of the intra/inter quantizer bias values. */
73 #define QUANT_BIAS_SHIFT 8
/* Shift used for the 16-bit quantizer tables consumed by SIMD quantizers. */
75 #define QMAT_SHIFT_MMX 16
/* Forward declarations for encoder-local helpers defined later in this file. */
78 static int encode_picture(MpegEncContext *s, int picture_number);
79 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
80 static int sse_mb(MpegEncContext *s);
81 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
82 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
/* Shared default tables installed by mpv_encode_defaults(); default_fcode_tab
 * is initialized there, default_mv_penalty is presumably filled elsewhere —
 * TODO confirm (initialization not visible in this excerpt). */
84 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
85 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
87 const AVOption ff_mpv_generic_options[] = {
/**
 * Build per-qscale quantization multiplier tables from a quant matrix.
 * For each qscale in [qmin, qmax] the reciprocal of (qscale2 * matrix entry)
 * is precomputed so the quantizers can multiply instead of divide.
 * qmat:   32-bit table (used by the C/trellis quantizers).
 * qmat16: 16-bit magnitude + bias pair table (used by SIMD quantizers).
 * bias:   rounding bias in QUANT_BIAS_SHIFT fixed point.
 * intra:  nonzero skips index 0 (DC) in the overflow check loop below.
 */
92 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
93 uint16_t (*qmat16)[2][64],
94 const uint16_t *quant_matrix,
95 int bias, int qmin, int qmax, int intra)
97 FDCTDSPContext *fdsp = &s->fdsp;
101 for (qscale = qmin; qscale <= qmax; qscale++) {
/* MPEG-2 non-linear quantizer maps qscale through a table; otherwise the
 * effective scale is simply qscale * 2. */
105 if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
106 else qscale2 = qscale << 1;
/* Branch 1: FDCTs that produce unscaled coefficients — plain reciprocal. */
108 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
110 fdsp->fdct == ff_faandct ||
111 #endif /* CONFIG_FAANDCT */
112 fdsp->fdct == ff_jpeg_fdct_islow_10) {
113 for (i = 0; i < 64; i++) {
114 const int j = s->idsp.idct_permutation[i];
115 int64_t den = (int64_t) qscale2 * quant_matrix[j];
116 /* 16 <= qscale * quant_matrix[i] <= 7905
117 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
118 * 19952 <= x <= 249205026
119 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
120 * 3444240 >= (1 << 36) / (x) >= 275 */
122 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
/* Branch 2: the AAN ifast FDCT leaves coefficients scaled by
 * ff_aanscales[], so that factor is folded into the denominator. */
124 } else if (fdsp->fdct == ff_fdct_ifast) {
125 for (i = 0; i < 64; i++) {
126 const int j = s->idsp.idct_permutation[i];
127 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
128 /* 16 <= qscale * quant_matrix[i] <= 7905
129 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
130 * 19952 <= x <= 249205026
131 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
132 * 3444240 >= (1 << 36) / (x) >= 275 */
134 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
/* Fallback branch: also fills the 16-bit (value, bias) SIMD tables. */
137 for (i = 0; i < 64; i++) {
138 const int j = s->idsp.idct_permutation[i];
139 int64_t den = (int64_t) qscale2 * quant_matrix[j];
140 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
141 * Assume x = qscale * quant_matrix[i]
143 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
144 * so 32768 >= (1 << 19) / (x) >= 67 */
145 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
146 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
147 // (qscale * quant_matrix[i]);
148 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
/* Avoid 0 and the exact 128*256 value in the 16-bit table; both are
 * replaced with 128*256 - 1 (presumably to sidestep a SIMD overflow
 * case — TODO confirm against the asm quantizers). */
150 if (qmat16[qscale][0][i] == 0 ||
151 qmat16[qscale][0][i] == 128 * 256)
152 qmat16[qscale][0][i] = 128 * 256 - 1;
153 qmat16[qscale][1][i] =
154 ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
155 qmat16[qscale][0][i]);
/* Overflow check: shrink the shift (and warn) while the worst-case
 * product of a maximal coefficient and qmat would exceed INT_MAX. */
159 for (i = intra; i < 64; i++) {
161 if (fdsp->fdct == ff_fdct_ifast) {
162 max = (8191LL * ff_aanscales[i]) >> 14;
164 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
170 av_log(s->avctx, AV_LOG_INFO,
171 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/**
 * Derive s->qscale (and s->lambda2) from the current rate-control lambda.
 * Conversion uses the empirical factor 139/2^7 relating lambda to qscale.
 */
176 static inline void update_qscale(MpegEncContext *s)
/* Non-linear qscale search: intentionally disabled via "&& 0" (dead code) —
 * TODO confirm whether this branch should ever be re-enabled. */
178 if (s->q_scale_type == 1 && 0) {
180 int bestdiff=INT_MAX;
/* Find the non-linear qscale entry closest to lambda * 139, honoring
 * qmin/qmax (qmax is ignored when vbv_ignore_qmax is set). */
183 for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
184 int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
185 if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
186 (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
188 if (diff < bestdiff) {
/* Linear path: round lambda*139 to a qscale, then clip; when the VBV forces
 * ignoring qmax, clip to the codec maximum of 31 instead. */
195 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
196 (FF_LAMBDA_SHIFT + 7);
197 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
/* Keep lambda2 consistent with the (possibly clipped) lambda. */
200 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/**
 * Write a 64-entry quantization matrix to the bitstream,
 * 8 bits per coefficient, in zigzag scan order.
 */
204 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
210 for (i = 0; i < 64; i++) {
211 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
218 * init s->current_picture.qscale_table from s->lambda_table
220 void ff_init_qscale_tab(MpegEncContext *s)
222 int8_t * const qscale_table = s->current_picture.qscale_table;
/* Convert each macroblock's lambda to a qscale using the same 139/2^7
 * factor as update_qscale(), then clip to the configured qmin (upper
 * clip argument not visible in this excerpt). */
225 for (i = 0; i < s->mb_num; i++) {
226 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
227 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
228 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/**
 * Copy selected per-frame fields from src to dst; given the name this is
 * presumably used to sync duplicate (slice-thread) contexts after motion
 * estimation — TODO confirm with the (not visible) callers.
 */
233 static void update_duplicate_context_after_me(MpegEncContext *dst,
/* Field-by-field shallow copy helper. */
236 #define COPY(a) dst->a= src->a
238 COPY(current_picture);
244 COPY(picture_in_gop_number);
245 COPY(gop_picture_number);
246 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
247 COPY(progressive_frame); // FIXME don't set in encode_header
248 COPY(partitioned_frame); // FIXME don't set in encode_header
253 * Set the given MpegEncContext to defaults for encoding.
254 * the changed fields will not depend upon the prior state of the MpegEncContext.
256 static void mpv_encode_defaults(MpegEncContext *s)
259 ff_mpv_common_defaults(s);
/* Mark the central fcode range (-16..15 around MAX_MV) as fcode 1. */
261 for (i = -16; i < 16; i++) {
262 default_fcode_tab[i + MAX_MV] = 1;
/* Install the shared static default tables and reset picture counters. */
264 s->me.mv_penalty = default_mv_penalty;
265 s->fcode_tab = default_fcode_tab;
267 s->input_picture_number = 0;
268 s->picture_in_gop_number = 0;
/**
 * Initialize the DCT/quantization function pointers, preferring any
 * x86-optimized versions, and route through the trellis quantizer
 * when requested.
 */
271 av_cold int ff_dct_encode_init(MpegEncContext *s)
274 ff_dct_encode_init_x86(s);
276 if (CONFIG_H263_ENCODER)
277 ff_h263dsp_init(&s->h263dsp);
/* Fall back to the C quantizer if the arch-specific init set nothing. */
278 if (!s->dct_quantize)
279 s->dct_quantize = ff_dct_quantize_c;
281 s->denoise_dct = denoise_dct_c;
/* Keep the plain quantizer reachable as fast_dct_quantize even when the
 * (slower) trellis quantizer replaces dct_quantize below. */
282 s->fast_dct_quantize = s->dct_quantize;
283 if (s->avctx->trellis)
284 s->dct_quantize = dct_quantize_trellis_c;
289 /* init video encoder */
/**
 * Initialize the MPEG-video-family encoder: validate codec-specific
 * constraints and option combinations, configure the per-codec output
 * format, initialize DSP contexts, allocate quantization/picture tables,
 * precompute quant matrices and set up rate control.
 * Returns 0 on success or a negative AVERROR code.
 */
290 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
292 MpegEncContext *s = avctx->priv_data;
293 AVCPBProperties *cpb_props;
294 int i, ret, format_supported;
296 mpv_encode_defaults(s);
/* Validate the input pixel format per codec. */
298 switch (avctx->codec_id) {
299 case AV_CODEC_ID_MPEG2VIDEO:
300 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
301 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
302 av_log(avctx, AV_LOG_ERROR,
303 "only YUV420 and YUV422 are supported\n");
304 return AVERROR(EINVAL);
307 case AV_CODEC_ID_MJPEG:
308 case AV_CODEC_ID_AMV:
309 format_supported = 0;
310 /* JPEG color space */
311 if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
312 avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
313 avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
314 (avctx->color_range == AVCOL_RANGE_JPEG &&
315 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
316 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
317 avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
318 format_supported = 1;
319 /* MPEG color space */
320 else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
321 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
322 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
323 avctx->pix_fmt == AV_PIX_FMT_YUV444P))
324 format_supported = 1;
326 if (!format_supported) {
327 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
328 return AVERROR(EINVAL);
331 case AV_CODEC_ID_SPEEDHQ:
332 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
333 avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
334 avctx->pix_fmt != AV_PIX_FMT_YUV444P) {
335 av_log(avctx, AV_LOG_ERROR,
336 "only YUV420/YUV422/YUV444 are supported (no alpha support yet)\n");
337 return AVERROR(EINVAL);
/* Default case (presumably): all remaining codecs are 4:2:0 only. */
341 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
342 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
343 return AVERROR(EINVAL);
/* Record the chroma subsampling mode from the pixel format. */
347 switch (avctx->pix_fmt) {
348 case AV_PIX_FMT_YUVJ444P:
349 case AV_PIX_FMT_YUV444P:
350 s->chroma_format = CHROMA_444;
352 case AV_PIX_FMT_YUVJ422P:
353 case AV_PIX_FMT_YUV422P:
354 s->chroma_format = CHROMA_422;
356 case AV_PIX_FMT_YUVJ420P:
357 case AV_PIX_FMT_YUV420P:
359 s->chroma_format = CHROMA_420;
363 avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
/* Migrate deprecated public AVCodecContext options into private fields. */
365 #if FF_API_PRIVATE_OPT
366 FF_DISABLE_DEPRECATION_WARNINGS
367 if (avctx->rtp_payload_size)
368 s->rtp_payload_size = avctx->rtp_payload_size;
369 if (avctx->me_penalty_compensation)
370 s->me_penalty_compensation = avctx->me_penalty_compensation;
372 s->me_pre = avctx->pre_me;
373 FF_ENABLE_DEPRECATION_WARNINGS
/* Copy basic stream parameters into the encoder context. */
376 s->bit_rate = avctx->bit_rate;
377 s->width = avctx->width;
378 s->height = avctx->height;
/* Cap the GOP size at 600 unless experimental compliance is requested. */
379 if (avctx->gop_size > 600 &&
380 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
381 av_log(avctx, AV_LOG_WARNING,
382 "keyframe interval too large!, reducing it from %d to %d\n",
383 avctx->gop_size, 600);
384 avctx->gop_size = 600;
386 s->gop_size = avctx->gop_size;
388 if (avctx->max_b_frames > MAX_B_FRAMES) {
389 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
390 "is %d.\n", MAX_B_FRAMES);
391 avctx->max_b_frames = MAX_B_FRAMES;
393 s->max_b_frames = avctx->max_b_frames;
394 s->codec_id = avctx->codec->id;
395 s->strict_std_compliance = avctx->strict_std_compliance;
396 s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
397 s->rtp_mode = !!s->rtp_payload_size;
398 s->intra_dc_precision = avctx->intra_dc_precision;
400 // workaround some differences between how applications specify dc precision
401 if (s->intra_dc_precision < 0) {
402 s->intra_dc_precision += 8;
403 } else if (s->intra_dc_precision >= 8)
404 s->intra_dc_precision -= 8;
406 if (s->intra_dc_precision < 0) {
407 av_log(avctx, AV_LOG_ERROR,
408 "intra dc precision must be positive, note some applications use"
409 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
410 return AVERROR(EINVAL);
413 if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))
/* Only MPEG-2 supports a non-zero intra DC precision (up to 3). */
416 if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
417 av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
418 return AVERROR(EINVAL);
420 s->user_specified_pts = AV_NOPTS_VALUE;
422 if (s->gop_size <= 1) {
430 s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
/* Adaptive quantization is enabled if any masking option or QP-RD is set. */
432 s->adaptive_quant = (avctx->lumi_masking ||
433 avctx->dark_masking ||
434 avctx->temporal_cplx_masking ||
435 avctx->spatial_cplx_masking ||
438 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
441 s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
/* Pick a default VBV buffer size when only the max rate was given. */
443 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
444 switch(avctx->codec_id) {
445 case AV_CODEC_ID_MPEG1VIDEO:
446 case AV_CODEC_ID_MPEG2VIDEO:
447 avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
449 case AV_CODEC_ID_MPEG4:
450 case AV_CODEC_ID_MSMPEG4V1:
451 case AV_CODEC_ID_MSMPEG4V2:
452 case AV_CODEC_ID_MSMPEG4V3:
/* Piecewise-linear VBV sizing in units of 16384 bits, interpolated
 * between the 384k / 2M / 15M / 38.4M rate breakpoints. */
453 if (avctx->rc_max_rate >= 15000000) {
454 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
455 } else if(avctx->rc_max_rate >= 2000000) {
456 avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
457 } else if(avctx->rc_max_rate >= 384000) {
458 avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
460 avctx->rc_buffer_size = 40;
461 avctx->rc_buffer_size *= 16384;
464 if (avctx->rc_buffer_size) {
465 av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
/* Rate-control option consistency checks. */
469 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
470 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
471 return AVERROR(EINVAL);
474 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
475 av_log(avctx, AV_LOG_INFO,
476 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
479 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
480 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
481 return AVERROR(EINVAL);
484 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
485 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
486 return AVERROR(EINVAL);
489 if (avctx->rc_max_rate &&
490 avctx->rc_max_rate == avctx->bit_rate &&
491 avctx->rc_max_rate != avctx->rc_min_rate) {
492 av_log(avctx, AV_LOG_INFO,
493 "impossible bitrate constraints, this will fail\n");
/* The VBV buffer must hold at least one frame's worth of bits. */
496 if (avctx->rc_buffer_size &&
497 avctx->bit_rate * (int64_t)avctx->time_base.num >
498 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
499 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
500 return AVERROR(EINVAL);
/* Widen an impossibly tight bitrate tolerance instead of failing. */
503 if (!s->fixed_qscale &&
504 avctx->bit_rate * av_q2d(avctx->time_base) >
505 avctx->bit_rate_tolerance) {
506 av_log(avctx, AV_LOG_WARNING,
507 "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
508 avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
/* CBR MPEG-1/2: warn when the vbv_delay field cannot represent the buffer. */
511 if (avctx->rc_max_rate &&
512 avctx->rc_min_rate == avctx->rc_max_rate &&
513 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
514 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
515 90000LL * (avctx->rc_buffer_size - 1) >
516 avctx->rc_max_rate * 0xFFFFLL) {
517 av_log(avctx, AV_LOG_INFO,
518 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
519 "specified vbv buffer is too large for the given bitrate!\n");
/* Per-codec feature capability checks (4MV, OBMC, qpel, B-frames, SAR,
 * resolution limits, alignment, interlacing). */
522 if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
523 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
524 s->codec_id != AV_CODEC_ID_FLV1) {
525 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
526 return AVERROR(EINVAL);
529 if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
530 av_log(avctx, AV_LOG_ERROR,
531 "OBMC is only supported with simple mb decision\n");
532 return AVERROR(EINVAL);
535 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
536 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
537 return AVERROR(EINVAL);
540 if (s->max_b_frames &&
541 s->codec_id != AV_CODEC_ID_MPEG4 &&
542 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
543 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
544 av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
545 return AVERROR(EINVAL);
547 if (s->max_b_frames < 0) {
548 av_log(avctx, AV_LOG_ERROR,
549 "max b frames must be 0 or positive for mpegvideo based encoders\n");
550 return AVERROR(EINVAL);
553 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
554 s->codec_id == AV_CODEC_ID_H263 ||
555 s->codec_id == AV_CODEC_ID_H263P) &&
556 (avctx->sample_aspect_ratio.num > 255 ||
557 avctx->sample_aspect_ratio.den > 255)) {
558 av_log(avctx, AV_LOG_WARNING,
559 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
560 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
561 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
562 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
565 if ((s->codec_id == AV_CODEC_ID_H263 ||
566 s->codec_id == AV_CODEC_ID_H263P) &&
567 (avctx->width > 2048 ||
568 avctx->height > 1152 )) {
569 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
570 return AVERROR(EINVAL);
572 if ((s->codec_id == AV_CODEC_ID_H263 ||
573 s->codec_id == AV_CODEC_ID_H263P) &&
574 ((avctx->width &3) ||
575 (avctx->height&3) )) {
576 av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
577 return AVERROR(EINVAL);
580 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
581 (avctx->width > 4095 ||
582 avctx->height > 4095 )) {
583 av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
584 return AVERROR(EINVAL);
587 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
588 (avctx->width > 16383 ||
589 avctx->height > 16383 )) {
590 av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
591 return AVERROR(EINVAL);
594 if (s->codec_id == AV_CODEC_ID_RV10 &&
596 avctx->height&15 )) {
597 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
598 return AVERROR(EINVAL);
601 if (s->codec_id == AV_CODEC_ID_RV20 &&
604 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
605 return AVERROR(EINVAL);
608 if ((s->codec_id == AV_CODEC_ID_WMV1 ||
609 s->codec_id == AV_CODEC_ID_WMV2) &&
611 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
612 return AVERROR(EINVAL);
615 if ((avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
616 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
617 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
618 return AVERROR(EINVAL);
/* Deprecated option migration: mpeg_quant. */
621 #if FF_API_PRIVATE_OPT
622 FF_DISABLE_DEPRECATION_WARNINGS
623 if (avctx->mpeg_quant)
624 s->mpeg_quant = avctx->mpeg_quant;
625 FF_ENABLE_DEPRECATION_WARNINGS
628 // FIXME mpeg2 uses that too
629 if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
630 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
631 av_log(avctx, AV_LOG_ERROR,
632 "mpeg2 style quantization not supported by codec\n");
633 return AVERROR(EINVAL);
636 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
637 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
638 return AVERROR(EINVAL);
641 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
642 avctx->mb_decision != FF_MB_DECISION_RD) {
643 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
644 return AVERROR(EINVAL);
647 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
648 (s->codec_id == AV_CODEC_ID_AMV ||
649 s->codec_id == AV_CODEC_ID_MJPEG)) {
650 // Used to produce garbage with MJPEG.
651 av_log(avctx, AV_LOG_ERROR,
652 "QP RD is no longer compatible with MJPEG or AMV\n");
653 return AVERROR(EINVAL);
/* Deprecated option migration: scenechange_threshold. */
656 #if FF_API_PRIVATE_OPT
657 FF_DISABLE_DEPRECATION_WARNINGS
658 if (avctx->scenechange_threshold)
659 s->scenechange_threshold = avctx->scenechange_threshold;
660 FF_ENABLE_DEPRECATION_WARNINGS
663 if (s->scenechange_threshold < 1000000000 &&
664 (avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
665 av_log(avctx, AV_LOG_ERROR,
666 "closed gop with scene change detection are not supported yet, "
667 "set threshold to 1000000000\n");
668 return AVERROR_PATCHWELCOME;
671 if (avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
672 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
673 s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
674 av_log(avctx, AV_LOG_ERROR,
675 "low delay forcing is only available for mpeg2, "
676 "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
677 return AVERROR(EINVAL);
679 if (s->max_b_frames != 0) {
680 av_log(avctx, AV_LOG_ERROR,
681 "B-frames cannot be used with low delay\n");
682 return AVERROR(EINVAL);
/* Non-linear quantizer currently only supports qmax <= 28. */
686 if (s->q_scale_type == 1) {
687 if (avctx->qmax > 28) {
688 av_log(avctx, AV_LOG_ERROR,
689 "non linear quant only supports qmax <= 28 currently\n");
690 return AVERROR_PATCHWELCOME;
694 if (avctx->slices > 1 &&
695 (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
696 av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
697 return AVERROR(EINVAL);
/* Slice-threaded encoding is only supported by a few codecs. */
700 if (avctx->thread_count > 1 &&
701 s->codec_id != AV_CODEC_ID_MPEG4 &&
702 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
703 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
704 s->codec_id != AV_CODEC_ID_MJPEG &&
705 (s->codec_id != AV_CODEC_ID_H263P)) {
706 av_log(avctx, AV_LOG_ERROR,
707 "multi threaded encoding not supported by codec\n");
708 return AVERROR_PATCHWELCOME;
711 if (avctx->thread_count < 1) {
712 av_log(avctx, AV_LOG_ERROR,
713 "automatic thread number detection not supported by codec, "
715 return AVERROR_PATCHWELCOME;
718 if (!avctx->time_base.den || !avctx->time_base.num) {
719 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
720 return AVERROR(EINVAL);
/* Deprecated option migration: B-frame strategy / sensitivity. */
723 #if FF_API_PRIVATE_OPT
724 FF_DISABLE_DEPRECATION_WARNINGS
725 if (avctx->b_frame_strategy)
726 s->b_frame_strategy = avctx->b_frame_strategy;
727 if (avctx->b_sensitivity != 40)
728 s->b_sensitivity = avctx->b_sensitivity;
729 FF_ENABLE_DEPRECATION_WARNINGS
732 if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
733 av_log(avctx, AV_LOG_INFO,
734 "notice: b_frame_strategy only affects the first pass\n");
735 s->b_frame_strategy = 0;
/* Reduce the timebase to lowest terms. */
738 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
740 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
741 avctx->time_base.den /= i;
742 avctx->time_base.num /= i;
/* Default quantizer rounding bias per codec family. */
746 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
747 // (a + x * 3 / 8) / x
748 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
749 s->inter_quant_bias = 0;
751 s->intra_quant_bias = 0;
753 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
756 if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
757 av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
758 return AVERROR(EINVAL);
761 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
/* MPEG-4 stores the timebase denominator in a 16-bit field. */
763 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
764 avctx->time_base.den > (1 << 16) - 1) {
765 av_log(avctx, AV_LOG_ERROR,
766 "timebase %d/%d not supported by MPEG 4 standard, "
767 "the maximum admitted value for the timebase denominator "
768 "is %d\n", avctx->time_base.num, avctx->time_base.den,
770 return AVERROR(EINVAL);
772 s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
/* Per-codec output format and codec-specific flags. */
774 switch (avctx->codec->id) {
775 case AV_CODEC_ID_MPEG1VIDEO:
776 s->out_format = FMT_MPEG1;
777 s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
778 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
780 case AV_CODEC_ID_MPEG2VIDEO:
781 s->out_format = FMT_MPEG1;
782 s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
783 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
786 case AV_CODEC_ID_MJPEG:
787 case AV_CODEC_ID_AMV:
788 s->out_format = FMT_MJPEG;
789 s->intra_only = 1; /* force intra only for jpeg */
790 if (!CONFIG_MJPEG_ENCODER)
791 return AVERROR_ENCODER_NOT_FOUND;
792 if ((ret = ff_mjpeg_encode_init(s)) < 0)
797 case AV_CODEC_ID_SPEEDHQ:
798 s->out_format = FMT_SPEEDHQ;
799 s->intra_only = 1; /* force intra only for SHQ */
800 if (!CONFIG_SPEEDHQ_ENCODER)
801 return AVERROR_ENCODER_NOT_FOUND;
802 if ((ret = ff_speedhq_encode_init(s)) < 0)
807 case AV_CODEC_ID_H261:
808 if (!CONFIG_H261_ENCODER)
809 return AVERROR_ENCODER_NOT_FOUND;
810 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
811 av_log(avctx, AV_LOG_ERROR,
812 "The specified picture size of %dx%d is not valid for the "
813 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
814 s->width, s->height);
815 return AVERROR(EINVAL);
817 s->out_format = FMT_H261;
820 s->rtp_mode = 0; /* Sliced encoding not supported */
822 case AV_CODEC_ID_H263:
823 if (!CONFIG_H263_ENCODER)
824 return AVERROR_ENCODER_NOT_FOUND;
825 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
826 s->width, s->height) == 8) {
827 av_log(avctx, AV_LOG_ERROR,
828 "The specified picture size of %dx%d is not valid for "
829 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
830 "352x288, 704x576, and 1408x1152. "
831 "Try H.263+.\n", s->width, s->height);
832 return AVERROR(EINVAL);
834 s->out_format = FMT_H263;
838 case AV_CODEC_ID_H263P:
839 s->out_format = FMT_H263;
842 s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
843 s->modified_quant = s->h263_aic;
844 s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
845 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
848 /* These are just to be sure */
852 case AV_CODEC_ID_FLV1:
853 s->out_format = FMT_H263;
854 s->h263_flv = 2; /* format = 1; 11-bit codes */
855 s->unrestricted_mv = 1;
856 s->rtp_mode = 0; /* don't allow GOB */
860 case AV_CODEC_ID_RV10:
861 s->out_format = FMT_H263;
865 case AV_CODEC_ID_RV20:
866 s->out_format = FMT_H263;
869 s->modified_quant = 1;
873 s->unrestricted_mv = 0;
875 case AV_CODEC_ID_MPEG4:
876 s->out_format = FMT_H263;
878 s->unrestricted_mv = 1;
879 s->low_delay = s->max_b_frames ? 0 : 1;
880 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
882 case AV_CODEC_ID_MSMPEG4V2:
883 s->out_format = FMT_H263;
885 s->unrestricted_mv = 1;
886 s->msmpeg4_version = 2;
890 case AV_CODEC_ID_MSMPEG4V3:
891 s->out_format = FMT_H263;
893 s->unrestricted_mv = 1;
894 s->msmpeg4_version = 3;
895 s->flipflop_rounding = 1;
899 case AV_CODEC_ID_WMV1:
900 s->out_format = FMT_H263;
902 s->unrestricted_mv = 1;
903 s->msmpeg4_version = 4;
904 s->flipflop_rounding = 1;
908 case AV_CODEC_ID_WMV2:
909 s->out_format = FMT_H263;
911 s->unrestricted_mv = 1;
912 s->msmpeg4_version = 5;
913 s->flipflop_rounding = 1;
918 return AVERROR(EINVAL);
/* Deprecated option migration: noise_reduction. */
921 #if FF_API_PRIVATE_OPT
922 FF_DISABLE_DEPRECATION_WARNINGS
923 if (avctx->noise_reduction)
924 s->noise_reduction = avctx->noise_reduction;
925 FF_ENABLE_DEPRECATION_WARNINGS
928 avctx->has_b_frames = !s->low_delay;
932 s->progressive_frame =
933 s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
934 AV_CODEC_FLAG_INTERLACED_ME) ||
/* Allocate the common MpegEncContext state. */
939 if ((ret = ff_mpv_common_init(s)) < 0)
/* Initialize the DSP helper contexts used during encoding. */
942 ff_fdctdsp_init(&s->fdsp, avctx);
943 ff_me_cmp_init(&s->mecc, avctx);
944 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
945 ff_pixblockdsp_init(&s->pdsp, avctx);
946 ff_qpeldsp_init(&s->qdsp);
948 if (s->msmpeg4_version) {
949 int ac_stats_size = 2 * 2 * (MAX_LEVEL + 1) * (MAX_RUN + 1) * 2 * sizeof(int);
950 if (!(s->ac_stats = av_mallocz(ac_stats_size)))
951 return AVERROR(ENOMEM);
/* Allocate rate-control stats buffer, quant matrix tables and picture
 * reordering arrays; these are freed in ff_mpv_encode_end(). */
954 if (!(avctx->stats_out = av_mallocz(256)) ||
955 !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix, 32) ||
956 !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix, 32) ||
957 !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix, 32) ||
958 !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16, 32) ||
959 !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
960 !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16, 32) ||
961 !FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_PICTURE_COUNT) ||
962 !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_PICTURE_COUNT))
963 return AVERROR(ENOMEM);
965 if (s->noise_reduction) {
966 if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
967 return AVERROR(ENOMEM);
970 ff_dct_encode_init(s);
972 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
973 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
975 if (s->slice_context_count > 1) {
978 if (avctx->codec_id == AV_CODEC_ID_H263P)
979 s->h263_slice_structured = 1;
982 s->quant_precision = 5;
/* Deprecated option migration: frame-skip tuning knobs. */
984 #if FF_API_PRIVATE_OPT
985 FF_DISABLE_DEPRECATION_WARNINGS
986 if (avctx->frame_skip_threshold)
987 s->frame_skip_threshold = avctx->frame_skip_threshold;
988 if (avctx->frame_skip_factor)
989 s->frame_skip_factor = avctx->frame_skip_factor;
990 if (avctx->frame_skip_exp)
991 s->frame_skip_exp = avctx->frame_skip_exp;
992 if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
993 s->frame_skip_cmp = avctx->frame_skip_cmp;
994 FF_ENABLE_DEPRECATION_WARNINGS
997 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, avctx->ildct_cmp);
998 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
/* Codec-family-specific encoder initialization. */
1000 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
1001 ff_h261_encode_init(s);
1002 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
1003 ff_h263_encode_init(s);
1004 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
1005 if ((ret = ff_msmpeg4_encode_init(s)) < 0)
1007 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
1008 && s->out_format == FMT_MPEG1)
1009 ff_mpeg1_encode_init(s);
/* Select default quant matrices (in IDCT permutation order), then apply
 * any user-supplied matrices from the AVCodecContext. */
1012 for (i = 0; i < 64; i++) {
1013 int j = s->idsp.idct_permutation[i];
1014 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
1016 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
1017 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
1018 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1019 s->intra_matrix[j] =
1020 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1021 } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
1022 s->intra_matrix[j] =
1023 s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1026 s->chroma_intra_matrix[j] =
1027 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1028 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1030 if (avctx->intra_matrix)
1031 s->intra_matrix[j] = avctx->intra_matrix[i];
1032 if (avctx->inter_matrix)
1033 s->inter_matrix[j] = avctx->inter_matrix[i];
1036 /* precompute matrix */
1037 /* for mjpeg, we do include qscale in the matrix */
1038 if (s->out_format != FMT_MJPEG) {
1039 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
1040 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1042 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
1043 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1047 if ((ret = ff_rate_control_init(s)) < 0)
/* Deprecated option migration: brd_scale / prediction_method. */
1050 #if FF_API_PRIVATE_OPT
1051 FF_DISABLE_DEPRECATION_WARNINGS
1052 if (avctx->brd_scale)
1053 s->brd_scale = avctx->brd_scale;
1055 if (avctx->prediction_method)
1056 s->pred = avctx->prediction_method + 1;
1057 FF_ENABLE_DEPRECATION_WARNINGS
/* B-frame strategy 2 needs a pool of (downscaled) lookahead frames. */
1060 if (s->b_frame_strategy == 2) {
1061 for (i = 0; i < s->max_b_frames + 2; i++) {
1062 s->tmp_frames[i] = av_frame_alloc();
1063 if (!s->tmp_frames[i])
1064 return AVERROR(ENOMEM);
1066 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1067 s->tmp_frames[i]->width = s->width >> s->brd_scale;
1068 s->tmp_frames[i]->height = s->height >> s->brd_scale;
1070 ret = av_frame_get_buffer(s->tmp_frames[i], 0);
/* Export coded-picture-buffer properties as stream side data. */
1076 cpb_props = ff_add_cpb_side_data(avctx);
1078 return AVERROR(ENOMEM);
1079 cpb_props->max_bitrate = avctx->rc_max_rate;
1080 cpb_props->min_bitrate = avctx->rc_min_rate;
1081 cpb_props->avg_bitrate = avctx->bit_rate;
1082 cpb_props->buffer_size = avctx->rc_buffer_size;
/**
 * Free all encoder state allocated by ff_mpv_encode_init():
 * rate control, common context, codec-specific state and the
 * quantization/picture tables.
 */
1087 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1089 MpegEncContext *s = avctx->priv_data;
1092 ff_rate_control_uninit(s);
1094 ff_mpv_common_end(s);
1095 if (CONFIG_MJPEG_ENCODER &&
1096 s->out_format == FMT_MJPEG)
1097 ff_mjpeg_encode_close(s);
1099 av_freep(&avctx->extradata);
1101 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1102 av_frame_free(&s->tmp_frames[i]);
1104 ff_free_picture_tables(&s->new_picture);
1105 ff_mpeg_unref_picture(avctx, &s->new_picture);
1107 av_freep(&avctx->stats_out);
1108 av_freep(&s->ac_stats);
/* The chroma matrices may alias the luma ones; only free them when they
 * are distinct allocations, then clear the pointers to avoid double free. */
1110 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1111 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1112 s->q_chroma_intra_matrix= NULL;
1113 s->q_chroma_intra_matrix16= NULL;
1114 av_freep(&s->q_intra_matrix);
1115 av_freep(&s->q_inter_matrix);
1116 av_freep(&s->q_intra_matrix16);
1117 av_freep(&s->q_inter_matrix16);
1118 av_freep(&s->input_picture);
1119 av_freep(&s->reordered_input_picture);
1120 av_freep(&s->dct_offset);
/* Sum of absolute error of a 16x16 luma block against a constant
 * reference value 'ref' (used as a flatness measure by
 * get_intra_count below).  Accumulator declaration and return are
 * elided from this excerpt. */
1125 static int get_sae(uint8_t *src, int ref, int stride)
1130     for (y = 0; y < 16; y++) {
1131         for (x = 0; x < 16; x++) {
1132             acc += FFABS(src[x + y * stride] - ref);
/* Count 16x16 macroblocks for which coding against a flat (mean)
 * predictor beats coding against the reference frame, i.e. blocks
 * that look cheaper as intra.  Width/height are truncated to
 * multiples of 16; 'w' initialization is elided from this excerpt. */
1139 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1140                            uint8_t *ref, int stride)
1146     h = s->height & ~15;
1148     for (y = 0; y < h; y += 16) {
1149         for (x = 0; x < w; x += 16) {
1150             int offset = x + y * stride;
/* SAD against the reference = inter cost proxy. */
1151             int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
/* pix_sum over 256 pixels, rounded: block mean. */
1153             int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1154             int sae = get_sae(src + offset, mean, stride);
/* +500 bias: only count blocks clearly better as intra. */
1156             acc += sae + 500 < sad;
/* Thin wrapper: allocate buffers and tables for one encoder Picture
 * via the shared ff_alloc_picture() helper, passing this encoder's
 * geometry and receiving the chosen line sizes. */
1162 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1164     return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1165                             s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1166                             s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1167                             &s->linesize, &s->uvlinesize);
/* Take a user-supplied input frame into the encoder's input queue:
 * validate/guess its pts, either reference it directly (when strides
 * and alignment allow zero-copy) or copy it into a freshly allocated
 * internal picture, then append it at position 'encoding_delay' of
 * s->input_picture[], shifting the queue for flushing.
 * NOTE(review): several lines (pts read, 'direct' computation, error
 * paths, closing braces) are elided from this excerpt. */
1170 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1172     Picture *pic = NULL;
1174     int i, display_picture_number = 0, ret;
/* With B-frames the encoder must delay output by max_b_frames;
 * otherwise by one frame unless low_delay is set. */
1175     int encoding_delay = s->max_b_frames ? s->max_b_frames
1176                                          : (s->low_delay ? 0 : 1);
1177     int flush_offset = 1;
1182         display_picture_number = s->input_picture_number++;
/* Validate user pts monotonicity; on the second frame derive the
 * pts->dts delta used for output timestamps. */
1184         if (pts != AV_NOPTS_VALUE) {
1185             if (s->user_specified_pts != AV_NOPTS_VALUE) {
1186                 int64_t last = s->user_specified_pts;
1189                     av_log(s->avctx, AV_LOG_ERROR,
1190                            "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1192                     return AVERROR(EINVAL);
1195                 if (!s->low_delay && display_picture_number == 1)
1196                     s->dts_delta = pts - last;
1198             s->user_specified_pts = pts;
/* No pts supplied: extrapolate from the previous one, or fall back
 * to the display picture number. */
1200             if (s->user_specified_pts != AV_NOPTS_VALUE) {
1201                 s->user_specified_pts =
1202                 pts = s->user_specified_pts + 1;
1203                 av_log(s->avctx, AV_LOG_INFO,
1204                        "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1207                 pts = display_picture_number;
/* Zero-copy ("direct") use of the user's buffer requires matching
 * strides, 16-aligned dimensions and STRIDE_ALIGN-aligned data. */
1211         if (!pic_arg->buf[0] ||
1212             pic_arg->linesize[0] != s->linesize ||
1213             pic_arg->linesize[1] != s->uvlinesize ||
1214             pic_arg->linesize[2] != s->uvlinesize)
1216         if ((s->width & 15) || (s->height & 15))
1218         if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1220         if (s->linesize & (STRIDE_ALIGN-1))
1223         ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1224                 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1226         i = ff_find_unused_picture(s->avctx, s->picture, direct);
1230         pic = &s->picture[i];
1234             if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1237             ret = alloc_picture(s, pic, direct);
1242             if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1243                 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1244                 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1247                 int h_chroma_shift, v_chroma_shift;
1248                 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
/* Plane-by-plane copy into the internal picture, honouring the
 * per-plane chroma subsampling shifts. */
1252                 for (i = 0; i < 3; i++) {
1253                     int src_stride = pic_arg->linesize[i];
1254                     int dst_stride = i ? s->uvlinesize : s->linesize;
1255                     int h_shift = i ? h_chroma_shift : 0;
1256                     int v_shift = i ? v_chroma_shift : 0;
1257                     int w = s->width >> h_shift;
1258                     int h = s->height >> v_shift;
1259                     uint8_t *src = pic_arg->data[i];
1260                     uint8_t *dst = pic->f->data[i];
/* Interlaced MPEG-2 with tall alignment needs extra bottom padding
 * — presumably to keep field rows valid; elided branch sets vpad. */
1263                     if (   s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1264                         && !s->progressive_sequence
1265                         && FFALIGN(s->height, 32) - s->height > 16)
1268                     if (!s->avctx->rc_buffer_size)
1269                         dst += INPLACE_OFFSET;
1271                     if (src_stride == dst_stride)
1272                         memcpy(dst, src, src_stride * h);
1275                         uint8_t *dst2 = dst;
1277                             memcpy(dst2, src, w);
/* Replicate edge pixels when dimensions are not MB-aligned. */
1282                     if ((s->width & 15) || (s->height & (vpad-1))) {
1283                         s->mpvencdsp.draw_edges(dst, dst_stride,
1293         ret = av_frame_copy_props(pic->f, pic_arg);
1297     pic->f->display_picture_number = display_picture_number;
1298     pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1300     /* Flushing: When we have not received enough input frames,
1301      * ensure s->input_picture[0] contains the first picture */
1302     for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1303         if (s->input_picture[flush_offset])
1306     if (flush_offset <= 1)
1309         encoding_delay = encoding_delay - flush_offset + 1;
1312     /* shift buffer entries */
1313     for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1314         s->input_picture[i - flush_offset] = s->input_picture[i];
1316     s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether frame 'p' is similar enough to reference 'ref' to be
 * skipped entirely.  Per 8x8 block, a comparison metric is accumulated
 * according to |frame_skip_exp| (0: max, 1: sum of abs, 2: sum of
 * squares, 3: sum of |v|^3, 4: sum of v^4); a negative exponent
 * normalizes via pow().  Returns nonzero to skip (return statements
 * elided from this excerpt). */
1321 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1325     int64_t score64 = 0;
1327     for (plane = 0; plane < 3; plane++) {
1328         const int stride = p->f->linesize[plane];
/* Luma covers 2x2 8-pixel blocks per MB, chroma 1x1. */
1329         const int bw = plane ? 1 : 2;
1330         for (y = 0; y < s->mb_height * bw; y++) {
1331             for (x = 0; x < s->mb_width * bw; x++) {
/* Non-shared pictures carry an INPLACE offset of 16 bytes. */
1332                 int off = p->shared ? 0 : 16;
1333                 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1334                 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1335                 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1337                 switch (FFABS(s->frame_skip_exp)) {
1338                 case 0: score = FFMAX(score, v); break;
1339                 case 1: score += FFABS(v); break;
1340                 case 2: score64 += v * (int64_t)v; break;
1341                 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1342                 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
/* Negative exponent: convert the power sum into a per-MB mean root. */
1351     if (s->frame_skip_exp < 0)
1352         score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1353                       -1.0/s->frame_skip_exp);
1355     if (score64 < s->frame_skip_threshold)
1357     if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
/* Encode one frame with an auxiliary AVCodecContext (used by the
 * B-frame strategy estimator) via the send/receive API and, in the
 * full source, accumulate the produced packet size as the return
 * value.  Error paths and the return are elided from this excerpt. */
1362 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1364     AVPacket pkt = { 0 };
1368     av_init_packet(&pkt);
1370     ret = avcodec_send_frame(c, frame);
/* Drain every available packet; EAGAIN/EOF are expected and not
 * treated as errors. */
1375         ret = avcodec_receive_packet(c, &pkt);
1378             av_packet_unref(&pkt);
1379         } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
/* b_frame_strategy == 2: pick the number of B-frames by actually
 * encoding downscaled (by s->brd_scale) copies of the queued input
 * pictures with a throwaway encoder for every candidate B-count 'j',
 * scoring bits*lambda2 plus reconstruction error, and returning the
 * cheapest count.  Several lines (rd init, loop breaks, best_rd
 * update, closing braces) are elided from this excerpt. */
1386 static int estimate_best_b_count(MpegEncContext *s)
1388     const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1389     const int scale = s->brd_scale;
1390     int width  = s->width  >> scale;
1391     int height = s->height >> scale;
1392     int i, j, out_size, p_lambda, b_lambda, lambda2;
1393     int64_t best_rd  = INT64_MAX;
1394     int best_b_count = -1;
1397     av_assert0(scale >= 0 && scale <= 3);
1400     //s->next_picture_ptr->quality;
1401     p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1402     //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1403     b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1404     if (!b_lambda) // FIXME we should do this somewhere else
1405         b_lambda = p_lambda;
1406     lambda2  = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* Downscale the previous reference (i == 0) plus the queued input
 * pictures into s->tmp_frames[]. */
1409     for (i = 0; i < s->max_b_frames + 2; i++) {
1410         Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1411                                                 s->next_picture_ptr;
1414         if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1415             pre_input = *pre_input_ptr;
1416             memcpy(data, pre_input_ptr->f->data, sizeof(data));
/* Internal (non-shared) pictures store pixels at INPLACE_OFFSET. */
1418             if (!pre_input.shared && i) {
1419                 data[0] += INPLACE_OFFSET;
1420                 data[1] += INPLACE_OFFSET;
1421                 data[2] += INPLACE_OFFSET;
1424             s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1425                                        s->tmp_frames[i]->linesize[0],
1427                                        pre_input.f->linesize[0],
1429             s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1430                                        s->tmp_frames[i]->linesize[1],
1432                                        pre_input.f->linesize[1],
1433                                        width >> 1, height >> 1);
1434             s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1435                                        s->tmp_frames[i]->linesize[2],
1437                                        pre_input.f->linesize[2],
1438                                        width >> 1, height >> 1);
/* Try each candidate B-frame count j with a fresh encoder context. */
1442     for (j = 0; j < s->max_b_frames + 1; j++) {
1446         if (!s->input_picture[j])
1449         c = avcodec_alloc_context3(NULL);
1451             return AVERROR(ENOMEM);
1455         c->flags        = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1456         c->flags       |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1457         c->mb_decision  = s->avctx->mb_decision;
1458         c->me_cmp       = s->avctx->me_cmp;
1459         c->mb_cmp       = s->avctx->mb_cmp;
1460         c->me_sub_cmp   = s->avctx->me_sub_cmp;
1461         c->pix_fmt      = AV_PIX_FMT_YUV420P;
1462         c->time_base    = s->avctx->time_base;
1463         c->max_b_frames = s->max_b_frames;
1465         ret = avcodec_open2(c, codec, NULL);
/* First frame is always coded as I at fixed quality. */
1469         s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1470         s->tmp_frames[0]->quality   = 1 * FF_QP2LAMBDA;
1472         out_size = encode_frame(c, s->tmp_frames[0]);
1478         //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
/* With j B-frames, every (j+1)-th frame (and the last) is a P. */
1480         for (i = 0; i < s->max_b_frames + 1; i++) {
1481             int is_p = i % (j + 1) == j || i == s->max_b_frames;
1483             s->tmp_frames[i + 1]->pict_type = is_p ?
1484                                               AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1485             s->tmp_frames[i + 1]->quality   = is_p ? p_lambda : b_lambda;
1487             out_size = encode_frame(c, s->tmp_frames[i + 1]);
1493             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1496         /* get the delayed frames */
1497         out_size = encode_frame(c, NULL);
1502         rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* Add the distortion reported by the throwaway encoder. */
1504         rd += c->error[0] + c->error[1] + c->error[2];
1512         avcodec_free_context(&c);
1517     return best_b_count;
/* Choose the next picture to encode and its type: shift the reorder
 * queue, optionally skip near-duplicate frames, decide I vs P and the
 * number of preceding B-frames (per b_frame_strategy), fill
 * s->reordered_input_picture[] accordingly, and finally reference the
 * chosen picture as s->new_picture / s->current_picture_ptr.
 * NOTE(review): many lines (else branches, error paths, closing
 * braces, return) are elided from this excerpt; code kept verbatim. */
1520 static int select_input_picture(MpegEncContext *s)
1524     for (i = 1; i < MAX_PICTURE_COUNT; i++)
1525         s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1526     s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1528     /* set next picture type & ordering */
1529     if (!s->reordered_input_picture[0] && s->input_picture[0]) {
/* Frame-skip: drop the input frame when it is close enough to the
 * last reference (see skip_check()). */
1530         if (s->frame_skip_threshold || s->frame_skip_factor) {
1531             if (s->picture_in_gop_number < s->gop_size &&
1532                 s->next_picture_ptr &&
1533                 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1534                 // FIXME check that the gop check above is +-1 correct
1535                 av_frame_unref(s->input_picture[0]->f);
1537                 ff_vbv_update(s, 0);
/* No reference yet (or intra-only codec): force an I-frame. */
1543         if (/*s->picture_in_gop_number >= s->gop_size ||*/
1544             !s->next_picture_ptr || s->intra_only) {
1545             s->reordered_input_picture[0] = s->input_picture[0];
1546             s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1547             s->reordered_input_picture[0]->f->coded_picture_number =
1548                 s->coded_picture_number++;
/* Two-pass mode: picture types come from the pass-1 stats. */
1552             if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1553                 for (i = 0; i < s->max_b_frames + 1; i++) {
1554                     int pict_num = s->input_picture[0]->f->display_picture_number + i;
1556                     if (pict_num >= s->rc_context.num_entries)
1558                     if (!s->input_picture[i]) {
1559                         s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1563                     s->input_picture[i]->f->pict_type =
1564                         s->rc_context.entry[pict_num].new_pict_type;
/* Strategy 0: fixed max_b_frames; 1: intra-count heuristic;
 * 2: brute-force via estimate_best_b_count(). */
1568             if (s->b_frame_strategy == 0) {
1569                 b_frames = s->max_b_frames;
1570                 while (b_frames && !s->input_picture[b_frames])
1572             } else if (s->b_frame_strategy == 1) {
1573                 for (i = 1; i < s->max_b_frames + 1; i++) {
1574                     if (s->input_picture[i] &&
1575                         s->input_picture[i]->b_frame_score == 0) {
1576                         s->input_picture[i]->b_frame_score =
1578                                 s->input_picture[i    ]->f->data[0],
1579                                 s->input_picture[i - 1]->f->data[0],
1583                 for (i = 0; i < s->max_b_frames + 1; i++) {
1584                     if (!s->input_picture[i] ||
1585                         s->input_picture[i]->b_frame_score - 1 >
1586                             s->mb_num / s->b_sensitivity)
1590                 b_frames = FFMAX(0, i - 1);
/* Reset scores so the next decision starts fresh. */
1593                 for (i = 0; i < b_frames + 1; i++) {
1594                     s->input_picture[i]->b_frame_score = 0;
1596             } else if (s->b_frame_strategy == 2) {
1597                 b_frames = estimate_best_b_count(s);
/* User-forced picture types take precedence over the heuristics. */
1604             for (i = b_frames - 1; i >= 0; i--) {
1605                 int type = s->input_picture[i]->f->pict_type;
1606                 if (type && type != AV_PICTURE_TYPE_B)
1609             if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1610                 b_frames == s->max_b_frames) {
1611                 av_log(s->avctx, AV_LOG_ERROR,
1612                        "warning, too many B-frames in a row\n");
/* GOP boundary handling: clamp/flush B-run at GOP edges. */
1615             if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1616                 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1617                     s->gop_size > s->picture_in_gop_number) {
1618                     b_frames = s->gop_size - s->picture_in_gop_number - 1;
1620                     if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1622                     s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1626             if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1627                 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
/* Reorder: reference frame first, then its preceding B-frames. */
1630             s->reordered_input_picture[0] = s->input_picture[b_frames];
1631             if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1632                 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1633             s->reordered_input_picture[0]->f->coded_picture_number =
1634                 s->coded_picture_number++;
1635             for (i = 0; i < b_frames; i++) {
1636                 s->reordered_input_picture[i + 1] = s->input_picture[i];
1637                 s->reordered_input_picture[i + 1]->f->pict_type =
1639                 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1640                     s->coded_picture_number++;
1645     ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1647     if (s->reordered_input_picture[0]) {
1648         s->reordered_input_picture[0]->reference =
1649             s->reordered_input_picture[0]->f->pict_type !=
1650                 AV_PICTURE_TYPE_B ? 3 : 0;
1652         if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1655         if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1656             // input is a shared pix, so we can't modify it -> allocate a new
1657             // one & ensure that the shared one is reuseable
1660             int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1663             pic = &s->picture[i];
1665             pic->reference = s->reordered_input_picture[0]->reference;
1666             if (alloc_picture(s, pic, 0) < 0) {
1670             ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1674             /* mark us unused / free shared pic */
1675             av_frame_unref(s->reordered_input_picture[0]->f);
1676             s->reordered_input_picture[0]->shared = 0;
1678             s->current_picture_ptr = pic;
1680             // input is not a shared pix -> reuse buffer for current_pix
1681             s->current_picture_ptr = s->reordered_input_picture[0];
1682             for (i = 0; i < 4; i++) {
1683                 s->new_picture.f->data[i] += INPLACE_OFFSET;
1686         ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1687         if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1688                                        s->current_picture_ptr)) < 0)
1691         s->picture_number = s->new_picture.f->display_picture_number;
/* Post-encode bookkeeping for the just-coded frame: extend the edges
 * of reference pictures for unrestricted MV prediction, remember the
 * last picture type / lambda, and mirror legacy deprecated fields
 * (coded_frame, error[]) while the FF_API shims are still enabled. */
1696 static void frame_end(MpegEncContext *s)
1698     if (s->unrestricted_mv &&
1699         s->current_picture.reference &&
1701         const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1702         int hshift = desc->log2_chroma_w;
1703         int vshift = desc->log2_chroma_h;
/* Replicate border pixels on luma and both chroma planes so motion
 * vectors may point outside the picture. */
1704         s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1705                                 s->current_picture.f->linesize[0],
1706                                 s->h_edge_pos, s->v_edge_pos,
1707                                 EDGE_WIDTH, EDGE_WIDTH,
1708                                 EDGE_TOP | EDGE_BOTTOM);
1709         s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1710                                 s->current_picture.f->linesize[1],
1711                                 s->h_edge_pos >> hshift,
1712                                 s->v_edge_pos >> vshift,
1713                                 EDGE_WIDTH >> hshift,
1714                                 EDGE_WIDTH >> vshift,
1715                                 EDGE_TOP | EDGE_BOTTOM);
1716         s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1717                                 s->current_picture.f->linesize[2],
1718                                 s->h_edge_pos >> hshift,
1719                                 s->v_edge_pos >> vshift,
1720                                 EDGE_WIDTH >> hshift,
1721                                 EDGE_WIDTH >> vshift,
1722                                 EDGE_TOP | EDGE_BOTTOM);
1727     s->last_pict_type                 = s->pict_type;
1728     s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1729     if (s->pict_type!= AV_PICTURE_TYPE_B)
1730         s->last_non_b_pict_type = s->pict_type;
/* Deprecated-API mirrors; compiled out once the FF_API shims drop. */
1732 #if FF_API_CODED_FRAME
1733 FF_DISABLE_DEPRECATION_WARNINGS
1734     av_frame_unref(s->avctx->coded_frame);
1735     av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1736 FF_ENABLE_DEPRECATION_WARNINGS
1738 #if FF_API_ERROR_FRAME
1739 FF_DISABLE_DEPRECATION_WARNINGS
1740     memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1741            sizeof(s->current_picture.encoding_error));
1742 FF_ENABLE_DEPRECATION_WARNINGS
/* Refresh the per-coefficient DCT noise-reduction offsets from the
 * running error statistics, separately for intra and inter blocks.
 * Counters are halved once they exceed 2^16 to keep the running
 * averages adaptive. */
1746 static void update_noise_reduction(MpegEncContext *s)
1750     for (intra = 0; intra < 2; intra++) {
1751         if (s->dct_count[intra] > (1 << 16)) {
1752             for (i = 0; i < 64; i++) {
1753                 s->dct_error_sum[intra][i] >>= 1;
1755             s->dct_count[intra] >>= 1;
/* offset = noise_reduction * count / error_sum, rounded. */
1758         for (i = 0; i < 64; i++) {
1759             s->dct_offset[intra][i] = (s->noise_reduction *
1760                                        s->dct_count[intra] +
1761                                        s->dct_error_sum[intra][i] / 2) /
1762                                       (s->dct_error_sum[intra][i] + 1);
/* Prepare encoder state for coding one frame: rotate last/next/current
 * reference pictures, adjust data pointers for field pictures, select
 * the dequantizer set matching the output format, and refresh the
 * noise-reduction tables.  Error returns and some braces are elided
 * from this excerpt. */
1767 static int frame_start(MpegEncContext *s)
1771     /* mark & release old frames */
1772     if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1773         s->last_picture_ptr != s->next_picture_ptr &&
1774         s->last_picture_ptr->f->buf[0]) {
1775         ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1778     s->current_picture_ptr->f->pict_type = s->pict_type;
1779     s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1781     ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1782     if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1783                                    s->current_picture_ptr)) < 0)
/* B-frames do not become references; only advance last/next for
 * I/P frames. */
1786     if (s->pict_type != AV_PICTURE_TYPE_B) {
1787         s->last_picture_ptr = s->next_picture_ptr;
1789             s->next_picture_ptr = s->current_picture_ptr;
1792     if (s->last_picture_ptr) {
1793         ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1794         if (s->last_picture_ptr->f->buf[0] &&
1795             (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1796                                        s->last_picture_ptr)) < 0)
1799     if (s->next_picture_ptr) {
1800         ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1801         if (s->next_picture_ptr->f->buf[0] &&
1802             (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1803                                        s->next_picture_ptr)) < 0)
/* Field coding: point at the requested field and double the strides
 * so each field is addressed as a half-height frame. */
1807     if (s->picture_structure!= PICT_FRAME) {
1809         for (i = 0; i < 4; i++) {
1810             if (s->picture_structure == PICT_BOTTOM_FIELD) {
1811                 s->current_picture.f->data[i] +=
1812                     s->current_picture.f->linesize[i];
1814             s->current_picture.f->linesize[i] *= 2;
1815             s->last_picture.f->linesize[i]    *= 2;
1816             s->next_picture.f->linesize[i]    *= 2;
/* Pick the dequantizer pair matching the bitstream syntax. */
1820     if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1821         s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1822         s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1823     } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1824         s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1825         s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1827         s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1828         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1831     if (s->dct_error_sum) {
1832         av_assert2(s->noise_reduction && s->encoding);
1833         update_noise_reduction(s);
/* Main entry point for encoding one frame: queue the input picture,
 * select the picture to code, allocate the output packet, run the
 * actual encode (re-encoding with a larger lambda if the VBV buffer
 * would overflow), append stuffing bits, patch MPEG-1/2 vbv_delay for
 * CBR streams, and fill in packet pts/dts/flags/size.
 * NOTE(review): this excerpt elides many lines (error paths, else
 * branches, braces, return); code kept verbatim. */
1839 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1840                           const AVFrame *pic_arg, int *got_packet)
1842     MpegEncContext *s = avctx->priv_data;
1843     int i, stuffing_count, ret;
1844     int context_count = s->slice_context_count;
1846     s->vbv_ignore_qmax = 0;
1848     s->picture_in_gop_number++;
1850     if (load_input_picture(s, pic_arg) < 0)
1853     if (select_input_picture(s) < 0) {
/* output? */
1858     if (s->new_picture.f->data[0]) {
/* Single-slice-context encodes into the growable internal buffer;
 * otherwise a worst-case-sized packet is allocated up front. */
1859         int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1860         int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1862                                               s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1863         if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1866             s->mb_info_ptr = av_packet_new_side_data(pkt,
1867                                  AV_PKT_DATA_H263_MB_INFO,
1868                                  s->mb_width*s->mb_height*12);
1869             s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* Split the packet buffer proportionally among slice threads. */
1872         for (i = 0; i < context_count; i++) {
1873             int start_y = s->thread_context[i]->start_mb_y;
1874             int   end_y = s->thread_context[i]->  end_mb_y;
1875             int h       = s->mb_height;
1876             uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1877             uint8_t *end   = pkt->data + (size_t)(((int64_t) pkt->size) *   end_y / h);
1879             init_put_bits(&s->thread_context[i]->pb, start, end - start);
1882         s->pict_type = s->new_picture.f->pict_type;
1884         ret = frame_start(s);
1888         ret = encode_picture(s, s->picture_number);
1889         if (growing_buffer) {
1890             av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1891             pkt->data = s->pb.buf;
1892             pkt->size = avctx->internal->byte_buffer_size;
/* Deprecated per-frame stats mirrors (FF_API shim). */
1897 #if FF_API_STAT_BITS
1898 FF_DISABLE_DEPRECATION_WARNINGS
1899         avctx->header_bits = s->header_bits;
1900         avctx->mv_bits     = s->mv_bits;
1901         avctx->misc_bits   = s->misc_bits;
1902         avctx->i_tex_bits  = s->i_tex_bits;
1903         avctx->p_tex_bits  = s->p_tex_bits;
1904         avctx->i_count     = s->i_count;
1905         // FIXME f/b_count in avctx
1906         avctx->p_count     = s->mb_num - s->i_count - s->skip_count;
1907         avctx->skip_count  = s->skip_count;
1908 FF_ENABLE_DEPRECATION_WARNINGS
1913         if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1914             ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
/* VBV check: if the frame is too large, raise lambda and re-encode
 * the same frame (undoing state frame_start()/encode_picture()
 * already advanced). */
1916         if (avctx->rc_buffer_size) {
1917             RateControlContext *rcc = &s->rc_context;
1918             int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1919             int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
1920             int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1922             if (put_bits_count(&s->pb) > max_size &&
1923                 s->lambda < s->lmax) {
1924                 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1925                                        (s->qscale + 1) / s->qscale);
1926                 if (s->adaptive_quant) {
1928                     for (i = 0; i < s->mb_height * s->mb_stride; i++)
1929                         s->lambda_table[i] =
1930                             FFMAX(s->lambda_table[i] + min_step,
1931                                   s->lambda_table[i] * (s->qscale + 1) /
1934                 s->mb_skipped = 0;        // done in frame_start()
1935                 // done in encode_picture() so we must undo it
1936                 if (s->pict_type == AV_PICTURE_TYPE_P) {
1937                     if (s->flipflop_rounding          ||
1938                         s->codec_id == AV_CODEC_ID_H263P ||
1939                         s->codec_id == AV_CODEC_ID_MPEG4)
1940                         s->no_rounding ^= 1;
1942                 if (s->pict_type != AV_PICTURE_TYPE_B) {
1943                     s->time_base       = s->last_time_base;
1944                     s->last_non_b_time = s->time - s->pp_time;
1946                 for (i = 0; i < context_count; i++) {
1947                     PutBitContext *pb = &s->thread_context[i]->pb;
1948                     init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1950                 s->vbv_ignore_qmax = 1;
1951                 av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1955             av_assert0(avctx->rc_max_rate);
1958         if (avctx->flags & AV_CODEC_FLAG_PASS1)
1959             ff_write_pass1_stats(s);
1961         for (i = 0; i < 4; i++) {
1962             s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1963             avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1965         ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1966                                        s->current_picture_ptr->encoding_error,
1967                                        (avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1970         if (avctx->flags & AV_CODEC_FLAG_PASS1)
1971             assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1972                                              s->misc_bits + s->i_tex_bits +
1974         flush_put_bits(&s->pb);
1975         s->frame_bits  = put_bits_count(&s->pb);
/* Pad the frame with codec-specific stuffing to satisfy the VBV. */
1977         stuffing_count = ff_vbv_update(s, s->frame_bits);
1978         s->stuffing_bits = 8*stuffing_count;
1979         if (stuffing_count) {
1980             if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1981                     stuffing_count + 50) {
1982                 av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
1986             switch (s->codec_id) {
1987             case AV_CODEC_ID_MPEG1VIDEO:
1988             case AV_CODEC_ID_MPEG2VIDEO:
1989                 while (stuffing_count--) {
1990                     put_bits(&s->pb, 8, 0);
1993             case AV_CODEC_ID_MPEG4:
/* MPEG-4 stuffing is a stuffing start code followed by 0xFF bytes. */
1994                 put_bits(&s->pb, 16, 0);
1995                 put_bits(&s->pb, 16, 0x1C3);
1996                 stuffing_count -= 4;
1997                 while (stuffing_count--) {
1998                     put_bits(&s->pb, 8, 0xFF);
2002                 av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2004             flush_put_bits(&s->pb);
2005             s->frame_bits  = put_bits_count(&s->pb);
2008         /* update MPEG-1/2 vbv_delay for CBR */
2009         if (avctx->rc_max_rate                          &&
2010             avctx->rc_min_rate == avctx->rc_max_rate    &&
2011             s->out_format == FMT_MPEG1                  &&
2012             90000LL * (avctx->rc_buffer_size - 1) <=
2013                 avctx->rc_max_rate * 0xFFFFLL) {
2014             AVCPBProperties *props;
2017             int vbv_delay, min_delay;
2018             double inbits  = avctx->rc_max_rate *
2019                              av_q2d(avctx->time_base);
2020             int    minbits = s->frame_bits - 8 *
2021                              (s->vbv_delay_ptr - s->pb.buf - 1);
2022             double bits    = s->rc_context.buffer_index + minbits - inbits;
2025                 av_log(avctx, AV_LOG_ERROR,
2026                        "Internal error, negative bits\n");
2028             av_assert1(s->repeat_first_field == 0);
/* vbv_delay is expressed in 90 kHz clock ticks. */
2030             vbv_delay = bits * 90000 / avctx->rc_max_rate;
2031             min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
2034             vbv_delay = FFMAX(vbv_delay, min_delay);
2036             av_assert0(vbv_delay < 0xFFFF);
/* Patch the 16-bit vbv_delay field in place inside the already
 * written picture header (it straddles three bytes). */
2038             s->vbv_delay_ptr[0] &= 0xF8;
2039             s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2040             s->vbv_delay_ptr[1]  = vbv_delay >> 5;
2041             s->vbv_delay_ptr[2] &= 0x07;
2042             s->vbv_delay_ptr[2] |= vbv_delay << 3;
2044             props = av_cpb_properties_alloc(&props_size);
2046                 return AVERROR(ENOMEM);
/* CPB side data uses the 27 MHz clock: 90 kHz * 300. */
2047             props->vbv_delay = vbv_delay * 300;
2049             ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
2050                                           (uint8_t*)props, props_size);
2056 #if FF_API_VBV_DELAY
2057 FF_DISABLE_DEPRECATION_WARNINGS
2058             avctx->vbv_delay = vbv_delay * 300;
2059 FF_ENABLE_DEPRECATION_WARNINGS
2062         s->total_bits += s->frame_bits;
2063 #if FF_API_STAT_BITS
2064 FF_DISABLE_DEPRECATION_WARNINGS
2065         avctx->frame_bits = s->frame_bits;
2066 FF_ENABLE_DEPRECATION_WARNINGS
/* Output timestamps: with B-frames dts lags pts by one reordered
 * frame; dts_delta (from load_input_picture) seeds the first dts. */
2070         pkt->pts = s->current_picture.f->pts;
2071         if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2072             if (!s->current_picture.f->coded_picture_number)
2073                 pkt->dts = pkt->pts - s->dts_delta;
2075                 pkt->dts = s->reordered_pts;
2076             s->reordered_pts = pkt->pts;
2078             pkt->dts = pkt->pts;
2079         if (s->current_picture.f->key_frame)
2080             pkt->flags |= AV_PKT_FLAG_KEY;
2082             av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2087     /* release non-reference frames */
2088     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2089         if (!s->picture[i].reference)
2090             ff_mpeg_unref_picture(avctx, &s->picture[i]);
2093     av_assert1((s->frame_bits & 7) == 0);
2095     pkt->size = s->frame_bits / 8;
2096     *got_packet = !!pkt->size;
/* If a block contains only a few small coefficients (weighted by how
 * early they appear in scan order via tab[]), zero the whole block:
 * coding it costs more than the distortion of dropping it.  A negative
 * threshold additionally permits eliminating everything but the DC
 * coefficient (skip_dc path; its setup lines are elided here). */
2100 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2101                                                 int n, int threshold)
/* Weight per scan position: coefficients near DC count more. */
2103     static const char tab[64] = {
2104         3, 2, 2, 1, 1, 1, 1, 1,
2105         1, 1, 1, 1, 1, 1, 1, 1,
2106         1, 1, 1, 1, 1, 1, 1, 1,
2107         0, 0, 0, 0, 0, 0, 0, 0,
2108         0, 0, 0, 0, 0, 0, 0, 0,
2109         0, 0, 0, 0, 0, 0, 0, 0,
2110         0, 0, 0, 0, 0, 0, 0, 0,
2111         0, 0, 0, 0, 0, 0, 0, 0
2116     int16_t *block = s->block[n];
2117     const int last_index = s->block_last_index[n];
2120     if (threshold < 0) {
2122         threshold = -threshold;
2126     /* Are all we could set to zero already zero? */
2127     if (last_index <= skip_dc - 1)
/* Score the block; any coefficient with |level| > 1 disqualifies it
 * from elimination (elided early return). */
2130     for (i = 0; i <= last_index; i++) {
2131         const int j = s->intra_scantable.permutated[i];
2132         const int level = FFABS(block[j]);
2134             if (skip_dc && i == 0)
2138         } else if (level > 1) {
2144     if (score >= threshold)
/* Zero everything past the (possibly kept) DC coefficient. */
2146     for (i = skip_dc; i <= last_index; i++) {
2147         const int j = s->intra_scantable.permutated[i];
2151         s->block_last_index[n] = 0;
2153         s->block_last_index[n] = -1;
/* Clamp quantized coefficients into the codec's representable range
 * [min_qcoeff, max_qcoeff], counting how many were clipped; warn when
 * clipping happened in the fast (non-RD) macroblock decision mode.
 * The intra-DC skip condition and clamp assignments are elided from
 * this excerpt. */
2156 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2160     const int maxlevel = s->max_qcoeff;
2161     const int minlevel = s->min_qcoeff;
2165         i = 1; // skip clipping of intra dc
2169     for (; i <= last_index; i++) {
2170         const int j = s->intra_scantable.permutated[i];
2171         int level = block[j];
2173         if (level > maxlevel) {
2176         } else if (level < minlevel) {
2184     if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2185         av_log(s->avctx, AV_LOG_INFO,
2186                "warning, clipping %d dct coefficients to %d..%d\n",
2187                overflow, minlevel, maxlevel);
/* Compute a perceptual weight for each pixel of an 8x8 block from the
 * local variance of its 3x3 neighbourhood (clamped at the block
 * border): 36*sqrt(count*sumsq - sum^2)/count.  Used by the RD
 * refinement to weight quantization error by local activity.
 * count/sum/sqr setup lines are elided from this excerpt. */
2190 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2194     for (y = 0; y < 8; y++) {
2195         for (x = 0; x < 8; x++) {
/* 3x3 window clipped to the 8x8 block. */
2201             for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2202                 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2203                     int v = ptr[x2 + y2 * stride];
2209             weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2214 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2215 int motion_x, int motion_y,
2216 int mb_block_height,
2220 int16_t weight[12][64];
2221 int16_t orig[12][64];
2222 const int mb_x = s->mb_x;
2223 const int mb_y = s->mb_y;
2226 int dct_offset = s->linesize * 8; // default for progressive frames
2227 int uv_dct_offset = s->uvlinesize * 8;
2228 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2229 ptrdiff_t wrap_y, wrap_c;
2231 for (i = 0; i < mb_block_count; i++)
2232 skip_dct[i] = s->skipdct;
2234 if (s->adaptive_quant) {
2235 const int last_qp = s->qscale;
2236 const int mb_xy = mb_x + mb_y * s->mb_stride;
2238 s->lambda = s->lambda_table[mb_xy];
2241 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2242 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2243 s->dquant = s->qscale - last_qp;
2245 if (s->out_format == FMT_H263) {
2246 s->dquant = av_clip(s->dquant, -2, 2);
2248 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2250 if (s->pict_type == AV_PICTURE_TYPE_B) {
2251 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2254 if (s->mv_type == MV_TYPE_8X8)
2260 ff_set_qscale(s, last_qp + s->dquant);
2261 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2262 ff_set_qscale(s, s->qscale + s->dquant);
2264 wrap_y = s->linesize;
2265 wrap_c = s->uvlinesize;
2266 ptr_y = s->new_picture.f->data[0] +
2267 (mb_y * 16 * wrap_y) + mb_x * 16;
2268 ptr_cb = s->new_picture.f->data[1] +
2269 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2270 ptr_cr = s->new_picture.f->data[2] +
2271 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2273 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2274 uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2275 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2276 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2277 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2279 16, 16, mb_x * 16, mb_y * 16,
2280 s->width, s->height);
2282 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2284 mb_block_width, mb_block_height,
2285 mb_x * mb_block_width, mb_y * mb_block_height,
2287 ptr_cb = ebuf + 16 * wrap_y;
2288 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2290 mb_block_width, mb_block_height,
2291 mb_x * mb_block_width, mb_y * mb_block_height,
2293 ptr_cr = ebuf + 16 * wrap_y + 16;
2297 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2298 int progressive_score, interlaced_score;
2300 s->interlaced_dct = 0;
2301 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2302 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2303 NULL, wrap_y, 8) - 400;
2305 if (progressive_score > 0) {
2306 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2307 NULL, wrap_y * 2, 8) +
2308 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2309 NULL, wrap_y * 2, 8);
2310 if (progressive_score > interlaced_score) {
2311 s->interlaced_dct = 1;
2313 dct_offset = wrap_y;
2314 uv_dct_offset = wrap_c;
2316 if (s->chroma_format == CHROMA_422 ||
2317 s->chroma_format == CHROMA_444)
2323 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2324 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2325 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2326 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2328 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2332 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2333 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2334 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2335 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2336 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2337 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2338 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2339 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2340 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2341 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2342 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2343 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2347 op_pixels_func (*op_pix)[4];
2348 qpel_mc_func (*op_qpix)[16];
2349 uint8_t *dest_y, *dest_cb, *dest_cr;
2351 dest_y = s->dest[0];
2352 dest_cb = s->dest[1];
2353 dest_cr = s->dest[2];
2355 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2356 op_pix = s->hdsp.put_pixels_tab;
2357 op_qpix = s->qdsp.put_qpel_pixels_tab;
2359 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2360 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2363 if (s->mv_dir & MV_DIR_FORWARD) {
2364 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2365 s->last_picture.f->data,
2367 op_pix = s->hdsp.avg_pixels_tab;
2368 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2370 if (s->mv_dir & MV_DIR_BACKWARD) {
2371 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2372 s->next_picture.f->data,
2376 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2377 int progressive_score, interlaced_score;
2379 s->interlaced_dct = 0;
2380 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2381 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2385 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2386 progressive_score -= 400;
2388 if (progressive_score > 0) {
2389 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2391 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2395 if (progressive_score > interlaced_score) {
2396 s->interlaced_dct = 1;
2398 dct_offset = wrap_y;
2399 uv_dct_offset = wrap_c;
2401 if (s->chroma_format == CHROMA_422)
2407 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2408 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2409 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2410 dest_y + dct_offset, wrap_y);
2411 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2412 dest_y + dct_offset + 8, wrap_y);
2414 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2418 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2419 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2420 if (!s->chroma_y_shift) { /* 422 */
2421 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2422 dest_cb + uv_dct_offset, wrap_c);
2423 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2424 dest_cr + uv_dct_offset, wrap_c);
2427 /* pre quantization */
2428 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2429 2 * s->qscale * s->qscale) {
2431 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2433 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2435 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2436 wrap_y, 8) < 20 * s->qscale)
2438 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2439 wrap_y, 8) < 20 * s->qscale)
2441 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2443 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2445 if (!s->chroma_y_shift) { /* 422 */
2446 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2447 dest_cb + uv_dct_offset,
2448 wrap_c, 8) < 20 * s->qscale)
2450 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2451 dest_cr + uv_dct_offset,
2452 wrap_c, 8) < 20 * s->qscale)
2458 if (s->quantizer_noise_shaping) {
2460 get_visual_weight(weight[0], ptr_y , wrap_y);
2462 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2464 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2466 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2468 get_visual_weight(weight[4], ptr_cb , wrap_c);
2470 get_visual_weight(weight[5], ptr_cr , wrap_c);
2471 if (!s->chroma_y_shift) { /* 422 */
2473 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2476 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2479 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2482 /* DCT & quantize */
2483 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2485 for (i = 0; i < mb_block_count; i++) {
2488 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2489 // FIXME we could decide to change to quantizer instead of
2491 // JS: I don't think that would be a good idea it could lower
2492 // quality instead of improve it. Just INTRADC clipping
2493 // deserves changes in quantizer
2495 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2497 s->block_last_index[i] = -1;
2499 if (s->quantizer_noise_shaping) {
2500 for (i = 0; i < mb_block_count; i++) {
2502 s->block_last_index[i] =
2503 dct_quantize_refine(s, s->block[i], weight[i],
2504 orig[i], i, s->qscale);
2509 if (s->luma_elim_threshold && !s->mb_intra)
2510 for (i = 0; i < 4; i++)
2511 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2512 if (s->chroma_elim_threshold && !s->mb_intra)
2513 for (i = 4; i < mb_block_count; i++)
2514 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2516 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2517 for (i = 0; i < mb_block_count; i++) {
2518 if (s->block_last_index[i] == -1)
2519 s->coded_score[i] = INT_MAX / 256;
2524 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2525 s->block_last_index[4] =
2526 s->block_last_index[5] = 0;
2528 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2529 if (!s->chroma_y_shift) { /* 422 / 444 */
2530 for (i=6; i<12; i++) {
2531 s->block_last_index[i] = 0;
2532 s->block[i][0] = s->block[4][0];
2537 // non c quantize code returns incorrect block_last_index FIXME
2538 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2539 for (i = 0; i < mb_block_count; i++) {
2541 if (s->block_last_index[i] > 0) {
2542 for (j = 63; j > 0; j--) {
2543 if (s->block[i][s->intra_scantable.permutated[j]])
2546 s->block_last_index[i] = j;
2551 /* huffman encode */
2552 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2553 case AV_CODEC_ID_MPEG1VIDEO:
2554 case AV_CODEC_ID_MPEG2VIDEO:
2555 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2556 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2558 case AV_CODEC_ID_MPEG4:
2559 if (CONFIG_MPEG4_ENCODER)
2560 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2562 case AV_CODEC_ID_MSMPEG4V2:
2563 case AV_CODEC_ID_MSMPEG4V3:
2564 case AV_CODEC_ID_WMV1:
2565 if (CONFIG_MSMPEG4_ENCODER)
2566 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2568 case AV_CODEC_ID_WMV2:
2569 if (CONFIG_WMV2_ENCODER)
2570 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2572 case AV_CODEC_ID_H261:
2573 if (CONFIG_H261_ENCODER)
2574 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2576 case AV_CODEC_ID_H263:
2577 case AV_CODEC_ID_H263P:
2578 case AV_CODEC_ID_FLV1:
2579 case AV_CODEC_ID_RV10:
2580 case AV_CODEC_ID_RV20:
2581 if (CONFIG_H263_ENCODER)
2582 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2584 case AV_CODEC_ID_MJPEG:
2585 case AV_CODEC_ID_AMV:
2586 if (CONFIG_MJPEG_ENCODER)
2587 ff_mjpeg_encode_mb(s, s->block);
2589 case AV_CODEC_ID_SPEEDHQ:
2590 if (CONFIG_SPEEDHQ_ENCODER)
2591 ff_speedhq_encode_mb(s, s->block);
2598 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2600 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2601 else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2602 else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/* Snapshot the parts of the encoder state that trying one candidate MB
 * coding mode can clobber (prediction state, bit-count statistics,
 * quantizer), copying from *s into *d so the attempt can later be
 * undone/compared.  NOTE(review): this view is a fragmentary extraction —
 * the loop header for the last_dc copy and the closing brace are missing
 * from the visible lines. */
2605 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2608     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
/* MPEG-1 style DC/skip-run prediction state */
2611     d->mb_skip_run= s->mb_skip_run;
2613         d->last_dc[i] = s->last_dc[i];
/* bit-usage statistics, needed so a rejected attempt does not pollute them */
2616     d->mv_bits= s->mv_bits;
2617     d->i_tex_bits= s->i_tex_bits;
2618     d->p_tex_bits= s->p_tex_bits;
2619     d->i_count= s->i_count;
2620     d->f_count= s->f_count;
2621     d->b_count= s->b_count;
2622     d->skip_count= s->skip_count;
2623     d->misc_bits= s->misc_bits;
/* quantizer state */
2627     d->qscale= s->qscale;
2628     d->dquant= s->dquant;
2630     d->esc3_level_length= s->esc3_level_length;
/* Counterpart of copy_context_before_encode(): after a candidate MB
 * coding mode has been tried, copy the resulting state from *s into *d
 * (used to keep the best-scoring attempt).  In addition to the fields
 * saved beforehand, this also records the chosen motion vectors, MB
 * mode flags, block_last_index[] and the PutBitContexts for data
 * partitioning.  NOTE(review): fragmentary extraction — loop headers
 * and the closing brace are missing from the visible lines. */
2633 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2636     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2637     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
/* MPEG-1 style DC/skip-run prediction state */
2640     d->mb_skip_run= s->mb_skip_run;
2642         d->last_dc[i] = s->last_dc[i];
/* bit-usage statistics of the attempt */
2645     d->mv_bits= s->mv_bits;
2646     d->i_tex_bits= s->i_tex_bits;
2647     d->p_tex_bits= s->p_tex_bits;
2648     d->i_count= s->i_count;
2649     d->f_count= s->f_count;
2650     d->b_count= s->b_count;
2651     d->skip_count= s->skip_count;
2652     d->misc_bits= s->misc_bits;
/* chosen macroblock mode */
2654     d->mb_intra= s->mb_intra;
2655     d->mb_skipped= s->mb_skipped;
2656     d->mv_type= s->mv_type;
2657     d->mv_dir= s->mv_dir;
2659     if(s->data_partitioning){
2661         d->tex_pb= s->tex_pb;
2665         d->block_last_index[i]= s->block_last_index[i];
2666     d->interlaced_dct= s->interlaced_dct;
2667     d->qscale= s->qscale;
2669     d->esc3_level_length= s->esc3_level_length;
/* Try encoding the current MB with one candidate mode ("type"), score the
 * result (bit count, or full rate-distortion when mb_decision is
 * FF_MB_DECISION_RD), and keep it in *best if it beats *dmin.  Double
 * buffering via *next_block lets the best attempt's bitstream be kept
 * while the other buffer is reused for the next attempt.
 * NOTE(review): fragmentary extraction — some declarations (score) and
 * the accept/reject branch around copy_context_after_encode() are
 * missing from the visible lines. */
2672 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2673 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2674 int *dmin, int *next_block, int motion_x, int motion_y)
2677     uint8_t *dest_backup[3];
/* restore the pre-attempt state saved in *backup */
2679     copy_context_before_encode(s, backup, type);
2681     s->block= s->blocks[*next_block];
2682     s->pb= pb[*next_block];
2683     if(s->data_partitioning){
2684         s->pb2   = pb2   [*next_block];
2685         s->tex_pb= tex_pb[*next_block];
/* for RD mode, reconstruct into a scratchpad instead of the real frame */
2689         memcpy(dest_backup, s->dest, sizeof(s->dest));
2690         s->dest[0] = s->sc.rd_scratchpad;
2691         s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2692         s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2693         av_assert0(s->linesize >= 32); //FIXME
2696     encode_mb(s, motion_x, motion_y);
/* base score: bits produced by this attempt */
2698     score= put_bits_count(&s->pb);
2699     if(s->data_partitioning){
2700         score+= put_bits_count(&s->pb2);
2701         score+= put_bits_count(&s->tex_pb);
2704     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2705         ff_mpv_reconstruct_mb(s, s->block);
/* rate-distortion: lambda2-weighted bits plus reconstruction SSE */
2707         score *= s->lambda2;
2708         score += sse_mb(s) << FF_LAMBDA_SHIFT;
2712         memcpy(s->dest, dest_backup, sizeof(s->dest));
2719         copy_context_after_encode(best, s, type);
/* Sum of squared errors between two w x h pixel regions with the given
 * stride.  Uses the optimized mecc.sse[] functions for the common
 * 16x16 and 8x8 cases; otherwise falls back to a scalar loop using the
 * squared-difference lookup table ff_square_tab (offset by 256 so
 * negative differences index correctly).
 * NOTE(review): fragmentary extraction — the 16x16 test, the accumulator
 * declaration, the x/y loops and the final return are missing from the
 * visible lines. */
2723 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2724     const uint32_t *sq = ff_square_tab + 256;
2729         return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2730     else if(w==8 && h==8)
2731         return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2735             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/* Distortion of the current macroblock: compares the reconstructed MB
 * (s->dest) against the source frame (s->new_picture) over Y, Cb and Cr.
 * For full 16x16 MBs it uses the optimized NSSE comparison when
 * mb_cmp == FF_CMP_NSSE, plain SSE otherwise; MBs clipped by the
 * right/bottom frame edge fall through to the generic sse() helper with
 * the reduced w/h.
 * NOTE(review): fragmentary extraction — the w/h declarations, the
 * w==16 && h==16 test and the closing brace are missing from the
 * visible lines. */
2744 static int sse_mb(MpegEncContext *s){
/* clip MB size at the frame border */
2748     if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2749     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2752         if(s->avctx->mb_cmp == FF_CMP_NSSE){
2753             return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2754                    s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2755                    s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2757             return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2758                    s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2759                    s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
/* partial MB at the frame edge: generic path, chroma at half resolution */
2762         return  sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2763                +sse(s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2764                +sse(s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/* Slice-thread worker for the motion-estimation pre-pass: walks the
 * slice's macroblocks in reverse order (bottom-right to top-left) and
 * runs ff_pre_estimate_p_frame_motion() on each, using the pre-pass
 * diamond size (avctx->pre_dia_size).
 * NOTE(review): fragmentary extraction — the return statement and
 * closing braces are missing from the visible lines. */
2767 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2768     MpegEncContext *s= *(void**)arg;
2772     s->me.dia_size= s->avctx->pre_dia_size;
2773     s->first_slice_line=1;
/* reverse scan: bottom row of the slice first */
2774     for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2775         for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2776             ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2778         s->first_slice_line=0;
/* Slice-thread worker for the main motion-estimation pass: iterates the
 * slice's MBs in scan order, maintaining block_index[] incrementally,
 * and estimates B- or P-frame motion per MB depending on pict_type.
 * NOTE(review): fragmentary extraction — the return statement and
 * closing braces are missing from the visible lines. */
2786 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2787     MpegEncContext *s= *(void**)arg;
2789     ff_check_alignment();
2791     s->me.dia_size= s->avctx->dia_size;
2792     s->first_slice_line=1;
2793     for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2794         s->mb_x=0; //for block init below
2795         ff_init_block_index(s);
2796         for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
/* advance the four luma block indices by one MB (2 blocks each) */
2797             s->block_index[0]+=2;
2798             s->block_index[1]+=2;
2799             s->block_index[2]+=2;
2800             s->block_index[3]+=2;
2802             /* compute motion vector & mb_type and store in context */
2803             if(s->pict_type==AV_PICTURE_TYPE_B)
2804                 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2806                 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2808         s->first_slice_line=0;
/* Slice-thread worker computing per-MB luma statistics: for each 16x16
 * luma block it stores the variance in mb_var[] and the mean (rounded,
 * >>8 of the 256-pixel sum) in mb_mean[], accumulating the variance sum
 * in me.mb_var_sum_temp for later rate-control use.
 * NOTE(review): fragmentary extraction — the xx/yy declarations, return
 * and closing braces are missing from the visible lines. */
2813 static int mb_var_thread(AVCodecContext *c, void *arg){
2814     MpegEncContext *s= *(void**)arg;
2817     ff_check_alignment();
2819     for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2820         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2823             uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2825             int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
/* variance = E[x^2] - E[x]^2, with rounding (+500+128 then >>8) */
2827             varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2828                     (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2830             s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2831             s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2832             s->me.mb_var_sum_temp    += varc;
/* Terminate the current slice: perform codec-specific finishing work
 * (merge MPEG-4 partitions and add stuffing, MJPEG stuffing, SpeedHQ
 * end-of-slice), then byte-align the bitstream with flush_put_bits().
 * In pass-1 rate control, the alignment bits are booked as misc_bits. */
2838 static void write_slice_end(MpegEncContext *s){
2839     if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2840         if(s->partitioned_frame){
2841             ff_mpeg4_merge_partitions(s);
2844         ff_mpeg4_stuffing(&s->pb);
2845     }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2846         ff_mjpeg_encode_stuffing(s);
2847     } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
2848         ff_speedhq_end_slice(s);
/* byte-align the slice */
2851     flush_put_bits(&s->pb);
2853     if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2854         s->misc_bits+= get_bits_diff(s);
/* Append one 12-byte macroblock-info record to the mb_info side-data
 * buffer (written at mb_info_ptr + mb_info_size - 12, i.e. the slot
 * reserved by update_mb_info): bit offset in the stream, qscale, GOB
 * number, MB address within the GOB, and the H.263 motion-vector
 * predictors (hmv1/vmv1; the second pair is always 0 since 4MV is not
 * implemented).  NOTE(review): the declaration of pred_x/pred_y is
 * missing from this fragmentary view. */
2857 static void write_mb_info(MpegEncContext *s)
2859     uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2860     int offset = put_bits_count(&s->pb);
2861     int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2862     int gobn = s->mb_y / s->gob_index;
2864     if (CONFIG_H263_ENCODER)
2865         ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2866     bytestream_put_le32(&ptr, offset);
2867     bytestream_put_byte(&ptr, s->qscale);
2868     bytestream_put_byte(&ptr, gobn);
2869     bytestream_put_le16(&ptr, mba);
2870     bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2871     bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2872     /* 4MV not implemented */
2873     bytestream_put_byte(&ptr, 0); /* hmv2 */
2874     bytestream_put_byte(&ptr, 0); /* vmv2 */
/* Decide whether a new 12-byte mb_info record is due.  Called with
 * startcode=0 before each MB (reserve a slot once mb_info bytes have
 * been written since the last record) and with startcode=1 right after
 * a startcode (record its position so the info refers to the first MB
 * after it).  NOTE(review): fragmentary view — the early-return guard
 * and surrounding braces are missing from the visible lines. */
2877 static void update_mb_info(MpegEncContext *s, int startcode)
/* has at least mb_info bytes of payload accumulated since last record? */
2881     if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2882         s->mb_info_size += 12;
2883         s->prev_mb_info = s->last_mb_info;
2886         s->prev_mb_info = put_bits_count(&s->pb)/8;
2887         /* This might have incremented mb_info_size above, and we return without
2888          * actually writing any info into that slot yet. But in that case,
2889          * this will be called again at the start of the after writing the
2890          * start code, actually writing the mb info. */
2894     s->last_mb_info = put_bits_count(&s->pb)/8;
2895     if (!s->mb_info_size)
2896         s->mb_info_size += 12;
/* Grow the shared bitstream buffer when fewer than `threshold` bytes
 * remain, enlarging by at least `size_increase`.  Only possible with a
 * single slice context and when the PutBitContext writes into the
 * codec's internal byte_buffer.  The pointers into the old buffer
 * (ptr_lastgob, vbv_delay_ptr) are rebased onto the new one.  Returns
 * AVERROR(ENOMEM) on allocation failure/overflow, AVERROR(EINVAL) if
 * still below threshold afterwards.  NOTE(review): fragmentary view —
 * the new-size computation, the NULL check after allocation and the
 * success return are missing from the visible lines. */
2900 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2902     if (   s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2903         && s->slice_context_count == 1
2904         && s->pb.buf == s->avctx->internal->byte_buffer) {
/* remember offsets so the pointers can be rebased after realloc */
2905         int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2906         int vbv_pos     = s->vbv_delay_ptr - s->pb.buf;
2908         uint8_t *new_buffer = NULL;
2909         int new_buffer_size = 0;
/* guard against integer overflow of the grown size */
2911         if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2912             av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2913             return AVERROR(ENOMEM);
2918         av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2919                               s->avctx->internal->byte_buffer_size + size_increase);
2921             return AVERROR(ENOMEM);
2923         memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2924         av_free(s->avctx->internal->byte_buffer);
2925         s->avctx->internal->byte_buffer      = new_buffer;
2926         s->avctx->internal->byte_buffer_size = new_buffer_size;
2927         rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2928         s->ptr_lastgob   = s->pb.buf + lastgob_pos;
2929         s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2931     if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2932         return AVERROR(EINVAL);
2936 static int encode_thread(AVCodecContext *c, void *arg){
2937 MpegEncContext *s= *(void**)arg;
2938 int mb_x, mb_y, mb_y_order;
2939 int chr_h= 16>>s->chroma_y_shift;
2941 MpegEncContext best_s = { 0 }, backup_s;
2942 uint8_t bit_buf[2][MAX_MB_BYTES];
2943 uint8_t bit_buf2[2][MAX_MB_BYTES];
2944 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2945 PutBitContext pb[2], pb2[2], tex_pb[2];
2947 ff_check_alignment();
2950 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2951 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2952 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2955 s->last_bits= put_bits_count(&s->pb);
2966 /* init last dc values */
2967 /* note: quant matrix value (8) is implied here */
2968 s->last_dc[i] = 128 << s->intra_dc_precision;
2970 s->current_picture.encoding_error[i] = 0;
2972 if(s->codec_id==AV_CODEC_ID_AMV){
2973 s->last_dc[0] = 128*8/13;
2974 s->last_dc[1] = 128*8/14;
2975 s->last_dc[2] = 128*8/14;
2978 memset(s->last_mv, 0, sizeof(s->last_mv));
2982 switch(s->codec_id){
2983 case AV_CODEC_ID_H263:
2984 case AV_CODEC_ID_H263P:
2985 case AV_CODEC_ID_FLV1:
2986 if (CONFIG_H263_ENCODER)
2987 s->gob_index = H263_GOB_HEIGHT(s->height);
2989 case AV_CODEC_ID_MPEG4:
2990 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2991 ff_mpeg4_init_partitions(s);
2997 s->first_slice_line = 1;
2998 s->ptr_lastgob = s->pb.buf;
2999 for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
3000 if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
3002 mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
3003 if (first_in_slice && mb_y_order != s->start_mb_y)
3004 ff_speedhq_end_slice(s);
3005 s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
3012 ff_set_qscale(s, s->qscale);
3013 ff_init_block_index(s);
3015 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3016 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
3017 int mb_type= s->mb_type[xy];
3021 int size_increase = s->avctx->internal->byte_buffer_size/4
3022 + s->mb_width*MAX_MB_BYTES;
3024 ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
3025 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
3026 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
3029 if(s->data_partitioning){
3030 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
3031 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
3032 av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3038 s->mb_y = mb_y; // moved into loop, can get changed by H.261
3039 ff_update_block_index(s);
3041 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3042 ff_h261_reorder_mb_index(s);
3043 xy= s->mb_y*s->mb_stride + s->mb_x;
3044 mb_type= s->mb_type[xy];
3047 /* write gob / video packet header */
3049 int current_packet_size, is_gob_start;
3051 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
3053 is_gob_start = s->rtp_payload_size &&
3054 current_packet_size >= s->rtp_payload_size &&
3057 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3059 switch(s->codec_id){
3060 case AV_CODEC_ID_H263:
3061 case AV_CODEC_ID_H263P:
3062 if(!s->h263_slice_structured)
3063 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3065 case AV_CODEC_ID_MPEG2VIDEO:
3066 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3067 case AV_CODEC_ID_MPEG1VIDEO:
3068 if(s->mb_skip_run) is_gob_start=0;
3070 case AV_CODEC_ID_MJPEG:
3071 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3076 if(s->start_mb_y != mb_y || mb_x!=0){
3079 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3080 ff_mpeg4_init_partitions(s);
3084 av_assert2((put_bits_count(&s->pb)&7) == 0);
3085 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3087 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3088 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3089 int d = 100 / s->error_rate;
3091 current_packet_size=0;
3092 s->pb.buf_ptr= s->ptr_lastgob;
3093 av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3097 #if FF_API_RTP_CALLBACK
3098 FF_DISABLE_DEPRECATION_WARNINGS
3099 if (s->avctx->rtp_callback){
3100 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3101 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3103 FF_ENABLE_DEPRECATION_WARNINGS
3105 update_mb_info(s, 1);
3107 switch(s->codec_id){
3108 case AV_CODEC_ID_MPEG4:
3109 if (CONFIG_MPEG4_ENCODER) {
3110 ff_mpeg4_encode_video_packet_header(s);
3111 ff_mpeg4_clean_buffers(s);
3114 case AV_CODEC_ID_MPEG1VIDEO:
3115 case AV_CODEC_ID_MPEG2VIDEO:
3116 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3117 ff_mpeg1_encode_slice_header(s);
3118 ff_mpeg1_clean_buffers(s);
3121 case AV_CODEC_ID_H263:
3122 case AV_CODEC_ID_H263P:
3123 if (CONFIG_H263_ENCODER)
3124 ff_h263_encode_gob_header(s, mb_y);
3128 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3129 int bits= put_bits_count(&s->pb);
3130 s->misc_bits+= bits - s->last_bits;
3134 s->ptr_lastgob += current_packet_size;
3135 s->first_slice_line=1;
3136 s->resync_mb_x=mb_x;
3137 s->resync_mb_y=mb_y;
3141 if( (s->resync_mb_x == s->mb_x)
3142 && s->resync_mb_y+1 == s->mb_y){
3143 s->first_slice_line=0;
3147 s->dquant=0; //only for QP_RD
3149 update_mb_info(s, 0);
3151 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3153 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3155 copy_context_before_encode(&backup_s, s, -1);
3157 best_s.data_partitioning= s->data_partitioning;
3158 best_s.partitioned_frame= s->partitioned_frame;
3159 if(s->data_partitioning){
3160 backup_s.pb2= s->pb2;
3161 backup_s.tex_pb= s->tex_pb;
3164 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3165 s->mv_dir = MV_DIR_FORWARD;
3166 s->mv_type = MV_TYPE_16X16;
3168 s->mv[0][0][0] = s->p_mv_table[xy][0];
3169 s->mv[0][0][1] = s->p_mv_table[xy][1];
3170 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3171 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3173 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3174 s->mv_dir = MV_DIR_FORWARD;
3175 s->mv_type = MV_TYPE_FIELD;
3178 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3179 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3180 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3182 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3183 &dmin, &next_block, 0, 0);
3185 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3186 s->mv_dir = MV_DIR_FORWARD;
3187 s->mv_type = MV_TYPE_16X16;
3191 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3192 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3194 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3195 s->mv_dir = MV_DIR_FORWARD;
3196 s->mv_type = MV_TYPE_8X8;
3199 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3200 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3202 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3203 &dmin, &next_block, 0, 0);
3205 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3206 s->mv_dir = MV_DIR_FORWARD;
3207 s->mv_type = MV_TYPE_16X16;
3209 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3210 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3211 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3212 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3214 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3215 s->mv_dir = MV_DIR_BACKWARD;
3216 s->mv_type = MV_TYPE_16X16;
3218 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3219 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3220 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3221 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3223 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3224 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3225 s->mv_type = MV_TYPE_16X16;
3227 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3228 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3229 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3230 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3231 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3232 &dmin, &next_block, 0, 0);
3234 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3235 s->mv_dir = MV_DIR_FORWARD;
3236 s->mv_type = MV_TYPE_FIELD;
3239 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3240 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3241 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3243 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3244 &dmin, &next_block, 0, 0);
3246 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3247 s->mv_dir = MV_DIR_BACKWARD;
3248 s->mv_type = MV_TYPE_FIELD;
3251 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3252 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3253 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3255 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3256 &dmin, &next_block, 0, 0);
3258 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3259 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3260 s->mv_type = MV_TYPE_FIELD;
3262 for(dir=0; dir<2; dir++){
3264 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3265 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3266 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3269 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3270 &dmin, &next_block, 0, 0);
3272 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3274 s->mv_type = MV_TYPE_16X16;
3278 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3279 &dmin, &next_block, 0, 0);
3280 if(s->h263_pred || s->h263_aic){
3282 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3284 ff_clean_intra_table_entries(s); //old mode?
3288 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3289 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3290 const int last_qp= backup_s.qscale;
3293 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3294 static const int dquant_tab[4]={-1,1,-2,2};
3295 int storecoefs = s->mb_intra && s->dc_val[0];
3297 av_assert2(backup_s.dquant == 0);
3300 s->mv_dir= best_s.mv_dir;
3301 s->mv_type = MV_TYPE_16X16;
3302 s->mb_intra= best_s.mb_intra;
3303 s->mv[0][0][0] = best_s.mv[0][0][0];
3304 s->mv[0][0][1] = best_s.mv[0][0][1];
3305 s->mv[1][0][0] = best_s.mv[1][0][0];
3306 s->mv[1][0][1] = best_s.mv[1][0][1];
3308 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3309 for(; qpi<4; qpi++){
3310 int dquant= dquant_tab[qpi];
3311 qp= last_qp + dquant;
3312 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3314 backup_s.dquant= dquant;
3317 dc[i]= s->dc_val[0][ s->block_index[i] ];
3318 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3322 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3323 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3324 if(best_s.qscale != qp){
3327 s->dc_val[0][ s->block_index[i] ]= dc[i];
3328 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3335 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3336 int mx= s->b_direct_mv_table[xy][0];
3337 int my= s->b_direct_mv_table[xy][1];
3339 backup_s.dquant = 0;
3340 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3342 ff_mpeg4_set_direct_mv(s, mx, my);
3343 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3344 &dmin, &next_block, mx, my);
3346 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3347 backup_s.dquant = 0;
3348 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3350 ff_mpeg4_set_direct_mv(s, 0, 0);
3351 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3352 &dmin, &next_block, 0, 0);
3354 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3357 coded |= s->block_last_index[i];
3360 memcpy(s->mv, best_s.mv, sizeof(s->mv));
3361 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3362 mx=my=0; //FIXME find the one we actually used
3363 ff_mpeg4_set_direct_mv(s, mx, my);
3364 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3372 s->mv_dir= best_s.mv_dir;
3373 s->mv_type = best_s.mv_type;
3375 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3376 s->mv[0][0][1] = best_s.mv[0][0][1];
3377 s->mv[1][0][0] = best_s.mv[1][0][0];
3378 s->mv[1][0][1] = best_s.mv[1][0][1];*/
3381 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3382 &dmin, &next_block, mx, my);
3387 s->current_picture.qscale_table[xy] = best_s.qscale;
3389 copy_context_after_encode(s, &best_s, -1);
3391 pb_bits_count= put_bits_count(&s->pb);
3392 flush_put_bits(&s->pb);
3393 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3396 if(s->data_partitioning){
3397 pb2_bits_count= put_bits_count(&s->pb2);
3398 flush_put_bits(&s->pb2);
3399 ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3400 s->pb2= backup_s.pb2;
3402 tex_pb_bits_count= put_bits_count(&s->tex_pb);
3403 flush_put_bits(&s->tex_pb);
3404 ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3405 s->tex_pb= backup_s.tex_pb;
3407 s->last_bits= put_bits_count(&s->pb);
3409 if (CONFIG_H263_ENCODER &&
3410 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3411 ff_h263_update_motion_val(s);
3413 if(next_block==0){ //FIXME 16 vs linesize16
3414 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3415 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3416 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3419 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3420 ff_mpv_reconstruct_mb(s, s->block);
3422 int motion_x = 0, motion_y = 0;
3423 s->mv_type=MV_TYPE_16X16;
3424 // only one MB-Type possible
3427 case CANDIDATE_MB_TYPE_INTRA:
3430 motion_x= s->mv[0][0][0] = 0;
3431 motion_y= s->mv[0][0][1] = 0;
3433 case CANDIDATE_MB_TYPE_INTER:
3434 s->mv_dir = MV_DIR_FORWARD;
3436 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3437 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3439 case CANDIDATE_MB_TYPE_INTER_I:
3440 s->mv_dir = MV_DIR_FORWARD;
3441 s->mv_type = MV_TYPE_FIELD;
3444 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3445 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3446 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3449 case CANDIDATE_MB_TYPE_INTER4V:
3450 s->mv_dir = MV_DIR_FORWARD;
3451 s->mv_type = MV_TYPE_8X8;
3454 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3455 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3458 case CANDIDATE_MB_TYPE_DIRECT:
3459 if (CONFIG_MPEG4_ENCODER) {
3460 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3462 motion_x=s->b_direct_mv_table[xy][0];
3463 motion_y=s->b_direct_mv_table[xy][1];
3464 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3467 case CANDIDATE_MB_TYPE_DIRECT0:
3468 if (CONFIG_MPEG4_ENCODER) {
3469 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3471 ff_mpeg4_set_direct_mv(s, 0, 0);
3474 case CANDIDATE_MB_TYPE_BIDIR:
3475 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3477 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3478 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3479 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3480 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3482 case CANDIDATE_MB_TYPE_BACKWARD:
3483 s->mv_dir = MV_DIR_BACKWARD;
3485 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3486 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3488 case CANDIDATE_MB_TYPE_FORWARD:
3489 s->mv_dir = MV_DIR_FORWARD;
3491 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3492 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3494 case CANDIDATE_MB_TYPE_FORWARD_I:
3495 s->mv_dir = MV_DIR_FORWARD;
3496 s->mv_type = MV_TYPE_FIELD;
3499 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3500 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3501 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3504 case CANDIDATE_MB_TYPE_BACKWARD_I:
3505 s->mv_dir = MV_DIR_BACKWARD;
3506 s->mv_type = MV_TYPE_FIELD;
3509 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3510 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3511 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3514 case CANDIDATE_MB_TYPE_BIDIR_I:
3515 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3516 s->mv_type = MV_TYPE_FIELD;
3518 for(dir=0; dir<2; dir++){
3520 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3521 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3522 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3527 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3530 encode_mb(s, motion_x, motion_y);
3532 // RAL: Update last macroblock type
3533 s->last_mv_dir = s->mv_dir;
3535 if (CONFIG_H263_ENCODER &&
3536 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3537 ff_h263_update_motion_val(s);
3539 ff_mpv_reconstruct_mb(s, s->block);
3542 /* clean the MV table in IPS frames for direct mode in B-frames */
3543 if(s->mb_intra /* && I,P,S_TYPE */){
3544 s->p_mv_table[xy][0]=0;
3545 s->p_mv_table[xy][1]=0;
3548 if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3552 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3553 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3555 s->current_picture.encoding_error[0] += sse(
3556 s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3557 s->dest[0], w, h, s->linesize);
3558 s->current_picture.encoding_error[1] += sse(
3559 s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3560 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3561 s->current_picture.encoding_error[2] += sse(
3562 s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3563 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3566 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3567 ff_h263_loop_filter(s);
3569 ff_dlog(s->avctx, "MB %d %d bits\n",
3570 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3574 //not beautiful here but we must write it before flushing so it has to be here
3575 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3576 ff_msmpeg4_encode_ext_header(s);
3580 #if FF_API_RTP_CALLBACK
3581 FF_DISABLE_DEPRECATION_WARNINGS
3582 /* Send the last GOB if RTP */
3583 if (s->avctx->rtp_callback) {
3584 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3585 int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3586 /* Call the RTP callback to send the last GOB */
3588 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3590 FF_ENABLE_DEPRECATION_WARNINGS
/* MERGE(field): accumulate src->field into dst->field and zero the source,
 * so a field can never be merged (counted) twice. */
3596 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold per-slice motion-estimation statistics from a worker (slice) context
 * back into the main context after the ME pass. */
3597 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3598 MERGE(me.scene_change_score);
3599 MERGE(me.mc_mb_var_sum_temp);
3600 MERGE(me.mb_var_sum_temp);
/* Fold the results of one slice-thread's encode pass back into the main
 * context: bit/statistics counters, error counters, PSNR accumulators,
 * optional noise-reduction DCT error sums, and finally the slice's
 * bitstream, which is appended to the main PutBitContext. */
3603 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3606 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3607 MERGE(dct_count[1]);
3616 MERGE(er.error_count);
3617 MERGE(padding_bug_score);
/* per-plane SSE accumulators used for PSNR reporting */
3618 MERGE(current_picture.encoding_error[0]);
3619 MERGE(current_picture.encoding_error[1]);
3620 MERGE(current_picture.encoding_error[2]);
3622 if (dst->noise_reduction){
3623 for(i=0; i<64; i++){
3624 MERGE(dct_error_sum[0][i]);
3625 MERGE(dct_error_sum[1][i]);
/* slice bitstreams must be byte-aligned before they can be concatenated */
3629 av_assert1(put_bits_count(&src->pb) % 8 ==0);
3630 av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3631 ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3632 flush_put_bits(&dst->pb);
/* Choose the quantiser/lambda for the current frame.
 * Priority: an explicitly queued lambda (s->next_lambda), then the rate
 * controller (ff_rate_estimate_qscale) unless fixed_qscale is set.
 * With adaptive quantisation the per-MB qscale table is cleaned up in a
 * codec-specific way and lambda is taken from the table instead.
 * dry_run: when nonzero, estimate without consuming state (next_lambda is
 * not cleared). Returns negative on rate-control failure. */
3635 static int estimate_qp(MpegEncContext *s, int dry_run){
3636 if (s->next_lambda){
3637 s->current_picture_ptr->f->quality =
3638 s->current_picture.f->quality = s->next_lambda;
3639 if(!dry_run) s->next_lambda= 0;
3640 } else if (!s->fixed_qscale) {
3641 int quality = ff_rate_estimate_qscale(s, dry_run);
3642 s->current_picture_ptr->f->quality =
3643 s->current_picture.f->quality = quality;
3644 if (s->current_picture.f->quality < 0)
3648 if(s->adaptive_quant){
3649 switch(s->codec_id){
3650 case AV_CODEC_ID_MPEG4:
3651 if (CONFIG_MPEG4_ENCODER)
3652 ff_clean_mpeg4_qscales(s);
3654 case AV_CODEC_ID_H263:
3655 case AV_CODEC_ID_H263P:
3656 case AV_CODEC_ID_FLV1:
3657 if (CONFIG_H263_ENCODER)
3658 ff_clean_h263_qscales(s);
3661 ff_init_qscale_tab(s);
/* adaptive quant: lambda comes from the per-MB table */
3664 s->lambda= s->lambda_table[0];
/* otherwise: single frame-wide lambda from the chosen quality */
3667 s->lambda = s->current_picture.f->quality;
3672 /* must be called before writing the header */
/* Update temporal distances between frames (pp_time: P-to-P distance,
 * pb_time: P-to-B distance) from the current picture's pts. These are
 * needed for B-frame direct-mode MV scaling and header fields. */
3673 static void set_frame_distances(MpegEncContext * s){
3674 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3675 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3677 if(s->pict_type==AV_PICTURE_TYPE_B){
3678 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3679 av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
/* non-B frame: advance the reference-to-reference distance */
3681 s->pp_time= s->time - s->last_non_b_time;
3682 s->last_non_b_time= s->time;
3683 av_assert1(s->picture_number==0 || s->pp_time > 0);
/* Encode one complete picture: run motion estimation (multi-threaded via
 * slice contexts), pick picture type / f_code / b_code, run rate control,
 * set up quantisation matrices where needed, write the picture header and
 * finally encode all macroblocks (encode_thread) and merge the slice
 * results. Returns negative on error (e.g. rate control failure). */
3687 static int encode_picture(MpegEncContext *s, int picture_number)
3691 int context_count = s->slice_context_count;
3693 s->picture_number = picture_number;
3695 /* Reset the average MB variance */
3696 s->me.mb_var_sum_temp =
3697 s->me.mc_mb_var_sum_temp = 0;
3699 /* we need to initialize some time vars before we can encode B-frames */
3700 // RAL: Condition added for MPEG1VIDEO
3701 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3702 set_frame_distances(s);
3703 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3704 ff_set_mpeg4_time(s);
3706 s->me.scene_change_score=0;
3708 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* rounding control: I frames reset it; P frames toggle it when the codec
 * uses flip-flop rounding (H.263+/MPEG-4 style) */
3710 if(s->pict_type==AV_PICTURE_TYPE_I){
3711 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3712 else s->no_rounding=0;
3713 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3714 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3715 s->no_rounding ^= 1;
/* pass-2 rate control vs. first-pass / constant-quality lambda selection */
3718 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3719 if (estimate_qp(s,1) < 0)
3721 ff_get_2pass_fcode(s);
3722 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3723 if(s->pict_type==AV_PICTURE_TYPE_B)
3724 s->lambda= s->last_lambda_for[s->pict_type];
3726 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* codecs other than (A)MJPEG share the chroma matrices with luma */
3730 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3731 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3732 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3733 s->q_chroma_intra_matrix = s->q_intra_matrix;
3734 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3737 s->mb_intra=0; //for the rate distortion & bit compare functions
3738 for(i=1; i<context_count; i++){
3739 ret = ff_update_duplicate_context(s->thread_context[i], s);
3747 /* Estimate motion for every MB */
3748 if(s->pict_type != AV_PICTURE_TYPE_I){
/* compensate lambda for the ME cost model */
3749 s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3750 s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3751 if (s->pict_type != AV_PICTURE_TYPE_B) {
3752 if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3754 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3758 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3759 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3761 for(i=0; i<s->mb_stride*s->mb_height; i++)
3762 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3764 if(!s->fixed_qscale){
3765 /* finding spatial complexity for I-frame rate control */
3766 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3769 for(i=1; i<context_count; i++){
3770 merge_context_after_me(s, s->thread_context[i]);
3772 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3773 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* scene-change detection: promote this P frame to an all-intra I frame */
3776 if (s->me.scene_change_score > s->scenechange_threshold &&
3777 s->pict_type == AV_PICTURE_TYPE_P) {
3778 s->pict_type= AV_PICTURE_TYPE_I;
3779 for(i=0; i<s->mb_stride*s->mb_height; i++)
3780 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3781 if(s->msmpeg4_version >= 3)
3783 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3784 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* P/S frames: pick f_code from the MV statistics, then clamp MVs that
 * exceed the representable range */
3788 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3789 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3791 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3793 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3794 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3795 s->f_code= FFMAX3(s->f_code, a, b);
3798 ff_fix_long_p_mvs(s, s->intra_penalty ? CANDIDATE_MB_TYPE_INTER : CANDIDATE_MB_TYPE_INTRA);
3799 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3800 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3804 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3805 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
/* B frames: f_code from forward MVs, b_code from backward MVs */
3810 if(s->pict_type==AV_PICTURE_TYPE_B){
3813 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3814 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3815 s->f_code = FFMAX(a, b);
3817 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3818 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3819 s->b_code = FFMAX(a, b);
3821 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3822 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3823 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3824 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3825 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3827 for(dir=0; dir<2; dir++){
3830 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3831 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3832 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3833 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* final (non-dry-run) rate-control pass */
3841 if (estimate_qp(s, 0) < 0)
3844 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3845 s->pict_type == AV_PICTURE_TYPE_I &&
3846 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3847 s->qscale= 3; //reduce clipping problems
/* MJPEG bakes qscale directly into the quant matrices */
3849 if (s->out_format == FMT_MJPEG) {
3850 const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3851 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3853 if (s->avctx->intra_matrix) {
3855 luma_matrix = s->avctx->intra_matrix;
3857 if (s->avctx->chroma_intra_matrix)
3858 chroma_matrix = s->avctx->chroma_intra_matrix;
3860 /* for mjpeg, we do include qscale in the matrix */
3862 int j = s->idsp.idct_permutation[i];
3864 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3865 s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3867 s->y_dc_scale_table=
3868 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3869 s->chroma_intra_matrix[0] =
3870 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3871 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3872 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3873 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3874 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV: fixed sp5x quant tables and fixed DC scales (13 luma / 14 chroma) */
3877 if(s->codec_id == AV_CODEC_ID_AMV){
3878 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3879 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3881 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3883 s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3884 s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3886 s->y_dc_scale_table= y;
3887 s->c_dc_scale_table= c;
3888 s->intra_matrix[0] = 13;
3889 s->chroma_intra_matrix[0] = 14;
3890 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3891 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3892 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3893 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3897 if (s->out_format == FMT_SPEEDHQ) {
3898 s->y_dc_scale_table=
3899 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[3];
3902 //FIXME var duplication
3903 s->current_picture_ptr->f->key_frame =
3904 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3905 s->current_picture_ptr->f->pict_type =
3906 s->current_picture.f->pict_type = s->pict_type;
3908 if (s->current_picture.f->key_frame)
3909 s->picture_in_gop_number=0;
/* write the per-format picture header */
3911 s->mb_x = s->mb_y = 0;
3912 s->last_bits= put_bits_count(&s->pb);
3913 switch(s->out_format) {
3915 if (CONFIG_MJPEG_ENCODER && s->huffman != HUFFMAN_TABLE_OPTIMAL)
3916 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3917 s->pred, s->intra_matrix, s->chroma_intra_matrix);
3920 if (CONFIG_SPEEDHQ_ENCODER)
3921 ff_speedhq_encode_picture_header(s);
3924 if (CONFIG_H261_ENCODER)
3925 ff_h261_encode_picture_header(s, picture_number);
3928 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3929 ff_wmv2_encode_picture_header(s, picture_number);
3930 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3931 ff_msmpeg4_encode_picture_header(s, picture_number);
3932 else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3933 ret = ff_mpeg4_encode_picture_header(s, picture_number);
3936 } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3937 ret = ff_rv10_encode_picture_header(s, picture_number);
3941 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3942 ff_rv20_encode_picture_header(s, picture_number);
3943 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3944 ff_flv_encode_picture_header(s, picture_number);
3945 else if (CONFIG_H263_ENCODER)
3946 ff_h263_encode_picture_header(s, picture_number);
3949 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3950 ff_mpeg1_encode_picture_header(s, picture_number);
3955 bits= put_bits_count(&s->pb);
3956 s->header_bits= bits - s->last_bits;
/* encode all macroblocks in parallel slice threads, then merge the
 * per-slice contexts (and grow the main bit buffer if a slice's buffer
 * directly follows it) */
3958 for(i=1; i<context_count; i++){
3959 update_duplicate_context_after_me(s->thread_context[i], s);
3961 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3962 for(i=1; i<context_count; i++){
3963 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3964 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3965 merge_context_after_encode(s, s->thread_context[i]);
/* Noise-reduction filter on raw DCT coefficients: track the running error
 * sum per coefficient position (separately for intra/inter blocks, see
 * dct_error_sum / dct_count) and shrink each coefficient towards zero by
 * the learned offset, clamping at zero so the sign never flips. */
3971 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3972 const int intra= s->mb_intra;
3975 s->dct_count[intra]++;
3977 for(i=0; i<64; i++){
3978 int level= block[i];
/* positive coefficient: subtract offset, clamp at 0 */
3982 s->dct_error_sum[intra][i] += level;
3983 level -= s->dct_offset[intra][i];
3984 if(level<0) level=0;
/* negative coefficient: add offset, clamp at 0 */
3986 s->dct_error_sum[intra][i] -= level;
3987 level += s->dct_offset[intra][i];
3988 if(level>0) level=0;
/* Rate-distortion-optimal quantisation of one 8x8 block using a trellis /
 * Viterbi-style search: for each scan position up to two candidate levels
 * are kept (coeff[0..1][i]), and survivor paths are scored by
 * distortion + lambda * bits (VLC lengths from length[]/last_length[]).
 * n selects the block (n<4 luma, else chroma); *overflow is set when a
 * level exceeds max_qcoeff. Returns the index of the last nonzero
 * coefficient, or a negative value when the block quantises to nothing. */
3995 static int dct_quantize_trellis_c(MpegEncContext *s,
3996 int16_t *block, int n,
3997 int qscale, int *overflow){
3999 const uint16_t *matrix;
4000 const uint8_t *scantable;
4001 const uint8_t *perm_scantable;
4003 unsigned int threshold1, threshold2;
4015 int coeff_count[64];
4016 int qmul, qadd, start_i, last_non_zero, i, dc;
4017 const int esc_length= s->ac_esc_length;
4019 uint8_t * last_length;
4020 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
4023 s->fdsp.fdct(block);
4025 if(s->dct_error_sum)
4026 s->denoise_dct(s, block);
4028 qadd= ((qscale-1)|1)*8;
/* MPEG-2 can use a non-linear qscale mapping */
4030 if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
4031 else mpeg2_qscale = qscale << 1;
/* intra path: intra scantables, DC handled separately, intra matrices/VLCs */
4035 scantable= s->intra_scantable.scantable;
4036 perm_scantable= s->intra_scantable.permutated;
4044 /* For AIC we skip quant/dequant of INTRADC */
4049 /* note: block[0] is assumed to be positive */
4050 block[0] = (block[0] + (q >> 1)) / q;
4053 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4054 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4055 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4056 bias= 1<<(QMAT_SHIFT-1);
4058 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4059 length = s->intra_chroma_ac_vlc_length;
4060 last_length= s->intra_chroma_ac_vlc_last_length;
4062 length = s->intra_ac_vlc_length;
4063 last_length= s->intra_ac_vlc_last_length;
/* inter path: inter scantables, matrices and VLC length tables */
4066 scantable= s->inter_scantable.scantable;
4067 perm_scantable= s->inter_scantable.permutated;
4070 qmat = s->q_inter_matrix[qscale];
4071 matrix = s->inter_matrix;
4072 length = s->inter_ac_vlc_length;
4073 last_length= s->inter_ac_vlc_last_length;
4077 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4078 threshold2= (threshold1<<1);
/* find last_non_zero by scanning backwards for the first significant level */
4080 for(i=63; i>=start_i; i--) {
4081 const int j = scantable[i];
4082 int level = block[j] * qmat[j];
4084 if(((unsigned)(level+threshold1))>threshold2){
/* build candidate levels (level and level-1) for every significant position */
4090 for(i=start_i; i<=last_non_zero; i++) {
4091 const int j = scantable[i];
4092 int level = block[j] * qmat[j];
4094 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4095 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4096 if(((unsigned)(level+threshold1))>threshold2){
4098 level= (bias + level)>>QMAT_SHIFT;
4100 coeff[1][i]= level-1;
4101 // coeff[2][k]= level-2;
4103 level= (bias - level)>>QMAT_SHIFT;
4104 coeff[0][i]= -level;
4105 coeff[1][i]= -level+1;
4106 // coeff[2][k]= -level+2;
4108 coeff_count[i]= FFMIN(level, 2);
4109 av_assert2(coeff_count[i]);
4112 coeff[0][i]= (level>>31)|1;
4117 *overflow= s->max_qcoeff < max; //overflow might have happened
4119 if(last_non_zero < start_i){
4120 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4121 return last_non_zero;
4124 score_tab[start_i]= 0;
4125 survivor[0]= start_i;
/* trellis: for each position, try each candidate level against all
 * surviving predecessor paths and keep the cheapest */
4128 for(i=start_i; i<=last_non_zero; i++){
4129 int level_index, j, zero_distortion;
4130 int dct_coeff= FFABS(block[ scantable[i] ]);
4131 int best_score=256*256*256*120;
/* ifast FDCT output is AAN-scaled; undo that before measuring distortion */
4133 if (s->fdsp.fdct == ff_fdct_ifast)
4134 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4135 zero_distortion= dct_coeff*dct_coeff;
4137 for(level_index=0; level_index < coeff_count[i]; level_index++){
4139 int level= coeff[level_index][i];
4140 const int alevel= FFABS(level);
/* reconstruct the dequantised value the decoder would see, per format */
4145 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4146 unquant_coeff= alevel*qmul + qadd;
4147 } else if(s->out_format == FMT_MJPEG) {
4148 j = s->idsp.idct_permutation[scantable[i]];
4149 unquant_coeff = alevel * matrix[j] * 8;
4151 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4153 unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4154 unquant_coeff = (unquant_coeff - 1) | 1;
4156 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4157 unquant_coeff = (unquant_coeff - 1) | 1;
4162 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* small levels: use exact VLC bit lengths; otherwise cost an escape code */
4164 if((level&(~127)) == 0){
4165 for(j=survivor_count-1; j>=0; j--){
4166 int run= i - survivor[j];
4167 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4168 score += score_tab[i-run];
4170 if(score < best_score){
4173 level_tab[i+1]= level-64;
4177 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4178 for(j=survivor_count-1; j>=0; j--){
4179 int run= i - survivor[j];
4180 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4181 score += score_tab[i-run];
4182 if(score < last_score){
4185 last_level= level-64;
4191 distortion += esc_length*lambda;
4192 for(j=survivor_count-1; j>=0; j--){
4193 int run= i - survivor[j];
4194 int score= distortion + score_tab[i-run];
4196 if(score < best_score){
4199 level_tab[i+1]= level-64;
4203 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4204 for(j=survivor_count-1; j>=0; j--){
4205 int run= i - survivor[j];
4206 int score= distortion + score_tab[i-run];
4207 if(score < last_score){
4210 last_level= level-64;
4218 score_tab[i+1]= best_score;
/* prune dominated survivors (looser bound for long runs, see note) */
4220 // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4221 if(last_non_zero <= 27){
4222 for(; survivor_count; survivor_count--){
4223 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4227 for(; survivor_count; survivor_count--){
4228 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4233 survivor[ survivor_count++ ]= i+1;
/* pick the best truncation point (cheapest "last coefficient") */
4236 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4237 last_score= 256*256*256*120;
4238 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4239 int score= score_tab[i];
4241 score += lambda * 2; // FIXME more exact?
4243 if(score < last_score){
4246 last_level= level_tab[i];
4247 last_run= run_tab[i];
4252 s->coded_score[n] = last_score;
4254 dc= FFABS(block[0]);
4255 last_non_zero= last_i - 1;
4256 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4258 if(last_non_zero < start_i)
4259 return last_non_zero;
/* special case: only position 0 survives — re-evaluate its candidates in
 * isolation and possibly drop the block entirely */
4261 if(last_non_zero == 0 && start_i == 0){
4263 int best_score= dc * dc;
4265 for(i=0; i<coeff_count[0]; i++){
4266 int level= coeff[i][0];
4267 int alevel= FFABS(level);
4268 int unquant_coeff, score, distortion;
4270 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4271 unquant_coeff= (alevel*qmul + qadd)>>3;
4273 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4274 unquant_coeff = (unquant_coeff - 1) | 1;
4276 unquant_coeff = (unquant_coeff + 4) >> 3;
4277 unquant_coeff<<= 3 + 3;
4279 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4281 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4282 else score= distortion + esc_length*lambda;
4284 if(score < best_score){
4286 best_level= level - 64;
4289 block[0]= best_level;
4290 s->coded_score[n] = best_score - dc*dc;
4291 if(best_level == 0) return -1;
4292 else return last_non_zero;
/* back-track the winning path, writing levels into the permuted block */
4296 av_assert2(last_level);
4298 block[ perm_scantable[last_non_zero] ]= last_level;
4301 for(; i>start_i; i -= run_tab[i] + 1){
4302 block[ perm_scantable[i-1] ]= level_tab[i];
4305 return last_non_zero;
/* Fixed-point 8x8 DCT basis functions, indexed by (permuted) frequency and
 * spatial position; filled lazily by build_basis() and used by the
 * refinement quantiser below. */
4308 static int16_t basis[64][64];
/* Precompute the DCT basis table, applying the IDCT coefficient
 * permutation `perm` to the frequency index and the usual sqrt(0.5)
 * normalisation for the first row/column. */
4310 static void build_basis(uint8_t *perm){
4317 double s= 0.25*(1<<BASIS_SHIFT);
4319 int perm_index= perm[index];
4320 if(i==0) s*= sqrt(0.5);
4321 if(j==0) s*= sqrt(0.5);
4322 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Iterative refinement of an already-quantised block: repeatedly try
 * changing single coefficients by +-1, scoring each change as weighted
 * reconstruction error (via try_8x8basis on the residual `rem`) plus
 * lambda * VLC bit-cost delta, and apply the best change until none
 * improves the score. Returns the updated last-nonzero index. */
4329 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4330 int16_t *block, int16_t *weight, int16_t *orig,
4333 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4334 const uint8_t *scantable;
4335 const uint8_t *perm_scantable;
4336 // unsigned int threshold1, threshold2;
4341 int qmul, qadd, start_i, last_non_zero, i, dc;
4343 uint8_t * last_length;
4345 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* lazily build the DCT basis table on first use */
4347 if(basis[0][0] == 0)
4348 build_basis(s->idsp.idct_permutation);
/* intra path: intra scantables and VLC length tables */
4353 scantable= s->intra_scantable.scantable;
4354 perm_scantable= s->intra_scantable.permutated;
4361 /* For AIC we skip quant/dequant of INTRADC */
4365 q <<= RECON_SHIFT-3;
4366 /* note: block[0] is assumed to be positive */
4368 // block[0] = (block[0] + (q >> 1)) / q;
4370 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4371 // bias= 1<<(QMAT_SHIFT-1);
4372 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4373 length = s->intra_chroma_ac_vlc_length;
4374 last_length= s->intra_chroma_ac_vlc_last_length;
4376 length = s->intra_ac_vlc_length;
4377 last_length= s->intra_ac_vlc_last_length;
/* inter path */
4380 scantable= s->inter_scantable.scantable;
4381 perm_scantable= s->inter_scantable.permutated;
4384 length = s->inter_ac_vlc_length;
4385 last_length= s->inter_ac_vlc_last_length;
4387 last_non_zero = s->block_last_index[n];
/* residual in RECON_SHIFT fixed point: dc minus the original samples */
4389 dc += (1<<(RECON_SHIFT-1));
4390 for(i=0; i<64; i++){
4391 rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
/* build the perceptual weights (range 16..63, see comment) */
4395 for(i=0; i<64; i++){
4400 w= FFABS(weight[i]) + qns*one;
4401 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4404 // w=weight[i] = (63*qns + (w/2)) / w;
4407 av_assert2(w<(1<<6));
4410 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* subtract the current reconstruction from rem and record the RLE runs */
4414 for(i=start_i; i<=last_non_zero; i++){
4415 int j= perm_scantable[i];
4416 const int level= block[j];
4420 if(level<0) coeff= qmul*level - qadd;
4421 else coeff= qmul*level + qadd;
4422 run_tab[rle_index++]=run;
4425 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
/* main refinement loop: keep applying the single best +-1 change */
4432 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4435 int run2, best_unquant_change=0, analyze_gradient;
4436 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
4438 if(analyze_gradient){
4439 for(i=0; i<64; i++){
4442 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
/* DC coefficient (intra only): try +-1 on block[0] */
4448 const int level= block[0];
4449 int change, old_coeff;
4451 av_assert2(s->mb_intra);
4455 for(change=-1; change<=1; change+=2){
4456 int new_level= level + change;
4457 int score, new_coeff;
4459 new_coeff= q*new_level;
4460 if(new_coeff >= 2048 || new_coeff < 0)
4463 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4464 new_coeff - old_coeff);
4465 if(score<best_score){
4468 best_change= change;
4469 best_unquant_change= new_coeff - old_coeff;
4476 run2= run_tab[rle_index++];
/* AC coefficients: try +-1 at every scan position, accounting for the
 * VLC bit-cost change of the affected (run, level) pairs */
4480 for(i=start_i; i<64; i++){
4481 int j= perm_scantable[i];
4482 const int level= block[j];
4483 int change, old_coeff;
4485 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4489 if(level<0) old_coeff= qmul*level - qadd;
4490 else old_coeff= qmul*level + qadd;
4491 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4495 av_assert2(run2>=0 || i >= last_non_zero );
4498 for(change=-1; change<=1; change+=2){
4499 int new_level= level + change;
4500 int score, new_coeff, unquant_change;
4503 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4507 if(new_level<0) new_coeff= qmul*new_level - qadd;
4508 else new_coeff= qmul*new_level + qadd;
4509 if(new_coeff >= 2048 || new_coeff <= -2048)
4511 //FIXME check for overflow
4514 if(level < 63 && level > -63){
4515 if(i < last_non_zero)
4516 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4517 - length[UNI_AC_ENC_INDEX(run, level+64)];
4519 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4520 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
4523 av_assert2(FFABS(new_level)==1);
4525 if(analyze_gradient){
4526 int g= d1[ scantable[i] ];
4527 if(g && (g^new_level) >= 0)
/* creating a coefficient where there was none: splits a run */
4531 if(i < last_non_zero){
4532 int next_i= i + run2 + 1;
4533 int next_level= block[ perm_scantable[next_i] ] + 64;
4535 if(next_level&(~127))
4538 if(next_i < last_non_zero)
4539 score += length[UNI_AC_ENC_INDEX(run, 65)]
4540 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4541 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4543 score += length[UNI_AC_ENC_INDEX(run, 65)]
4544 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4545 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4547 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4549 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4550 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* removing a coefficient: merges adjacent runs */
4556 av_assert2(FFABS(level)==1);
4558 if(i < last_non_zero){
4559 int next_i= i + run2 + 1;
4560 int next_level= block[ perm_scantable[next_i] ] + 64;
4562 if(next_level&(~127))
4565 if(next_i < last_non_zero)
4566 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4567 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4568 - length[UNI_AC_ENC_INDEX(run, 65)];
4570 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4571 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4572 - length[UNI_AC_ENC_INDEX(run, 65)];
4574 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4576 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4577 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4584 unquant_change= new_coeff - old_coeff;
4585 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4587 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4589 if(score<best_score){
4592 best_change= change;
4593 best_unquant_change= unquant_change;
4597 prev_level= level + 64;
4598 if(prev_level&(~127))
/* apply the winning change and update last_non_zero and the RLE tables */
4608 int j= perm_scantable[ best_coeff ];
4610 block[j] += best_change;
4612 if(best_coeff > last_non_zero){
4613 last_non_zero= best_coeff;
4614 av_assert2(block[j]);
4616 for(; last_non_zero>=start_i; last_non_zero--){
4617 if(block[perm_scantable[last_non_zero]])
4624 for(i=start_i; i<=last_non_zero; i++){
4625 int j= perm_scantable[i];
4626 const int level= block[j];
4629 run_tab[rle_index++]=run;
/* fold the applied change into the residual for the next iteration */
4636 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4642 return last_non_zero;
/**
4646 * Permute an 8x8 block according to permutation.
4647 * @param block the block which will be permuted according to
4648 * the given permutation vector
4649 * @param permutation the permutation vector
4650 * @param last the last non zero coefficient in scantable order, used to
4651 * speed the permutation up
4652 * @param scantable the used scantable, this is only used to speed the
4653 * permutation up, the block is not (inverse) permutated
4654 * to scantable order!
 */
4656 void ff_block_permute(int16_t *block, uint8_t *permutation,
4657 const uint8_t *scantable, int last)
4664 //FIXME it is ok but not clean and might fail for some permutations
4665 // if (permutation[1] == 1)
/* copy the (up to last+1) significant coefficients aside ... */
4668 for (i = 0; i <= last; i++) {
4669 const int j = scantable[i];
/* ... then write them back at their permuted positions */
4674 for (i = 0; i <= last; i++) {
4675 const int j = scantable[i];
4676 const int perm_j = permutation[j];
4677 block[perm_j] = temp[j];
/* Default (non-trellis) quantiser: forward DCT, optional denoising, then
 * per-coefficient quantisation with the precomputed qmat and rounding
 * bias. Sets *overflow when a level exceeds max_qcoeff and finally
 * permutes the nonzero coefficients into IDCT order. Returns the index
 * of the last nonzero coefficient in scan order. */
4681 int ff_dct_quantize_c(MpegEncContext *s,
4682 int16_t *block, int n,
4683 int qscale, int *overflow)
4685 int i, j, level, last_non_zero, q, start_i;
4687 const uint8_t *scantable;
4690 unsigned int threshold1, threshold2;
4692 s->fdsp.fdct(block);
4694 if(s->dct_error_sum)
4695 s->denoise_dct(s, block);
/* intra: quantise DC separately with its own divisor */
4698 scantable= s->intra_scantable.scantable;
4706 /* For AIC we skip quant/dequant of INTRADC */
4709 /* note: block[0] is assumed to be positive */
4710 block[0] = (block[0] + (q >> 1)) / q;
4713 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4714 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
/* inter path */
4716 scantable= s->inter_scantable.scantable;
4719 qmat = s->q_inter_matrix[qscale];
4720 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4722 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4723 threshold2= (threshold1<<1);
/* backward scan to find the last significant coefficient */
4724 for(i=63;i>=start_i;i--) {
4726 level = block[j] * qmat[j];
4728 if(((unsigned)(level+threshold1))>threshold2){
/* forward pass: quantise everything up to last_non_zero */
4735 for(i=start_i; i<=last_non_zero; i++) {
4737 level = block[j] * qmat[j];
4739 // if( bias+level >= (1<<QMAT_SHIFT)
4740 // || bias-level >= (1<<QMAT_SHIFT)){
4741 if(((unsigned)(level+threshold1))>threshold2){
4743 level= (bias + level)>>QMAT_SHIFT;
4746 level= (bias - level)>>QMAT_SHIFT;
4754 *overflow= s->max_qcoeff < max; //overflow might have happened
4756 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4757 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4758 ff_block_permute(block, s->idsp.idct_permutation,
4759 scantable, last_non_zero);
4761 return last_non_zero;
/* Shorthand macros for the AVOption tables below: OFFSET() locates an option
 * field inside MpegEncContext, VE marks video encoding options. */
#define OFFSET(x) offsetof(MpegEncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

/* Private options of the plain H.263 encoder. */
static const AVOption h263_options[] = {
    { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass exposing h263_options through the AVOptions API. */
static const AVClass h263_class = {
    .class_name = "H.263 encoder",
    .item_name  = av_default_item_name,
    .option     = h263_options,
    .version    = LIBAVUTIL_VERSION_INT,
/* Registration of the H.263 (H.263-1996) encoder; all callbacks are the
 * shared mpegvideo encoder entry points. */
AVCodec ff_h263_encoder = {
    .long_name      = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H263,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = ff_mpv_encode_init,
    .encode2        = ff_mpv_encode_picture,
    .close          = ff_mpv_encode_end,
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
    .pix_fmts       = (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
    .priv_class     = &h263_class,
/* Private options of the H.263+ encoder (optional annex features). */
static const AVOption h263p_options[] = {
    { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
/* AVClass exposing h263p_options through the AVOptions API. */
static const AVClass h263p_class = {
    .class_name = "H.263p encoder",
    .item_name  = av_default_item_name,
    .option     = h263p_options,
    .version    = LIBAVUTIL_VERSION_INT,
/* Registration of the H.263+ (H.263-1998) encoder; unlike plain H.263 it
 * also advertises slice-threading capability. */
AVCodec ff_h263p_encoder = {
    .long_name      = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H263P,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = ff_mpv_encode_init,
    .encode2        = ff_mpv_encode_picture,
    .close          = ff_mpv_encode_end,
    .capabilities   = AV_CODEC_CAP_SLICE_THREADS,
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
    .priv_class     = &h263p_class,
/* AVClass for the MS-MPEG4 v2 encoder; reuses the shared mpegvideo generic
 * option table (no codec-specific options). */
static const AVClass msmpeg4v2_class = {
    .class_name = "msmpeg4v2 encoder",
    .item_name  = av_default_item_name,
    .option     = ff_mpv_generic_options,
    .version    = LIBAVUTIL_VERSION_INT,
/* Registration of the Microsoft MPEG-4 variant v2 encoder. */
AVCodec ff_msmpeg4v2_encoder = {
    .name           = "msmpeg4v2",
    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MSMPEG4V2,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = ff_mpv_encode_init,
    .encode2        = ff_mpv_encode_picture,
    .close          = ff_mpv_encode_end,
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
    .priv_class     = &msmpeg4v2_class,
/* AVClass for the MS-MPEG4 v3 encoder; reuses the shared mpegvideo generic
 * option table (no codec-specific options). */
static const AVClass msmpeg4v3_class = {
    .class_name = "msmpeg4v3 encoder",
    .item_name  = av_default_item_name,
    .option     = ff_mpv_generic_options,
    .version    = LIBAVUTIL_VERSION_INT,
/* Registration of the Microsoft MPEG-4 variant v3 encoder. */
AVCodec ff_msmpeg4v3_encoder = {
    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MSMPEG4V3,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = ff_mpv_encode_init,
    .encode2        = ff_mpv_encode_picture,
    .close          = ff_mpv_encode_end,
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
    .priv_class     = &msmpeg4v3_class,
/* AVClass for the WMV1 encoder; reuses the shared mpegvideo generic option
 * table (no codec-specific options). */
static const AVClass wmv1_class = {
    .class_name = "wmv1 encoder",
    .item_name  = av_default_item_name,
    .option     = ff_mpv_generic_options,
    .version    = LIBAVUTIL_VERSION_INT,
/* Registration of the Windows Media Video 7 (WMV1) encoder. */
AVCodec ff_wmv1_encoder = {
    .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_WMV1,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = ff_mpv_encode_init,
    .encode2        = ff_mpv_encode_picture,
    .close          = ff_mpv_encode_end,
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
    .priv_class     = &wmv1_class,