2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * Non-linear quantizers with large QPs, and VBV with restrictive qmin; fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/mem_internal.h"
40 #include "libavutil/pixdesc.h"
41 #include "libavutil/opt.h"
42 #include "libavutil/thread.h"
47 #include "mpegvideo.h"
48 #include "mpegvideodata.h"
52 #include "mjpegenc_common.h"
54 #include "mpegutils.h"
56 #include "speedhqenc.h"
58 #include "pixblockdsp.h"
62 #include "aandcttab.h"
64 #include "mpeg4video.h"
66 #include "bytestream.h"
69 #include "packet_internal.h"
74 #define QUANT_BIAS_SHIFT 8
76 #define QMAT_SHIFT_MMX 16
79 static int encode_picture(MpegEncContext *s, int picture_number);
80 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
81 static int sse_mb(MpegEncContext *s);
82 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
83 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
85 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
86 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
88 const AVOption ff_mpv_generic_options[] = {
93 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
94 uint16_t (*qmat16)[2][64],
95 const uint16_t *quant_matrix,
96 int bias, int qmin, int qmax, int intra)
98 FDCTDSPContext *fdsp = &s->fdsp;
102 for (qscale = qmin; qscale <= qmax; qscale++) {
106 if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
107 else qscale2 = qscale << 1;
109 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
111 fdsp->fdct == ff_faandct ||
112 #endif /* CONFIG_FAANDCT */
113 fdsp->fdct == ff_jpeg_fdct_islow_10) {
114 for (i = 0; i < 64; i++) {
115 const int j = s->idsp.idct_permutation[i];
116 int64_t den = (int64_t) qscale2 * quant_matrix[j];
117 /* 16 <= qscale * quant_matrix[i] <= 7905
118 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
119 * 19952 <= x <= 249205026
120 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
121 * 3444240 >= (1 << 36) / (x) >= 275 */
123 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
125 } else if (fdsp->fdct == ff_fdct_ifast) {
126 for (i = 0; i < 64; i++) {
127 const int j = s->idsp.idct_permutation[i];
128 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
129 /* 16 <= qscale * quant_matrix[i] <= 7905
130 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
131 * 19952 <= x <= 249205026
132 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
133 * 3444240 >= (1 << 36) / (x) >= 275 */
135 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
138 for (i = 0; i < 64; i++) {
139 const int j = s->idsp.idct_permutation[i];
140 int64_t den = (int64_t) qscale2 * quant_matrix[j];
141 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
142 * Assume x = qscale * quant_matrix[i]
144 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
145 * so 32768 >= (1 << 19) / (x) >= 67 */
146 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
147 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
148 // (qscale * quant_matrix[i]);
149 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
151 if (qmat16[qscale][0][i] == 0 ||
152 qmat16[qscale][0][i] == 128 * 256)
153 qmat16[qscale][0][i] = 128 * 256 - 1;
154 qmat16[qscale][1][i] =
155 ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
156 qmat16[qscale][0][i]);
160 for (i = intra; i < 64; i++) {
162 if (fdsp->fdct == ff_fdct_ifast) {
163 max = (8191LL * ff_aanscales[i]) >> 14;
165 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
171 av_log(s->avctx, AV_LOG_INFO,
172 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
177 static inline void update_qscale(MpegEncContext *s)
179 if (s->q_scale_type == 1 && 0) {
181 int bestdiff=INT_MAX;
184 for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
185 int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
186 if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
187 (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
189 if (diff < bestdiff) {
196 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
197 (FF_LAMBDA_SHIFT + 7);
198 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
201 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
205 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
211 for (i = 0; i < 64; i++) {
212 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
219 * init s->current_picture.qscale_table from s->lambda_table
221 void ff_init_qscale_tab(MpegEncContext *s)
223 int8_t * const qscale_table = s->current_picture.qscale_table;
226 for (i = 0; i < s->mb_num; i++) {
227 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
228 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
229 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
234 static void update_duplicate_context_after_me(MpegEncContext *dst,
237 #define COPY(a) dst->a= src->a
239 COPY(current_picture);
245 COPY(picture_in_gop_number);
246 COPY(gop_picture_number);
247 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
248 COPY(progressive_frame); // FIXME don't set in encode_header
249 COPY(partitioned_frame); // FIXME don't set in encode_header
253 static void mpv_encode_init_static(void)
255 for (int i = -16; i < 16; i++)
256 default_fcode_tab[i + MAX_MV] = 1;
260 * Set the given MpegEncContext to defaults for encoding.
261 * the changed fields will not depend upon the prior state of the MpegEncContext.
263 static void mpv_encode_defaults(MpegEncContext *s)
265 static AVOnce init_static_once = AV_ONCE_INIT;
267 ff_mpv_common_defaults(s);
269 ff_thread_once(&init_static_once, mpv_encode_init_static);
271 s->me.mv_penalty = default_mv_penalty;
272 s->fcode_tab = default_fcode_tab;
274 s->input_picture_number = 0;
275 s->picture_in_gop_number = 0;
278 av_cold int ff_dct_encode_init(MpegEncContext *s)
281 ff_dct_encode_init_x86(s);
283 if (CONFIG_H263_ENCODER)
284 ff_h263dsp_init(&s->h263dsp);
285 if (!s->dct_quantize)
286 s->dct_quantize = ff_dct_quantize_c;
288 s->denoise_dct = denoise_dct_c;
289 s->fast_dct_quantize = s->dct_quantize;
290 if (s->avctx->trellis)
291 s->dct_quantize = dct_quantize_trellis_c;
296 /* init video encoder */
297 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
299 MpegEncContext *s = avctx->priv_data;
300 AVCPBProperties *cpb_props;
301 int i, ret, format_supported;
303 mpv_encode_defaults(s);
305 switch (avctx->codec_id) {
306 case AV_CODEC_ID_MPEG2VIDEO:
307 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
308 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
309 av_log(avctx, AV_LOG_ERROR,
310 "only YUV420 and YUV422 are supported\n");
311 return AVERROR(EINVAL);
314 case AV_CODEC_ID_MJPEG:
315 case AV_CODEC_ID_AMV:
316 format_supported = 0;
317 /* JPEG color space */
318 if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
319 avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
320 avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
321 (avctx->color_range == AVCOL_RANGE_JPEG &&
322 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
323 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
324 avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
325 format_supported = 1;
326 /* MPEG color space */
327 else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
328 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
329 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
330 avctx->pix_fmt == AV_PIX_FMT_YUV444P))
331 format_supported = 1;
333 if (!format_supported) {
334 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
335 return AVERROR(EINVAL);
338 case AV_CODEC_ID_SPEEDHQ:
339 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
340 avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
341 avctx->pix_fmt != AV_PIX_FMT_YUV444P) {
342 av_log(avctx, AV_LOG_ERROR,
343 "only YUV420/YUV422/YUV444 are supported (no alpha support yet)\n");
344 return AVERROR(EINVAL);
348 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
349 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
350 return AVERROR(EINVAL);
354 switch (avctx->pix_fmt) {
355 case AV_PIX_FMT_YUVJ444P:
356 case AV_PIX_FMT_YUV444P:
357 s->chroma_format = CHROMA_444;
359 case AV_PIX_FMT_YUVJ422P:
360 case AV_PIX_FMT_YUV422P:
361 s->chroma_format = CHROMA_422;
363 case AV_PIX_FMT_YUVJ420P:
364 case AV_PIX_FMT_YUV420P:
366 s->chroma_format = CHROMA_420;
370 avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
372 #if FF_API_PRIVATE_OPT
373 FF_DISABLE_DEPRECATION_WARNINGS
374 if (avctx->rtp_payload_size)
375 s->rtp_payload_size = avctx->rtp_payload_size;
376 if (avctx->me_penalty_compensation)
377 s->me_penalty_compensation = avctx->me_penalty_compensation;
379 s->me_pre = avctx->pre_me;
380 FF_ENABLE_DEPRECATION_WARNINGS
383 s->bit_rate = avctx->bit_rate;
384 s->width = avctx->width;
385 s->height = avctx->height;
386 if (avctx->gop_size > 600 &&
387 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
388 av_log(avctx, AV_LOG_WARNING,
389 "keyframe interval too large!, reducing it from %d to %d\n",
390 avctx->gop_size, 600);
391 avctx->gop_size = 600;
393 s->gop_size = avctx->gop_size;
395 if (avctx->max_b_frames > MAX_B_FRAMES) {
396 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
397 "is %d.\n", MAX_B_FRAMES);
398 avctx->max_b_frames = MAX_B_FRAMES;
400 s->max_b_frames = avctx->max_b_frames;
401 s->codec_id = avctx->codec->id;
402 s->strict_std_compliance = avctx->strict_std_compliance;
403 s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
404 s->rtp_mode = !!s->rtp_payload_size;
405 s->intra_dc_precision = avctx->intra_dc_precision;
407 // workaround some differences between how applications specify dc precision
408 if (s->intra_dc_precision < 0) {
409 s->intra_dc_precision += 8;
410 } else if (s->intra_dc_precision >= 8)
411 s->intra_dc_precision -= 8;
413 if (s->intra_dc_precision < 0) {
414 av_log(avctx, AV_LOG_ERROR,
415 "intra dc precision must be positive, note some applications use"
416 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
417 return AVERROR(EINVAL);
420 if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))
423 if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
424 av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
425 return AVERROR(EINVAL);
427 s->user_specified_pts = AV_NOPTS_VALUE;
429 if (s->gop_size <= 1) {
437 s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
439 s->adaptive_quant = (avctx->lumi_masking ||
440 avctx->dark_masking ||
441 avctx->temporal_cplx_masking ||
442 avctx->spatial_cplx_masking ||
445 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
448 s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
450 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
451 switch(avctx->codec_id) {
452 case AV_CODEC_ID_MPEG1VIDEO:
453 case AV_CODEC_ID_MPEG2VIDEO:
454 avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
456 case AV_CODEC_ID_MPEG4:
457 case AV_CODEC_ID_MSMPEG4V1:
458 case AV_CODEC_ID_MSMPEG4V2:
459 case AV_CODEC_ID_MSMPEG4V3:
460 if (avctx->rc_max_rate >= 15000000) {
461 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
462 } else if(avctx->rc_max_rate >= 2000000) {
463 avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
464 } else if(avctx->rc_max_rate >= 384000) {
465 avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
467 avctx->rc_buffer_size = 40;
468 avctx->rc_buffer_size *= 16384;
471 if (avctx->rc_buffer_size) {
472 av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
476 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
477 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
478 return AVERROR(EINVAL);
481 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
482 av_log(avctx, AV_LOG_INFO,
483 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
486 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
487 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
488 return AVERROR(EINVAL);
491 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
492 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
493 return AVERROR(EINVAL);
496 if (avctx->rc_max_rate &&
497 avctx->rc_max_rate == avctx->bit_rate &&
498 avctx->rc_max_rate != avctx->rc_min_rate) {
499 av_log(avctx, AV_LOG_INFO,
500 "impossible bitrate constraints, this will fail\n");
503 if (avctx->rc_buffer_size &&
504 avctx->bit_rate * (int64_t)avctx->time_base.num >
505 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
506 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
507 return AVERROR(EINVAL);
510 if (!s->fixed_qscale &&
511 avctx->bit_rate * av_q2d(avctx->time_base) >
512 avctx->bit_rate_tolerance) {
513 av_log(avctx, AV_LOG_WARNING,
514 "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
515 avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
518 if (avctx->rc_max_rate &&
519 avctx->rc_min_rate == avctx->rc_max_rate &&
520 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
521 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
522 90000LL * (avctx->rc_buffer_size - 1) >
523 avctx->rc_max_rate * 0xFFFFLL) {
524 av_log(avctx, AV_LOG_INFO,
525 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
526 "specified vbv buffer is too large for the given bitrate!\n");
529 if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
530 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
531 s->codec_id != AV_CODEC_ID_FLV1) {
532 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
533 return AVERROR(EINVAL);
536 if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
537 av_log(avctx, AV_LOG_ERROR,
538 "OBMC is only supported with simple mb decision\n");
539 return AVERROR(EINVAL);
542 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
543 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
544 return AVERROR(EINVAL);
547 if (s->max_b_frames &&
548 s->codec_id != AV_CODEC_ID_MPEG4 &&
549 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
550 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
551 av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
552 return AVERROR(EINVAL);
554 if (s->max_b_frames < 0) {
555 av_log(avctx, AV_LOG_ERROR,
556 "max b frames must be 0 or positive for mpegvideo based encoders\n");
557 return AVERROR(EINVAL);
560 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
561 s->codec_id == AV_CODEC_ID_H263 ||
562 s->codec_id == AV_CODEC_ID_H263P) &&
563 (avctx->sample_aspect_ratio.num > 255 ||
564 avctx->sample_aspect_ratio.den > 255)) {
565 av_log(avctx, AV_LOG_WARNING,
566 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
567 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
568 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
569 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
572 if ((s->codec_id == AV_CODEC_ID_H263 ||
573 s->codec_id == AV_CODEC_ID_H263P) &&
574 (avctx->width > 2048 ||
575 avctx->height > 1152 )) {
576 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
577 return AVERROR(EINVAL);
579 if ((s->codec_id == AV_CODEC_ID_H263 ||
580 s->codec_id == AV_CODEC_ID_H263P) &&
581 ((avctx->width &3) ||
582 (avctx->height&3) )) {
583 av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
584 return AVERROR(EINVAL);
587 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
588 (avctx->width > 4095 ||
589 avctx->height > 4095 )) {
590 av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
591 return AVERROR(EINVAL);
594 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
595 (avctx->width > 16383 ||
596 avctx->height > 16383 )) {
597 av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
598 return AVERROR(EINVAL);
601 if (s->codec_id == AV_CODEC_ID_RV10 &&
603 avctx->height&15 )) {
604 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
605 return AVERROR(EINVAL);
608 if (s->codec_id == AV_CODEC_ID_RV20 &&
611 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
612 return AVERROR(EINVAL);
615 if ((s->codec_id == AV_CODEC_ID_WMV1 ||
616 s->codec_id == AV_CODEC_ID_WMV2) &&
618 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
619 return AVERROR(EINVAL);
622 if ((avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
623 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
624 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
625 return AVERROR(EINVAL);
628 #if FF_API_PRIVATE_OPT
629 FF_DISABLE_DEPRECATION_WARNINGS
630 if (avctx->mpeg_quant)
631 s->mpeg_quant = avctx->mpeg_quant;
632 FF_ENABLE_DEPRECATION_WARNINGS
635 // FIXME mpeg2 uses that too
636 if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
637 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
638 av_log(avctx, AV_LOG_ERROR,
639 "mpeg2 style quantization not supported by codec\n");
640 return AVERROR(EINVAL);
643 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
644 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
645 return AVERROR(EINVAL);
648 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
649 avctx->mb_decision != FF_MB_DECISION_RD) {
650 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
651 return AVERROR(EINVAL);
654 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
655 (s->codec_id == AV_CODEC_ID_AMV ||
656 s->codec_id == AV_CODEC_ID_MJPEG)) {
657 // Used to produce garbage with MJPEG.
658 av_log(avctx, AV_LOG_ERROR,
659 "QP RD is no longer compatible with MJPEG or AMV\n");
660 return AVERROR(EINVAL);
663 #if FF_API_PRIVATE_OPT
664 FF_DISABLE_DEPRECATION_WARNINGS
665 if (avctx->scenechange_threshold)
666 s->scenechange_threshold = avctx->scenechange_threshold;
667 FF_ENABLE_DEPRECATION_WARNINGS
670 if (s->scenechange_threshold < 1000000000 &&
671 (avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
672 av_log(avctx, AV_LOG_ERROR,
673 "closed gop with scene change detection are not supported yet, "
674 "set threshold to 1000000000\n");
675 return AVERROR_PATCHWELCOME;
678 if (avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
679 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
680 s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
681 av_log(avctx, AV_LOG_ERROR,
682 "low delay forcing is only available for mpeg2, "
683 "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
684 return AVERROR(EINVAL);
686 if (s->max_b_frames != 0) {
687 av_log(avctx, AV_LOG_ERROR,
688 "B-frames cannot be used with low delay\n");
689 return AVERROR(EINVAL);
693 if (s->q_scale_type == 1) {
694 if (avctx->qmax > 28) {
695 av_log(avctx, AV_LOG_ERROR,
696 "non linear quant only supports qmax <= 28 currently\n");
697 return AVERROR_PATCHWELCOME;
701 if (avctx->slices > 1 &&
702 (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
703 av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
704 return AVERROR(EINVAL);
707 if (avctx->thread_count > 1 &&
708 s->codec_id != AV_CODEC_ID_MPEG4 &&
709 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
710 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
711 s->codec_id != AV_CODEC_ID_MJPEG &&
712 (s->codec_id != AV_CODEC_ID_H263P)) {
713 av_log(avctx, AV_LOG_ERROR,
714 "multi threaded encoding not supported by codec\n");
715 return AVERROR_PATCHWELCOME;
718 if (avctx->thread_count < 1) {
719 av_log(avctx, AV_LOG_ERROR,
720 "automatic thread number detection not supported by codec, "
722 return AVERROR_PATCHWELCOME;
725 if (!avctx->time_base.den || !avctx->time_base.num) {
726 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
727 return AVERROR(EINVAL);
730 #if FF_API_PRIVATE_OPT
731 FF_DISABLE_DEPRECATION_WARNINGS
732 if (avctx->b_frame_strategy)
733 s->b_frame_strategy = avctx->b_frame_strategy;
734 if (avctx->b_sensitivity != 40)
735 s->b_sensitivity = avctx->b_sensitivity;
736 FF_ENABLE_DEPRECATION_WARNINGS
739 if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
740 av_log(avctx, AV_LOG_INFO,
741 "notice: b_frame_strategy only affects the first pass\n");
742 s->b_frame_strategy = 0;
745 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
747 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
748 avctx->time_base.den /= i;
749 avctx->time_base.num /= i;
753 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
754 // (a + x * 3 / 8) / x
755 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
756 s->inter_quant_bias = 0;
758 s->intra_quant_bias = 0;
760 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
763 if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
764 av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
765 return AVERROR(EINVAL);
768 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
770 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
771 avctx->time_base.den > (1 << 16) - 1) {
772 av_log(avctx, AV_LOG_ERROR,
773 "timebase %d/%d not supported by MPEG 4 standard, "
774 "the maximum admitted value for the timebase denominator "
775 "is %d\n", avctx->time_base.num, avctx->time_base.den,
777 return AVERROR(EINVAL);
779 s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
781 switch (avctx->codec->id) {
782 case AV_CODEC_ID_MPEG1VIDEO:
783 s->out_format = FMT_MPEG1;
784 s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
785 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
787 case AV_CODEC_ID_MPEG2VIDEO:
788 s->out_format = FMT_MPEG1;
789 s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
790 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
793 case AV_CODEC_ID_MJPEG:
794 case AV_CODEC_ID_AMV:
795 s->out_format = FMT_MJPEG;
796 s->intra_only = 1; /* force intra only for jpeg */
797 if (!CONFIG_MJPEG_ENCODER)
798 return AVERROR_ENCODER_NOT_FOUND;
799 if ((ret = ff_mjpeg_encode_init(s)) < 0)
804 case AV_CODEC_ID_SPEEDHQ:
805 s->out_format = FMT_SPEEDHQ;
806 s->intra_only = 1; /* force intra only for SHQ */
807 if (!CONFIG_SPEEDHQ_ENCODER)
808 return AVERROR_ENCODER_NOT_FOUND;
809 if ((ret = ff_speedhq_encode_init(s)) < 0)
814 case AV_CODEC_ID_H261:
815 if (!CONFIG_H261_ENCODER)
816 return AVERROR_ENCODER_NOT_FOUND;
817 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
818 av_log(avctx, AV_LOG_ERROR,
819 "The specified picture size of %dx%d is not valid for the "
820 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
821 s->width, s->height);
822 return AVERROR(EINVAL);
824 s->out_format = FMT_H261;
827 s->rtp_mode = 0; /* Sliced encoding not supported */
829 case AV_CODEC_ID_H263:
830 if (!CONFIG_H263_ENCODER)
831 return AVERROR_ENCODER_NOT_FOUND;
832 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
833 s->width, s->height) == 8) {
834 av_log(avctx, AV_LOG_ERROR,
835 "The specified picture size of %dx%d is not valid for "
836 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
837 "352x288, 704x576, and 1408x1152. "
838 "Try H.263+.\n", s->width, s->height);
839 return AVERROR(EINVAL);
841 s->out_format = FMT_H263;
845 case AV_CODEC_ID_H263P:
846 s->out_format = FMT_H263;
849 s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
850 s->modified_quant = s->h263_aic;
851 s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
852 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
855 /* These are just to be sure */
859 case AV_CODEC_ID_FLV1:
860 s->out_format = FMT_H263;
861 s->h263_flv = 2; /* format = 1; 11-bit codes */
862 s->unrestricted_mv = 1;
863 s->rtp_mode = 0; /* don't allow GOB */
867 case AV_CODEC_ID_RV10:
868 s->out_format = FMT_H263;
872 case AV_CODEC_ID_RV20:
873 s->out_format = FMT_H263;
876 s->modified_quant = 1;
880 s->unrestricted_mv = 0;
882 case AV_CODEC_ID_MPEG4:
883 s->out_format = FMT_H263;
885 s->unrestricted_mv = 1;
886 s->low_delay = s->max_b_frames ? 0 : 1;
887 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
889 case AV_CODEC_ID_MSMPEG4V2:
890 s->out_format = FMT_H263;
892 s->unrestricted_mv = 1;
893 s->msmpeg4_version = 2;
897 case AV_CODEC_ID_MSMPEG4V3:
898 s->out_format = FMT_H263;
900 s->unrestricted_mv = 1;
901 s->msmpeg4_version = 3;
902 s->flipflop_rounding = 1;
906 case AV_CODEC_ID_WMV1:
907 s->out_format = FMT_H263;
909 s->unrestricted_mv = 1;
910 s->msmpeg4_version = 4;
911 s->flipflop_rounding = 1;
915 case AV_CODEC_ID_WMV2:
916 s->out_format = FMT_H263;
918 s->unrestricted_mv = 1;
919 s->msmpeg4_version = 5;
920 s->flipflop_rounding = 1;
925 return AVERROR(EINVAL);
928 #if FF_API_PRIVATE_OPT
929 FF_DISABLE_DEPRECATION_WARNINGS
930 if (avctx->noise_reduction)
931 s->noise_reduction = avctx->noise_reduction;
932 FF_ENABLE_DEPRECATION_WARNINGS
935 avctx->has_b_frames = !s->low_delay;
939 s->progressive_frame =
940 s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
941 AV_CODEC_FLAG_INTERLACED_ME) ||
946 if ((ret = ff_mpv_common_init(s)) < 0)
949 ff_fdctdsp_init(&s->fdsp, avctx);
950 ff_me_cmp_init(&s->mecc, avctx);
951 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
952 ff_pixblockdsp_init(&s->pdsp, avctx);
953 ff_qpeldsp_init(&s->qdsp);
955 if (s->msmpeg4_version) {
956 int ac_stats_size = 2 * 2 * (MAX_LEVEL + 1) * (MAX_RUN + 1) * 2 * sizeof(int);
957 if (!(s->ac_stats = av_mallocz(ac_stats_size)))
958 return AVERROR(ENOMEM);
961 if (!(avctx->stats_out = av_mallocz(256)) ||
962 !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix, 32) ||
963 !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix, 32) ||
964 !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix, 32) ||
965 !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16, 32) ||
966 !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
967 !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16, 32) ||
968 !FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_PICTURE_COUNT) ||
969 !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_PICTURE_COUNT))
970 return AVERROR(ENOMEM);
972 if (s->noise_reduction) {
973 if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
974 return AVERROR(ENOMEM);
977 ff_dct_encode_init(s);
979 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
980 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
982 if (s->slice_context_count > 1) {
985 if (avctx->codec_id == AV_CODEC_ID_H263P)
986 s->h263_slice_structured = 1;
989 s->quant_precision = 5;
991 #if FF_API_PRIVATE_OPT
992 FF_DISABLE_DEPRECATION_WARNINGS
993 if (avctx->frame_skip_threshold)
994 s->frame_skip_threshold = avctx->frame_skip_threshold;
995 if (avctx->frame_skip_factor)
996 s->frame_skip_factor = avctx->frame_skip_factor;
997 if (avctx->frame_skip_exp)
998 s->frame_skip_exp = avctx->frame_skip_exp;
999 if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
1000 s->frame_skip_cmp = avctx->frame_skip_cmp;
1001 FF_ENABLE_DEPRECATION_WARNINGS
1004 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, avctx->ildct_cmp);
1005 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
1007 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
1008 ff_h261_encode_init(s);
1009 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
1010 ff_h263_encode_init(s);
1011 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
1012 if ((ret = ff_msmpeg4_encode_init(s)) < 0)
1014 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
1015 && s->out_format == FMT_MPEG1)
1016 ff_mpeg1_encode_init(s);
1019 for (i = 0; i < 64; i++) {
1020 int j = s->idsp.idct_permutation[i];
1021 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
1023 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
1024 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
1025 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1026 s->intra_matrix[j] =
1027 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1028 } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
1029 s->intra_matrix[j] =
1030 s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1033 s->chroma_intra_matrix[j] =
1034 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1035 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1037 if (avctx->intra_matrix)
1038 s->intra_matrix[j] = avctx->intra_matrix[i];
1039 if (avctx->inter_matrix)
1040 s->inter_matrix[j] = avctx->inter_matrix[i];
1043 /* precompute matrix */
1044 /* for mjpeg, we do include qscale in the matrix */
1045 if (s->out_format != FMT_MJPEG) {
1046 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
1047 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1049 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
1050 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1054 if ((ret = ff_rate_control_init(s)) < 0)
1057 #if FF_API_PRIVATE_OPT
1058 FF_DISABLE_DEPRECATION_WARNINGS
1059 if (avctx->brd_scale)
1060 s->brd_scale = avctx->brd_scale;
1062 if (avctx->prediction_method)
1063 s->pred = avctx->prediction_method + 1;
1064 FF_ENABLE_DEPRECATION_WARNINGS
1067 if (s->b_frame_strategy == 2) {
1068 for (i = 0; i < s->max_b_frames + 2; i++) {
1069 s->tmp_frames[i] = av_frame_alloc();
1070 if (!s->tmp_frames[i])
1071 return AVERROR(ENOMEM);
1073 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1074 s->tmp_frames[i]->width = s->width >> s->brd_scale;
1075 s->tmp_frames[i]->height = s->height >> s->brd_scale;
1077 ret = av_frame_get_buffer(s->tmp_frames[i], 0);
1083 cpb_props = ff_add_cpb_side_data(avctx);
1085 return AVERROR(ENOMEM);
1086 cpb_props->max_bitrate = avctx->rc_max_rate;
1087 cpb_props->min_bitrate = avctx->rc_min_rate;
1088 cpb_props->avg_bitrate = avctx->bit_rate;
1089 cpb_props->buffer_size = avctx->rc_buffer_size;
/* Tear down the MPEG-video encoder: release rate control, the common
 * MpegEncContext state, MJPEG tables, the cached temp frames and all
 * per-encoder quantization-matrix / picture buffers.
 * Counterpart of ff_mpv_encode_init().
 * NOTE(review): interior lines are elided in this excerpt. */
1094 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1096 MpegEncContext *s = avctx->priv_data;
1099 ff_rate_control_uninit(s);
1101 ff_mpv_common_end(s);
/* MJPEG keeps codec-specific tables; free them only for that format */
1102 if (CONFIG_MJPEG_ENCODER &&
1103 s->out_format == FMT_MJPEG)
1104 ff_mjpeg_encode_close(s);
1106 av_freep(&avctx->extradata);
/* tmp_frames[] are the scratch frames used by b_frame_strategy == 2 */
1108 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1109 av_frame_free(&s->tmp_frames[i]);
1111 ff_free_picture_tables(&s->new_picture);
1112 ff_mpeg_unref_picture(avctx, &s->new_picture);
1114 av_freep(&avctx->stats_out);
1115 av_freep(&s->ac_stats);
/* The chroma matrices may alias the luma ones; free them only when they
 * are separate allocations, then clear the pointers so the unconditional
 * frees below cannot double-free. */
1117 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1118 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1119 s->q_chroma_intra_matrix= NULL;
1120 s->q_chroma_intra_matrix16= NULL;
1121 av_freep(&s->q_intra_matrix);
1122 av_freep(&s->q_inter_matrix);
1123 av_freep(&s->q_intra_matrix16);
1124 av_freep(&s->q_inter_matrix16);
1125 av_freep(&s->input_picture);
1126 av_freep(&s->reordered_input_picture);
1127 av_freep(&s->dct_offset);
/* Sum of absolute errors of a 16x16 block against the constant value
 * 'ref' (callers pass the block mean) — a flatness measure. */
1132 static int get_sae(uint8_t *src, int ref, int stride)
1137 for (y = 0; y < 16; y++) {
1138 for (x = 0; x < 16; x++) {
1139 acc += FFABS(src[x + y * stride] - ref);
/* Heuristic count of 16x16 blocks that look cheaper to code as intra:
 * compares the inter SAD (src vs ref) against the block's flatness
 * (SAE around its own mean) plus a fixed bias of 500. */
1146 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1147 uint8_t *ref, int stride)
/* round height down to whole macroblocks */
1153 h = s->height & ~15;
1155 for (y = 0; y < h; y += 16) {
1156 for (x = 0; x < w; x += 16) {
1157 int offset = x + y * stride;
1158 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
/* block mean, rounded: pix_sum over 256 pixels, >> 8 */
1160 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1161 int sae = get_sae(src + offset, mean, stride);
/* count the block when intra (flatness + bias) beats inter SAD */
1163 acc += sae + 500 < sad;
/* Thin wrapper around ff_alloc_picture() that fills in the encoder's
 * geometry and passes encoding=1 so encoder-side tables get allocated. */
1169 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1171 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1172 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1173 s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1174 &s->linesize, &s->uvlinesize);
/* Queue one user-supplied frame for encoding.  Validates/synthesizes the
 * pts, decides whether the input buffer can be referenced directly
 * ("direct") or must be copied into a freshly allocated Picture, and
 * inserts the result into s->input_picture[] honouring the B-frame
 * reordering delay.  NOTE(review): interior lines are elided here. */
1177 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1179 Picture *pic = NULL;
1181 int i, display_picture_number = 0, ret;
/* how many frames the encoder buffers before producing output */
1182 int encoding_delay = s->max_b_frames ? s->max_b_frames
1183 : (s->low_delay ? 0 : 1);
1184 int flush_offset = 1;
1189 display_picture_number = s->input_picture_number++;
/* ---- pts handling: validate a user pts, or guess one -------------- */
1191 if (pts != AV_NOPTS_VALUE) {
1192 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1193 int64_t last = s->user_specified_pts;
/* pts must be strictly increasing */
1196 av_log(s->avctx, AV_LOG_ERROR,
1197 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1199 return AVERROR(EINVAL);
/* remember the pts step so the first frame's dts can be derived later */
1202 if (!s->low_delay && display_picture_number == 1)
1203 s->dts_delta = pts - last;
1205 s->user_specified_pts = pts;
/* no pts given: continue from the last one, else use the frame index */
1207 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1208 s->user_specified_pts =
1209 pts = s->user_specified_pts + 1;
1210 av_log(s->avctx, AV_LOG_INFO,
1211 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1214 pts = display_picture_number;
/* ---- direct use requires matching strides, MB-aligned dimensions
 * and STRIDE_ALIGN-aligned pointers; otherwise the data is copied ---- */
1218 if (!pic_arg->buf[0] ||
1219 pic_arg->linesize[0] != s->linesize ||
1220 pic_arg->linesize[1] != s->uvlinesize ||
1221 pic_arg->linesize[2] != s->uvlinesize)
1223 if ((s->width & 15) || (s->height & 15))
1225 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1227 if (s->linesize & (STRIDE_ALIGN-1))
1230 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1231 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1233 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1237 pic = &s->picture[i];
1241 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1244 ret = alloc_picture(s, pic, direct);
/* input already sits exactly where the copy would land: skip the copy */
1249 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1250 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1251 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1254 int h_chroma_shift, v_chroma_shift;
1255 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
/* copy plane by plane, honouring chroma subsampling */
1259 for (i = 0; i < 3; i++) {
1260 int src_stride = pic_arg->linesize[i];
1261 int dst_stride = i ? s->uvlinesize : s->linesize;
1262 int h_shift = i ? h_chroma_shift : 0;
1263 int v_shift = i ? v_chroma_shift : 0;
1264 int w = s->width >> h_shift;
1265 int h = s->height >> v_shift;
1266 uint8_t *src = pic_arg->data[i];
1267 uint8_t *dst = pic->f->data[i];
/* interlaced MPEG-2 with tall alignment needs extra bottom padding */
1270 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1271 && !s->progressive_sequence
1272 && FFALIGN(s->height, 32) - s->height > 16)
1275 if (!s->avctx->rc_buffer_size)
1276 dst += INPLACE_OFFSET;
1278 if (src_stride == dst_stride)
1279 memcpy(dst, src, src_stride * h);
1282 uint8_t *dst2 = dst;
1284 memcpy(dst2, src, w);
/* pad right/bottom edges of non-MB-aligned frames */
1289 if ((s->width & 15) || (s->height & (vpad-1))) {
1290 s->mpvencdsp.draw_edges(dst, dst_stride,
1300 ret = av_frame_copy_props(pic->f, pic_arg);
1304 pic->f->display_picture_number = display_picture_number;
1305 pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1307 /* Flushing: When we have not received enough input frames,
1308 * ensure s->input_picture[0] contains the first picture */
1309 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1310 if (s->input_picture[flush_offset])
1313 if (flush_offset <= 1)
1316 encoding_delay = encoding_delay - flush_offset + 1;
1319 /* shift buffer entries */
1320 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1321 s->input_picture[i - flush_offset] = s->input_picture[i];
1323 s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether picture p can be skipped: accumulate an 8x8-block
 * comparison between p and the reference over all three planes, combine
 * the per-block values according to |frame_skip_exp|, then test against
 * the skip threshold and lambda-scaled skip factor. */
1328 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1332 int64_t score64 = 0;
1334 for (plane = 0; plane < 3; plane++) {
1335 const int stride = p->f->linesize[plane];
/* luma covers 2x2 8-pel blocks per MB, chroma 1x1 (4:2:0 layout assumed
 * here — TODO confirm against callers) */
1336 const int bw = plane ? 1 : 2;
1337 for (y = 0; y < s->mb_height * bw; y++) {
1338 for (x = 0; x < s->mb_width * bw; x++) {
/* non-shared pictures carry a 16-byte INPLACE offset */
1339 int off = p->shared ? 0 : 16;
1340 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1341 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1342 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
/* exponent selects how block errors are pooled into the frame score */
1344 switch (FFABS(s->frame_skip_exp)) {
1345 case 0: score = FFMAX(score, v); break;
1346 case 1: score += FFABS(v); break;
1347 case 2: score64 += v * (int64_t)v; break;
1348 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1349 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
/* negative exponent: normalize back via the matching root */
1358 if (s->frame_skip_exp < 0)
1359 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1360 -1.0/s->frame_skip_exp);
1362 if (score64 < s->frame_skip_threshold)
1364 if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
/* Push one frame (or NULL to drain) through a scratch encoder context
 * and collect the resulting packet; helper for estimate_best_b_count().
 * NOTE(review): the size/return bookkeeping lines are elided here. */
1369 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1371 AVPacket pkt = { 0 };
1375 av_init_packet(&pkt);
1377 ret = avcodec_send_frame(c, frame);
1382 ret = avcodec_receive_packet(c, &pkt);
/* the packet data itself is not needed, only its size */
1385 av_packet_unref(&pkt);
/* EAGAIN/EOF are expected terminations of the receive loop */
1386 } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
/* b_frame_strategy == 2: re-encode the queued input pictures at reduced
 * resolution (>> brd_scale) with every candidate number of B-frames and
 * return the count j that minimizes the rate-distortion cost
 * (bits * lambda2 + reconstruction error).
 * NOTE(review): interior lines are elided in this excerpt. */
1393 static int estimate_best_b_count(MpegEncContext *s)
1395 const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1396 const int scale = s->brd_scale;
1397 int width = s->width >> scale;
1398 int height = s->height >> scale;
1399 int i, j, out_size, p_lambda, b_lambda, lambda2;
1400 int64_t best_rd = INT64_MAX;
1401 int best_b_count = -1;
1404 av_assert0(scale >= 0 && scale <= 3);
1407 //s->next_picture_ptr->quality;
1408 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1409 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1410 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1411 if (!b_lambda) // FIXME we should do this somewhere else
1412 b_lambda = p_lambda;
1413 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* downscale the reference (i == 0) and the queued inputs into tmp_frames */
1416 for (i = 0; i < s->max_b_frames + 2; i++) {
1417 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1418 s->next_picture_ptr;
1421 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1422 pre_input = *pre_input_ptr;
1423 memcpy(data, pre_input_ptr->f->data, sizeof(data));
/* non-shared input pictures carry the INPLACE offset */
1425 if (!pre_input.shared && i) {
1426 data[0] += INPLACE_OFFSET;
1427 data[1] += INPLACE_OFFSET;
1428 data[2] += INPLACE_OFFSET;
1431 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1432 s->tmp_frames[i]->linesize[0],
1434 pre_input.f->linesize[0],
1436 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1437 s->tmp_frames[i]->linesize[1],
1439 pre_input.f->linesize[1],
1440 width >> 1, height >> 1);
1441 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1442 s->tmp_frames[i]->linesize[2],
1444 pre_input.f->linesize[2],
1445 width >> 1, height >> 1);
/* try each candidate B-frame count j with a throw-away encoder */
1449 for (j = 0; j < s->max_b_frames + 1; j++) {
1453 if (!s->input_picture[j])
1456 c = avcodec_alloc_context3(NULL);
1458 return AVERROR(ENOMEM);
/* mirror the settings that materially affect the RD comparison */
1462 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1463 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1464 c->mb_decision = s->avctx->mb_decision;
1465 c->me_cmp = s->avctx->me_cmp;
1466 c->mb_cmp = s->avctx->mb_cmp;
1467 c->me_sub_cmp = s->avctx->me_sub_cmp;
1468 c->pix_fmt = AV_PIX_FMT_YUV420P;
1469 c->time_base = s->avctx->time_base;
1470 c->max_b_frames = s->max_b_frames;
1472 ret = avcodec_open2(c, codec, NULL);
/* frame 0 is the I reference; its cost is excluded from rd below */
1476 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1477 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1479 out_size = encode_frame(c, s->tmp_frames[0]);
1485 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1487 for (i = 0; i < s->max_b_frames + 1; i++) {
/* every (j+1)-th frame (and the last) is a P, the rest are B */
1488 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1490 s->tmp_frames[i + 1]->pict_type = is_p ?
1491 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1492 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1494 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1500 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1503 /* get the delayed frames */
1504 out_size = encode_frame(c, NULL);
1509 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* add the accumulated reconstruction error (PSNR bookkeeping) */
1511 rd += c->error[0] + c->error[1] + c->error[2];
1519 avcodec_free_context(&c);
1524 return best_b_count;
/* Pick the next picture to code and reorder the input queue for
 * B-frames: handles frame skipping, forced I-frames, the configured
 * b_frame_strategy, GOP boundaries, and finally references the chosen
 * picture as s->new_picture / s->current_picture_ptr.
 * NOTE(review): interior lines are elided in this excerpt. */
1527 static int select_input_picture(MpegEncContext *s)
/* rotate the reordered queue forward by one */
1531 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1532 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1533 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1535 /* set next picture type & ordering */
1536 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
/* optional whole-frame skip when the picture barely differs from ref */
1537 if (s->frame_skip_threshold || s->frame_skip_factor) {
1538 if (s->picture_in_gop_number < s->gop_size &&
1539 s->next_picture_ptr &&
1540 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1541 // FIXME check that the gop check above is +-1 correct
1542 av_frame_unref(s->input_picture[0]->f);
/* skipped frame still consumes 0 bits of the VBV budget */
1544 ff_vbv_update(s, 0);
/* first frame / intra-only: force an I picture */
1550 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1551 !s->next_picture_ptr || s->intra_only) {
1552 s->reordered_input_picture[0] = s->input_picture[0];
1553 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1554 s->reordered_input_picture[0]->f->coded_picture_number =
1555 s->coded_picture_number++;
/* two-pass: take picture types from the first-pass log */
1559 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1560 for (i = 0; i < s->max_b_frames + 1; i++) {
1561 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1563 if (pict_num >= s->rc_context.num_entries)
1565 if (!s->input_picture[i]) {
1566 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1570 s->input_picture[i]->f->pict_type =
1571 s->rc_context.entry[pict_num].new_pict_type;
/* strategy 0: always use max_b_frames (trimmed at end of stream) */
1575 if (s->b_frame_strategy == 0) {
1576 b_frames = s->max_b_frames;
1577 while (b_frames && !s->input_picture[b_frames])
/* strategy 1: score candidates via get_intra_count() and stop at the
 * first frame that looks too intra-heavy */
1579 } else if (s->b_frame_strategy == 1) {
1580 for (i = 1; i < s->max_b_frames + 1; i++) {
1581 if (s->input_picture[i] &&
1582 s->input_picture[i]->b_frame_score == 0) {
1583 s->input_picture[i]->b_frame_score =
1585 s->input_picture[i ]->f->data[0],
1586 s->input_picture[i - 1]->f->data[0],
1590 for (i = 0; i < s->max_b_frames + 1; i++) {
1591 if (!s->input_picture[i] ||
1592 s->input_picture[i]->b_frame_score - 1 >
1593 s->mb_num / s->b_sensitivity)
1597 b_frames = FFMAX(0, i - 1);
/* reset scores so the next window is re-evaluated */
1600 for (i = 0; i < b_frames + 1; i++) {
1601 s->input_picture[i]->b_frame_score = 0;
/* strategy 2: full RD search over B-frame counts */
1603 } else if (s->b_frame_strategy == 2) {
1604 b_frames = estimate_best_b_count(s);
/* a user-forced non-B type inside the window truncates it */
1611 for (i = b_frames - 1; i >= 0; i--) {
1612 int type = s->input_picture[i]->f->pict_type;
1613 if (type && type != AV_PICTURE_TYPE_B)
1616 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1617 b_frames == s->max_b_frames) {
1618 av_log(s->avctx, AV_LOG_ERROR,
1619 "warning, too many B-frames in a row\n");
/* GOP boundary handling: clamp B run, possibly force an I picture */
1622 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1623 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1624 s->gop_size > s->picture_in_gop_number) {
1625 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1627 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1629 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1633 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1634 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
/* anchor frame first, then the B-frames in display order */
1637 s->reordered_input_picture[0] = s->input_picture[b_frames];
1638 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1639 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1640 s->reordered_input_picture[0]->f->coded_picture_number =
1641 s->coded_picture_number++;
1642 for (i = 0; i < b_frames; i++) {
1643 s->reordered_input_picture[i + 1] = s->input_picture[i];
1644 s->reordered_input_picture[i + 1]->f->pict_type =
1646 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1647 s->coded_picture_number++;
1652 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1654 if (s->reordered_input_picture[0]) {
/* B pictures are never used as references (3 = both fields) */
1655 s->reordered_input_picture[0]->reference =
1656 s->reordered_input_picture[0]->f->pict_type !=
1657 AV_PICTURE_TYPE_B ? 3 : 0;
1659 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1662 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1663 // input is a shared pix, so we can't modify it -> allocate a new
1664 // one & ensure that the shared one is reuseable
1667 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1670 pic = &s->picture[i];
1672 pic->reference = s->reordered_input_picture[0]->reference;
1673 if (alloc_picture(s, pic, 0) < 0) {
1677 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1681 /* mark us unused / free shared pic */
1682 av_frame_unref(s->reordered_input_picture[0]->f);
1683 s->reordered_input_picture[0]->shared = 0;
1685 s->current_picture_ptr = pic;
1687 // input is not a shared pix -> reuse buffer for current_pix
1688 s->current_picture_ptr = s->reordered_input_picture[0];
1689 for (i = 0; i < 4; i++) {
1690 s->new_picture.f->data[i] += INPLACE_OFFSET;
1693 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1694 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1695 s->current_picture_ptr)) < 0)
1698 s->picture_number = s->new_picture.f->display_picture_number;
/* Per-frame cleanup after coding: pad the reconstructed reference edges
 * for unrestricted motion vectors, record the picture's lambda/type for
 * future rate decisions, and service deprecated coded_frame/error APIs. */
1703 static void frame_end(MpegEncContext *s)
/* edge padding only matters for frames that will serve as references */
1705 if (s->unrestricted_mv &&
1706 s->current_picture.reference &&
1708 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1709 int hshift = desc->log2_chroma_w;
1710 int vshift = desc->log2_chroma_h;
1711 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1712 s->current_picture.f->linesize[0],
1713 s->h_edge_pos, s->v_edge_pos,
1714 EDGE_WIDTH, EDGE_WIDTH,
1715 EDGE_TOP | EDGE_BOTTOM);
/* chroma planes use subsampled extents and edge widths */
1716 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1717 s->current_picture.f->linesize[1],
1718 s->h_edge_pos >> hshift,
1719 s->v_edge_pos >> vshift,
1720 EDGE_WIDTH >> hshift,
1721 EDGE_WIDTH >> vshift,
1722 EDGE_TOP | EDGE_BOTTOM);
1723 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1724 s->current_picture.f->linesize[2],
1725 s->h_edge_pos >> hshift,
1726 s->v_edge_pos >> vshift,
1727 EDGE_WIDTH >> hshift,
1728 EDGE_WIDTH >> vshift,
1729 EDGE_TOP | EDGE_BOTTOM);
/* remember per-type lambda for the next frame of the same type */
1734 s->last_pict_type = s->pict_type;
1735 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1736 if (s->pict_type!= AV_PICTURE_TYPE_B)
1737 s->last_non_b_pict_type = s->pict_type;
/* legacy public API mirrors, kept while the deprecation guards exist */
1739 #if FF_API_CODED_FRAME
1740 FF_DISABLE_DEPRECATION_WARNINGS
1741 av_frame_unref(s->avctx->coded_frame);
1742 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1743 FF_ENABLE_DEPRECATION_WARNINGS
1745 #if FF_API_ERROR_FRAME
1746 FF_DISABLE_DEPRECATION_WARNINGS
1747 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1748 sizeof(s->current_picture.encoding_error));
1749 FF_ENABLE_DEPRECATION_WARNINGS
/* Refresh the per-coefficient noise-reduction offsets from the running
 * DCT error statistics, separately for intra and inter blocks. */
1753 static void update_noise_reduction(MpegEncContext *s)
1757 for (intra = 0; intra < 2; intra++) {
/* halve the accumulators periodically so they track recent content
 * and cannot overflow */
1758 if (s->dct_count[intra] > (1 << 16)) {
1759 for (i = 0; i < 64; i++) {
1760 s->dct_error_sum[intra][i] >>= 1;
1762 s->dct_count[intra] >>= 1;
/* offset ~ noise_reduction * count / error_sum, rounded */
1765 for (i = 0; i < 64; i++) {
1766 s->dct_offset[intra][i] = (s->noise_reduction *
1767 s->dct_count[intra] +
1768 s->dct_error_sum[intra][i] / 2) /
1769 (s->dct_error_sum[intra][i] + 1);
/* Per-frame setup before coding: rotate last/next/current reference
 * pictures, adjust plane pointers for field pictures, select the
 * dct_unquantize functions for the output format, and refresh the
 * noise-reduction offsets. */
1774 static int frame_start(MpegEncContext *s)
1778 /* mark & release old frames */
1779 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1780 s->last_picture_ptr != s->next_picture_ptr &&
1781 s->last_picture_ptr->f->buf[0]) {
1782 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1785 s->current_picture_ptr->f->pict_type = s->pict_type;
1786 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1788 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1789 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1790 s->current_picture_ptr)) < 0)
/* non-B frames become the new backward->forward reference pair */
1793 if (s->pict_type != AV_PICTURE_TYPE_B) {
1794 s->last_picture_ptr = s->next_picture_ptr;
1796 s->next_picture_ptr = s->current_picture_ptr;
1799 if (s->last_picture_ptr) {
1800 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1801 if (s->last_picture_ptr->f->buf[0] &&
1802 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1803 s->last_picture_ptr)) < 0)
1806 if (s->next_picture_ptr) {
1807 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1808 if (s->next_picture_ptr->f->buf[0] &&
1809 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1810 s->next_picture_ptr)) < 0)
/* field pictures: offset data to the chosen field and double strides */
1814 if (s->picture_structure!= PICT_FRAME) {
1816 for (i = 0; i < 4; i++) {
1817 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1818 s->current_picture.f->data[i] +=
1819 s->current_picture.f->linesize[i];
1821 s->current_picture.f->linesize[i] *= 2;
1822 s->last_picture.f->linesize[i] *= 2;
1823 s->next_picture.f->linesize[i] *= 2;
/* pick the inverse quantizer matching the target bitstream format */
1827 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1828 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1829 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1830 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1831 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1832 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1834 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1835 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1838 if (s->dct_error_sum) {
1839 av_assert2(s->noise_reduction && s->encoding);
1840 update_noise_reduction(s);
/* Top-level encode entry point: queue pic_arg, select the next picture,
 * encode it into pkt, then perform rate-control post-processing —
 * possibly re-encoding at a higher lambda for VBV compliance, appending
 * stuffing bits, and patching the MPEG-1/2 vbv_delay field for CBR.
 * NOTE(review): many interior lines are elided in this excerpt. */
1846 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1847 const AVFrame *pic_arg, int *got_packet)
1849 MpegEncContext *s = avctx->priv_data;
1850 int i, stuffing_count, ret;
1851 int context_count = s->slice_context_count;
1853 s->vbv_ignore_qmax = 0;
1855 s->picture_in_gop_number++;
1857 if (load_input_picture(s, pic_arg) < 0)
1860 if (select_input_picture(s) < 0) {
/* a picture was selected for coding only if new_picture has data */
1865 if (s->new_picture.f->data[0]) {
/* with one slice context and no caller buffer the packet can grow on
 * demand; otherwise allocate the worst case up front */
1866 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1867 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1869 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1870 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
/* side-data buffer for H.263 macroblock info, filled while coding */
1873 s->mb_info_ptr = av_packet_new_side_data(pkt,
1874 AV_PKT_DATA_H263_MB_INFO,
1875 s->mb_width*s->mb_height*12);
1876 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* carve the packet into one PutBitContext region per slice thread,
 * proportional to each thread's macroblock rows */
1879 for (i = 0; i < context_count; i++) {
1880 int start_y = s->thread_context[i]->start_mb_y;
1881 int end_y = s->thread_context[i]-> end_mb_y;
1882 int h = s->mb_height;
1883 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1884 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1886 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1889 s->pict_type = s->new_picture.f->pict_type;
1891 ret = frame_start(s);
1895 ret = encode_picture(s, s->picture_number);
1896 if (growing_buffer) {
/* the internal byte buffer may have been reallocated while coding */
1897 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1898 pkt->data = s->pb.buf;
1899 pkt->size = avctx->internal->byte_buffer_size;
/* legacy public statistics fields, kept under deprecation guards */
1904 #if FF_API_STAT_BITS
1905 FF_DISABLE_DEPRECATION_WARNINGS
1906 avctx->header_bits = s->header_bits;
1907 avctx->mv_bits = s->mv_bits;
1908 avctx->misc_bits = s->misc_bits;
1909 avctx->i_tex_bits = s->i_tex_bits;
1910 avctx->p_tex_bits = s->p_tex_bits;
1911 avctx->i_count = s->i_count;
1912 // FIXME f/b_count in avctx
1913 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1914 avctx->skip_count = s->skip_count;
1915 FF_ENABLE_DEPRECATION_WARNINGS
1920 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1921 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
/* ---- VBV compliance: if the frame is too large, raise lambda and
 * re-encode (loop structure partly elided in this excerpt) ---------- */
1923 if (avctx->rc_buffer_size) {
1924 RateControlContext *rcc = &s->rc_context;
1925 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1926 int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
1927 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1929 if (put_bits_count(&s->pb) > max_size &&
1930 s->lambda < s->lmax) {
1931 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1932 (s->qscale + 1) / s->qscale);
1933 if (s->adaptive_quant) {
1935 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1936 s->lambda_table[i] =
1937 FFMAX(s->lambda_table[i] + min_step,
1938 s->lambda_table[i] * (s->qscale + 1) /
1941 s->mb_skipped = 0; // done in frame_start()
1942 // done in encode_picture() so we must undo it
1943 if (s->pict_type == AV_PICTURE_TYPE_P) {
1944 if (s->flipflop_rounding ||
1945 s->codec_id == AV_CODEC_ID_H263P ||
1946 s->codec_id == AV_CODEC_ID_MPEG4)
1947 s->no_rounding ^= 1;
1949 if (s->pict_type != AV_PICTURE_TYPE_B) {
1950 s->time_base = s->last_time_base;
1951 s->last_non_b_time = s->time - s->pp_time;
/* rewind every thread's bit writer for the re-encode */
1953 for (i = 0; i < context_count; i++) {
1954 PutBitContext *pb = &s->thread_context[i]->pb;
1955 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1957 s->vbv_ignore_qmax = 1;
1958 av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1962 av_assert0(avctx->rc_max_rate);
1965 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1966 ff_write_pass1_stats(s);
/* propagate per-plane encoding error for PSNR reporting */
1968 for (i = 0; i < 4; i++) {
1969 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1970 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1972 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1973 s->current_picture_ptr->encoding_error,
1974 (avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1977 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1978 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1979 s->misc_bits + s->i_tex_bits +
1981 flush_put_bits(&s->pb);
1982 s->frame_bits = put_bits_count(&s->pb);
/* ---- stuffing: pad the frame to satisfy the minimum VBV rate ------ */
1984 stuffing_count = ff_vbv_update(s, s->frame_bits);
1985 s->stuffing_bits = 8*stuffing_count;
1986 if (stuffing_count) {
1987 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1988 stuffing_count + 50) {
1989 av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
1993 switch (s->codec_id) {
1994 case AV_CODEC_ID_MPEG1VIDEO:
1995 case AV_CODEC_ID_MPEG2VIDEO:
/* MPEG-1/2 stuffing: plain zero bytes */
1996 while (stuffing_count--) {
1997 put_bits(&s->pb, 8, 0);
/* MPEG-4 stuffing: a stuffing start code then 0xFF filler */
2000 case AV_CODEC_ID_MPEG4:
2001 put_bits(&s->pb, 16, 0);
2002 put_bits(&s->pb, 16, 0x1C3);
2003 stuffing_count -= 4;
2004 while (stuffing_count--) {
2005 put_bits(&s->pb, 8, 0xFF);
2009 av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2011 flush_put_bits(&s->pb);
2012 s->frame_bits = put_bits_count(&s->pb);
2015 /* update MPEG-1/2 vbv_delay for CBR */
2016 if (avctx->rc_max_rate &&
2017 avctx->rc_min_rate == avctx->rc_max_rate &&
2018 s->out_format == FMT_MPEG1 &&
2019 90000LL * (avctx->rc_buffer_size - 1) <=
2020 avctx->rc_max_rate * 0xFFFFLL) {
2021 AVCPBProperties *props;
2024 int vbv_delay, min_delay;
2025 double inbits = avctx->rc_max_rate *
2026 av_q2d(avctx->time_base);
2027 int minbits = s->frame_bits - 8 *
2028 (s->vbv_delay_ptr - s->pb.buf - 1);
2029 double bits = s->rc_context.buffer_index + minbits - inbits;
2032 av_log(avctx, AV_LOG_ERROR,
2033 "Internal error, negative bits\n");
2035 av_assert1(s->repeat_first_field == 0);
/* vbv_delay is expressed in 90 kHz clock ticks */
2037 vbv_delay = bits * 90000 / avctx->rc_max_rate;
2038 min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
2041 vbv_delay = FFMAX(vbv_delay, min_delay);
2043 av_assert0(vbv_delay < 0xFFFF);
/* patch the 16-bit vbv_delay field in the already-written header;
 * it straddles byte boundaries, hence the mask-and-or dance */
2045 s->vbv_delay_ptr[0] &= 0xF8;
2046 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2047 s->vbv_delay_ptr[1] = vbv_delay >> 5;
2048 s->vbv_delay_ptr[2] &= 0x07;
2049 s->vbv_delay_ptr[2] |= vbv_delay << 3;
2051 props = av_cpb_properties_alloc(&props_size);
2053 return AVERROR(ENOMEM);
/* CPB side data uses the 27 MHz clock: 90 kHz * 300 */
2054 props->vbv_delay = vbv_delay * 300;
2056 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
2057 (uint8_t*)props, props_size);
2063 #if FF_API_VBV_DELAY
2064 FF_DISABLE_DEPRECATION_WARNINGS
2065 avctx->vbv_delay = vbv_delay * 300;
2066 FF_ENABLE_DEPRECATION_WARNINGS
2069 s->total_bits += s->frame_bits;
2070 #if FF_API_STAT_BITS
2071 FF_DISABLE_DEPRECATION_WARNINGS
2072 avctx->frame_bits = s->frame_bits;
2073 FF_ENABLE_DEPRECATION_WARNINGS
/* ---- timestamps: derive dts from reordered pts for B-frame delay --- */
2077 pkt->pts = s->current_picture.f->pts;
2078 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2079 if (!s->current_picture.f->coded_picture_number)
2080 pkt->dts = pkt->pts - s->dts_delta;
2082 pkt->dts = s->reordered_pts;
2083 s->reordered_pts = pkt->pts;
2085 pkt->dts = pkt->pts;
2086 if (s->current_picture.f->key_frame)
2087 pkt->flags |= AV_PKT_FLAG_KEY;
2089 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2094 /* release non-reference frames */
2095 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2096 if (!s->picture[i].reference)
2097 ff_mpeg_unref_picture(avctx, &s->picture[i]);
2100 av_assert1((s->frame_bits & 7) == 0);
2102 pkt->size = s->frame_bits / 8;
2103 *got_packet = !!pkt->size;
/* Zero out block n entirely when it contains only a few small
 * coefficients whose coding cost exceeds their benefit: score each
 * nonzero level-1 coefficient by its scan position (tab[]), bail out as
 * soon as the score reaches the threshold, otherwise clear the block. */
2107 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2108 int n, int threshold)
/* per-scan-position cost: early (low-frequency) positions score higher */
2110 static const char tab[64] = {
2111 3, 2, 2, 1, 1, 1, 1, 1,
2112 1, 1, 1, 1, 1, 1, 1, 1,
2113 1, 1, 1, 1, 1, 1, 1, 1,
2114 0, 0, 0, 0, 0, 0, 0, 0,
2115 0, 0, 0, 0, 0, 0, 0, 0,
2116 0, 0, 0, 0, 0, 0, 0, 0,
2117 0, 0, 0, 0, 0, 0, 0, 0,
2118 0, 0, 0, 0, 0, 0, 0, 0
2123 int16_t *block = s->block[n];
2124 const int last_index = s->block_last_index[n];
/* negative threshold means: preserve the DC coefficient */
2127 if (threshold < 0) {
2129 threshold = -threshold;
2133 /* Are all we could set to zero already zero? */
2134 if (last_index <= skip_dc - 1)
2137 for (i = 0; i <= last_index; i++) {
2138 const int j = s->intra_scantable.permutated[i];
2139 const int level = FFABS(block[j]);
2141 if (skip_dc && i == 0)
/* any coefficient larger than 1 makes the block worth keeping */
2145 } else if (level > 1) {
2151 if (score >= threshold)
/* clear all (AC) coefficients and fix up last_index accordingly */
2153 for (i = skip_dc; i <= last_index; i++) {
2154 const int j = s->intra_scantable.permutated[i];
2158 s->block_last_index[n] = 0;
2160 s->block_last_index[n] = -1;
/* Clamp quantized coefficients into the codec's representable range
 * [min_qcoeff, max_qcoeff]; warn once per block when clipping occurred
 * (only in simple MB-decision mode, where it cannot be compensated). */
2163 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2167 const int maxlevel = s->max_qcoeff;
2168 const int minlevel = s->min_qcoeff;
2172 i = 1; // skip clipping of intra dc
2176 for (; i <= last_index; i++) {
2177 const int j = s->intra_scantable.permutated[i];
2178 int level = block[j];
2180 if (level > maxlevel) {
2183 } else if (level < minlevel) {
2191 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2192 av_log(s->avctx, AV_LOG_INFO,
2193 "warning, clipping %d dct coefficients to %d..%d\n",
2194 overflow, minlevel, maxlevel);
/* Compute an 8x8 perceptual weight table from local activity: for each
 * pixel, measure the standard deviation of its 3x3 neighbourhood
 * (clipped at block borders) and scale it by 36/count. */
2197 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2201 for (y = 0; y < 8; y++) {
2202 for (x = 0; x < 8; x++) {
/* 3x3 window clamped to the block: accumulate sum and sum of squares */
2208 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2209 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2210 int v = ptr[x2 + y2 * stride];
/* 36 * sqrt(count*sqr - sum^2) / count ~ scaled local std deviation */
2216 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2221 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2222 int motion_x, int motion_y,
2223 int mb_block_height,
2227 int16_t weight[12][64];
2228 int16_t orig[12][64];
2229 const int mb_x = s->mb_x;
2230 const int mb_y = s->mb_y;
2233 int dct_offset = s->linesize * 8; // default for progressive frames
2234 int uv_dct_offset = s->uvlinesize * 8;
2235 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2236 ptrdiff_t wrap_y, wrap_c;
2238 for (i = 0; i < mb_block_count; i++)
2239 skip_dct[i] = s->skipdct;
2241 if (s->adaptive_quant) {
2242 const int last_qp = s->qscale;
2243 const int mb_xy = mb_x + mb_y * s->mb_stride;
2245 s->lambda = s->lambda_table[mb_xy];
2248 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2249 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2250 s->dquant = s->qscale - last_qp;
2252 if (s->out_format == FMT_H263) {
2253 s->dquant = av_clip(s->dquant, -2, 2);
2255 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2257 if (s->pict_type == AV_PICTURE_TYPE_B) {
2258 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2261 if (s->mv_type == MV_TYPE_8X8)
2267 ff_set_qscale(s, last_qp + s->dquant);
2268 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2269 ff_set_qscale(s, s->qscale + s->dquant);
2271 wrap_y = s->linesize;
2272 wrap_c = s->uvlinesize;
2273 ptr_y = s->new_picture.f->data[0] +
2274 (mb_y * 16 * wrap_y) + mb_x * 16;
2275 ptr_cb = s->new_picture.f->data[1] +
2276 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2277 ptr_cr = s->new_picture.f->data[2] +
2278 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2280 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2281 uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2282 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2283 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2284 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2286 16, 16, mb_x * 16, mb_y * 16,
2287 s->width, s->height);
2289 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2291 mb_block_width, mb_block_height,
2292 mb_x * mb_block_width, mb_y * mb_block_height,
2294 ptr_cb = ebuf + 16 * wrap_y;
2295 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2297 mb_block_width, mb_block_height,
2298 mb_x * mb_block_width, mb_y * mb_block_height,
2300 ptr_cr = ebuf + 16 * wrap_y + 16;
2304 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2305 int progressive_score, interlaced_score;
2307 s->interlaced_dct = 0;
2308 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2309 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2310 NULL, wrap_y, 8) - 400;
2312 if (progressive_score > 0) {
2313 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2314 NULL, wrap_y * 2, 8) +
2315 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2316 NULL, wrap_y * 2, 8);
2317 if (progressive_score > interlaced_score) {
2318 s->interlaced_dct = 1;
2320 dct_offset = wrap_y;
2321 uv_dct_offset = wrap_c;
2323 if (s->chroma_format == CHROMA_422 ||
2324 s->chroma_format == CHROMA_444)
2330 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2331 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2332 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2333 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2335 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2339 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2340 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2341 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2342 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2343 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2344 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2345 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2346 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2347 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2348 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2349 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2350 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2354 op_pixels_func (*op_pix)[4];
2355 qpel_mc_func (*op_qpix)[16];
2356 uint8_t *dest_y, *dest_cb, *dest_cr;
2358 dest_y = s->dest[0];
2359 dest_cb = s->dest[1];
2360 dest_cr = s->dest[2];
2362 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2363 op_pix = s->hdsp.put_pixels_tab;
2364 op_qpix = s->qdsp.put_qpel_pixels_tab;
2366 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2367 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2370 if (s->mv_dir & MV_DIR_FORWARD) {
2371 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2372 s->last_picture.f->data,
2374 op_pix = s->hdsp.avg_pixels_tab;
2375 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2377 if (s->mv_dir & MV_DIR_BACKWARD) {
2378 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2379 s->next_picture.f->data,
2383 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2384 int progressive_score, interlaced_score;
2386 s->interlaced_dct = 0;
2387 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2388 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2392 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2393 progressive_score -= 400;
2395 if (progressive_score > 0) {
2396 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2398 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2402 if (progressive_score > interlaced_score) {
2403 s->interlaced_dct = 1;
2405 dct_offset = wrap_y;
2406 uv_dct_offset = wrap_c;
2408 if (s->chroma_format == CHROMA_422)
2414 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2415 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2416 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2417 dest_y + dct_offset, wrap_y);
2418 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2419 dest_y + dct_offset + 8, wrap_y);
2421 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2425 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2426 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2427 if (!s->chroma_y_shift) { /* 422 */
2428 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2429 dest_cb + uv_dct_offset, wrap_c);
2430 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2431 dest_cr + uv_dct_offset, wrap_c);
2434 /* pre quantization */
2435 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2436 2 * s->qscale * s->qscale) {
2438 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2440 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2442 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2443 wrap_y, 8) < 20 * s->qscale)
2445 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2446 wrap_y, 8) < 20 * s->qscale)
2448 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2450 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2452 if (!s->chroma_y_shift) { /* 422 */
2453 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2454 dest_cb + uv_dct_offset,
2455 wrap_c, 8) < 20 * s->qscale)
2457 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2458 dest_cr + uv_dct_offset,
2459 wrap_c, 8) < 20 * s->qscale)
2465 if (s->quantizer_noise_shaping) {
2467 get_visual_weight(weight[0], ptr_y , wrap_y);
2469 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2471 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2473 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2475 get_visual_weight(weight[4], ptr_cb , wrap_c);
2477 get_visual_weight(weight[5], ptr_cr , wrap_c);
2478 if (!s->chroma_y_shift) { /* 422 */
2480 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2483 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2486 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2489 /* DCT & quantize */
2490 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2492 for (i = 0; i < mb_block_count; i++) {
2495 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2496 // FIXME we could decide to change to quantizer instead of
2498 // JS: I don't think that would be a good idea it could lower
2499 // quality instead of improve it. Just INTRADC clipping
2500 // deserves changes in quantizer
2502 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2504 s->block_last_index[i] = -1;
2506 if (s->quantizer_noise_shaping) {
2507 for (i = 0; i < mb_block_count; i++) {
2509 s->block_last_index[i] =
2510 dct_quantize_refine(s, s->block[i], weight[i],
2511 orig[i], i, s->qscale);
2516 if (s->luma_elim_threshold && !s->mb_intra)
2517 for (i = 0; i < 4; i++)
2518 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2519 if (s->chroma_elim_threshold && !s->mb_intra)
2520 for (i = 4; i < mb_block_count; i++)
2521 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2523 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2524 for (i = 0; i < mb_block_count; i++) {
2525 if (s->block_last_index[i] == -1)
2526 s->coded_score[i] = INT_MAX / 256;
2531 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2532 s->block_last_index[4] =
2533 s->block_last_index[5] = 0;
2535 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2536 if (!s->chroma_y_shift) { /* 422 / 444 */
2537 for (i=6; i<12; i++) {
2538 s->block_last_index[i] = 0;
2539 s->block[i][0] = s->block[4][0];
2544 // non c quantize code returns incorrect block_last_index FIXME
2545 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2546 for (i = 0; i < mb_block_count; i++) {
2548 if (s->block_last_index[i] > 0) {
2549 for (j = 63; j > 0; j--) {
2550 if (s->block[i][s->intra_scantable.permutated[j]])
2553 s->block_last_index[i] = j;
2558 /* huffman encode */
2559 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2560 case AV_CODEC_ID_MPEG1VIDEO:
2561 case AV_CODEC_ID_MPEG2VIDEO:
2562 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2563 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2565 case AV_CODEC_ID_MPEG4:
2566 if (CONFIG_MPEG4_ENCODER)
2567 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2569 case AV_CODEC_ID_MSMPEG4V2:
2570 case AV_CODEC_ID_MSMPEG4V3:
2571 case AV_CODEC_ID_WMV1:
2572 if (CONFIG_MSMPEG4_ENCODER)
2573 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2575 case AV_CODEC_ID_WMV2:
2576 if (CONFIG_WMV2_ENCODER)
2577 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2579 case AV_CODEC_ID_H261:
2580 if (CONFIG_H261_ENCODER)
2581 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2583 case AV_CODEC_ID_H263:
2584 case AV_CODEC_ID_H263P:
2585 case AV_CODEC_ID_FLV1:
2586 case AV_CODEC_ID_RV10:
2587 case AV_CODEC_ID_RV20:
2588 if (CONFIG_H263_ENCODER)
2589 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2591 case AV_CODEC_ID_MJPEG:
2592 case AV_CODEC_ID_AMV:
2593 if (CONFIG_MJPEG_ENCODER)
2594 ff_mjpeg_encode_mb(s, s->block);
2596 case AV_CODEC_ID_SPEEDHQ:
2597 if (CONFIG_SPEEDHQ_ENCODER)
2598 ff_speedhq_encode_mb(s, s->block);
/* Dispatch to the encode_mb_internal() instantiation matching the chroma
 * subsampling: 4:2:0 -> 8x8 chroma, 6 blocks; 4:2:2 -> 16x8 chroma, 8 blocks;
 * anything else (4:4:4) -> 16x16 chroma, 12 blocks.
 * NOTE(review): this listing elides lines (e.g. the function braces). */
2605 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2607 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2608 else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2609 else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/* Snapshot the encoder state that a trial macroblock encode may clobber,
 * copying from *s into *d, so a candidate coding mode can be tried and then
 * discarded (see encode_mb_hq()).  Covers MV prediction state, skip-run,
 * last DC values, the per-category bit counters, and quantizer state.
 * NOTE(review): elided listing -- braces/loop headers between the numbered
 * lines (e.g. around the last_dc[i] copy) are not shown here. */
2612 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2615 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2618 d->mb_skip_run= s->mb_skip_run;
/* DC predictors (one per component; loop header elided in this listing). */
2620 d->last_dc[i] = s->last_dc[i];
/* Rate-control / statistics counters. */
2623 d->mv_bits= s->mv_bits;
2624 d->i_tex_bits= s->i_tex_bits;
2625 d->p_tex_bits= s->p_tex_bits;
2626 d->i_count= s->i_count;
2627 d->f_count= s->f_count;
2628 d->b_count= s->b_count;
2629 d->skip_count= s->skip_count;
2630 d->misc_bits= s->misc_bits;
/* Quantizer state. */
2634 d->qscale= s->qscale;
2635 d->dquant= s->dquant;
/* MSMPEG4 escape-code length state. */
2637 d->esc3_level_length= s->esc3_level_length;
/* Counterpart of copy_context_before_encode(): after a trial encode, copy
 * the updated state from *s into *d.  In addition to the fields saved before
 * the trial, this also carries over the encode results: the chosen MVs,
 * mb_intra/mb_skipped/mv_type/mv_dir, per-block last indices, interlaced_dct
 * flag and (when data partitioning is on) the partition bit writers.
 * NOTE(review): elided listing -- loop headers/braces around last_dc[i] and
 * block_last_index[i] are not visible here. */
2640 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2643 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2644 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2647 d->mb_skip_run= s->mb_skip_run;
2649 d->last_dc[i] = s->last_dc[i];
/* Rate-control / statistics counters. */
2652 d->mv_bits= s->mv_bits;
2653 d->i_tex_bits= s->i_tex_bits;
2654 d->p_tex_bits= s->p_tex_bits;
2655 d->i_count= s->i_count;
2656 d->f_count= s->f_count;
2657 d->b_count= s->b_count;
2658 d->skip_count= s->skip_count;
2659 d->misc_bits= s->misc_bits;
/* Results of the macroblock decision just made. */
2661 d->mb_intra= s->mb_intra;
2662 d->mb_skipped= s->mb_skipped;
2663 d->mv_type= s->mv_type;
2664 d->mv_dir= s->mv_dir;
2666 if(s->data_partitioning){
2668 d->tex_pb= s->tex_pb;
2672 d->block_last_index[i]= s->block_last_index[i];
2673 d->interlaced_dct= s->interlaced_dct;
2674 d->qscale= s->qscale;
2676 d->esc3_level_length= s->esc3_level_length;
/* Trial-encode one candidate macroblock coding mode ('type') for the
 * high-quality (multi-mode) decision loop.  The MB is encoded into one of two
 * alternating scratch bit buffers (indexed by *next_block) and, for RD mode
 * decision, reconstructed into the rd_scratchpad instead of the real picture
 * so the trial leaves no side effects.  The resulting score -- plain bit
 * count, or lambda2*bits + SSE when mb_decision==FF_MB_DECISION_RD -- is
 * compared against *dmin; the winning mode's context is saved into *best.
 * NOTE(review): elided listing -- the score comparison / *dmin update and
 * several braces between the numbered lines are not visible here. */
2679 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2680 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2681 int *dmin, int *next_block, int motion_x, int motion_y)
2684 uint8_t *dest_backup[3];
2686 copy_context_before_encode(s, backup, type);
/* Use the alternate scratch block set / bit writer for this trial. */
2688 s->block= s->blocks[*next_block];
2689 s->pb= pb[*next_block];
2690 if(s->data_partitioning){
2691 s->pb2 = pb2 [*next_block];
2692 s->tex_pb= tex_pb[*next_block];
/* Redirect reconstruction output into the scratchpad so the real
 * destination planes stay untouched during the trial. */
2696 memcpy(dest_backup, s->dest, sizeof(s->dest));
2697 s->dest[0] = s->sc.rd_scratchpad;
2698 s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2699 s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2700 av_assert0(s->linesize >= 32); //FIXME
2703 encode_mb(s, motion_x, motion_y);
/* Score = bits spent (all partitions when data partitioning is on). */
2705 score= put_bits_count(&s->pb);
2706 if(s->data_partitioning){
2707 score+= put_bits_count(&s->pb2);
2708 score+= put_bits_count(&s->tex_pb);
/* For rate-distortion decision, add the distortion term. */
2711 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2712 ff_mpv_reconstruct_mb(s, s->block);
2714 score *= s->lambda2;
2715 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2719 memcpy(s->dest, dest_backup, sizeof(s->dest));
2726 copy_context_after_encode(best, s, type);
/* Sum of squared errors between two w x h pixel regions with the given
 * stride.  Uses the optimized mecc.sse[] functions for the common 16x16 and
 * 8x8 sizes, otherwise falls back to a scalar loop using the squared-value
 * lookup table (ff_square_tab is biased by +256 so negative differences
 * index correctly).
 * NOTE(review): elided listing -- the if(w==16&&h==16) guard, the fallback
 * loop headers and the final return are not visible here. */
2730 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2731 const uint32_t *sq = ff_square_tab + 256;
2736 return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2737 else if(w==8 && h==8)
2738 return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2742 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/* Distortion (SSE, or NSSE when mb_cmp selects it) of the current
 * reconstructed macroblock in s->dest[] versus the source picture
 * (s->new_picture).  For full 16x16 MBs the optimized mecc functions are
 * used; MBs clipped at the right/bottom picture edge (w or h < 16) fall back
 * to the generic sse() helper with the clipped dimensions.
 * NOTE(review): elided listing -- the w/h initialization and the guard that
 * separates the fast path from the edge fallback are not visible here. */
2751 static int sse_mb(MpegEncContext *s){
2755 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2756 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2759 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2760 return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2761 s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2762 s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2764 return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2765 s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2766 s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
/* Edge macroblock: chroma dimensions are half the clipped luma size
 * (w>>1, h>>1), which assumes 4:2:0 subsampling here. */
2769 return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2770 +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2771 +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/* Slice-threading worker: motion-estimation pre-pass for P frames over this
 * context's MB rows.  Note the macroblocks are visited in reverse raster
 * order (bottom-right to top-left), with first_slice_line cleared after the
 * first (i.e. last) row has been processed. */
2774 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2775 MpegEncContext *s= *(void**)arg;
/* Pre-pass uses its own (usually smaller) diamond size. */
2779 s->me.dia_size= s->avctx->pre_dia_size;
2780 s->first_slice_line=1;
2781 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2782 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2783 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2785 s->first_slice_line=0;
/* Slice-threading worker: full motion estimation for this context's MB rows.
 * Computes motion vectors and macroblock types and stores them in the
 * context (P frames via ff_estimate_p_frame_motion(), B frames via
 * ff_estimate_b_frame_motion()). */
2793 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2794 MpegEncContext *s= *(void**)arg;
2796 s->me.dia_size= s->avctx->dia_size;
2797 s->first_slice_line=1;
2798 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2799 s->mb_x=0; //for block init below
2800 ff_init_block_index(s);
2801 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
/* Advance the luma block indices by one MB (2 blocks horizontally). */
2802 s->block_index[0]+=2;
2803 s->block_index[1]+=2;
2804 s->block_index[2]+=2;
2805 s->block_index[3]+=2;
2807 /* compute motion vector & mb_type and store in context */
2808 if(s->pict_type==AV_PICTURE_TYPE_B)
2809 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2811 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2813 s->first_slice_line=0;
/* Slice-threading worker: compute per-macroblock luma variance and mean of
 * the source picture.  varc = E[x^2] - E[x]^2 over the 16x16 luma block
 * (with rounding constants), stored in current_picture.mb_var/mb_mean and
 * accumulated into me.mb_var_sum_temp for rate control.
 * NOTE(review): elided listing -- the xx/yy coordinate setup lines are not
 * visible here. */
2818 static int mb_var_thread(AVCodecContext *c, void *arg){
2819 MpegEncContext *s= *(void**)arg;
2822 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2823 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2826 uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2828 int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
/* Variance: sum of squares minus (sum^2 / 256), +500+128 for rounding. */
2830 varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2831 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2833 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2834 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2835 s->me.mb_var_sum_temp += varc;
/* Finish the current slice: codec-specific trailing work (merge MPEG-4 data
 * partitions and write stuffing, MJPEG stuffing, SpeedHQ end-of-slice), then
 * byte-align the bitstream.  For two-pass encoding, the alignment/stuffing
 * bits are accounted as misc_bits. */
2841 static void write_slice_end(MpegEncContext *s){
2842 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2843 if(s->partitioned_frame){
2844 ff_mpeg4_merge_partitions(s);
2847 ff_mpeg4_stuffing(&s->pb);
2848 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2849 ff_mjpeg_encode_stuffing(s);
2850 } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
2851 ff_speedhq_end_slice(s);
2854 flush_put_bits(&s->pb);
2856 if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2857 s->misc_bits+= get_bits_diff(s);
/* Backfill one 12-byte H.263 macroblock-info record (used for the
 * AV_PKT_DATA_H263_MB_INFO side data): bit offset of the MB, quantizer, GOB
 * number, macroblock address within the GOB, and the predicted MV for the
 * first block.  The record was reserved earlier by update_mb_info(); ptr
 * points at its slot, 12 bytes before the current end of mb_info storage.
 * The second MV pair is always written as 0 (4MV not implemented). */
2860 static void write_mb_info(MpegEncContext *s)
2862 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2863 int offset = put_bits_count(&s->pb);
2864 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2865 int gobn = s->mb_y / s->gob_index;
2867 if (CONFIG_H263_ENCODER)
2868 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2869 bytestream_put_le32(&ptr, offset);
2870 bytestream_put_byte(&ptr, s->qscale);
2871 bytestream_put_byte(&ptr, gobn);
2872 bytestream_put_le16(&ptr, mba);
2873 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2874 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2875 /* 4MV not implemented */
2876 bytestream_put_byte(&ptr, 0); /* hmv2 */
2877 bytestream_put_byte(&ptr, 0); /* vmv2 */
/* Decide whether a new mb_info record should be opened, based on how many
 * bytes have been written since the last record (s->mb_info is the requested
 * interval in bytes).  Called with startcode=1 right after a resync/start
 * code and startcode=0 before each macroblock.
 * NOTE(review): elided listing -- the early-return guards and the branch
 * structure between the numbered lines are not visible here. */
2880 static void update_mb_info(MpegEncContext *s, int startcode)
/* Interval reached: reserve a fresh 12-byte record slot. */
2884 if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2885 s->mb_info_size += 12;
2886 s->prev_mb_info = s->last_mb_info;
2889 s->prev_mb_info = put_bits_count(&s->pb)/8;
2890 /* This might have incremented mb_info_size above, and we return without
2891 * actually writing any info into that slot yet. But in that case,
2892 * this will be called again at the start of the after writing the
2893 * start code, actually writing the mb info. */
2897 s->last_mb_info = put_bits_count(&s->pb)/8;
2898 if (!s->mb_info_size)
2899 s->mb_info_size += 12;
/* Ensure at least 'threshold' bytes remain in the PutBitContext's buffer,
 * growing the avctx-internal byte buffer by 'size_increase' when needed.
 * Only applicable when there is a single slice context and the PutBitContext
 * writes directly into avctx->internal->byte_buffer.  On reallocation the
 * PutBitContext is rebased and the dependent raw pointers (ptr_lastgob,
 * vbv_delay_ptr) are re-derived from their saved offsets.
 * Returns 0 on success (elided in this listing), AVERROR(ENOMEM) on
 * allocation failure/overflow, AVERROR(EINVAL) if still below threshold. */
2903 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2905 if ( s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2906 && s->slice_context_count == 1
2907 && s->pb.buf == s->avctx->internal->byte_buffer) {
/* Save offsets of pointers into the old buffer so they can be rebased. */
2908 int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2909 int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2911 uint8_t *new_buffer = NULL;
2912 int new_buffer_size = 0;
/* Guard against int overflow of the grown size. */
2914 if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2915 av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2916 return AVERROR(ENOMEM);
2921 av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2922 s->avctx->internal->byte_buffer_size + size_increase);
2924 return AVERROR(ENOMEM);
2926 memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2927 av_free(s->avctx->internal->byte_buffer);
2928 s->avctx->internal->byte_buffer = new_buffer;
2929 s->avctx->internal->byte_buffer_size = new_buffer_size;
2930 rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2931 s->ptr_lastgob = s->pb.buf + lastgob_pos;
2932 s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2934 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2935 return AVERROR(EINVAL);
2939 static int encode_thread(AVCodecContext *c, void *arg){
2940 MpegEncContext *s= *(void**)arg;
2941 int mb_x, mb_y, mb_y_order;
2942 int chr_h= 16>>s->chroma_y_shift;
2944 MpegEncContext best_s = { 0 }, backup_s;
2945 uint8_t bit_buf[2][MAX_MB_BYTES];
2946 uint8_t bit_buf2[2][MAX_MB_BYTES];
2947 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2948 PutBitContext pb[2], pb2[2], tex_pb[2];
2951 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2952 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2953 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2956 s->last_bits= put_bits_count(&s->pb);
2967 /* init last dc values */
2968 /* note: quant matrix value (8) is implied here */
2969 s->last_dc[i] = 128 << s->intra_dc_precision;
2971 s->current_picture.encoding_error[i] = 0;
2973 if(s->codec_id==AV_CODEC_ID_AMV){
2974 s->last_dc[0] = 128*8/13;
2975 s->last_dc[1] = 128*8/14;
2976 s->last_dc[2] = 128*8/14;
2979 memset(s->last_mv, 0, sizeof(s->last_mv));
2983 switch(s->codec_id){
2984 case AV_CODEC_ID_H263:
2985 case AV_CODEC_ID_H263P:
2986 case AV_CODEC_ID_FLV1:
2987 if (CONFIG_H263_ENCODER)
2988 s->gob_index = H263_GOB_HEIGHT(s->height);
2990 case AV_CODEC_ID_MPEG4:
2991 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2992 ff_mpeg4_init_partitions(s);
2998 s->first_slice_line = 1;
2999 s->ptr_lastgob = s->pb.buf;
3000 for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
3001 if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
3003 mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
3004 if (first_in_slice && mb_y_order != s->start_mb_y)
3005 ff_speedhq_end_slice(s);
3006 s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
3013 ff_set_qscale(s, s->qscale);
3014 ff_init_block_index(s);
3016 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3017 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
3018 int mb_type= s->mb_type[xy];
3022 int size_increase = s->avctx->internal->byte_buffer_size/4
3023 + s->mb_width*MAX_MB_BYTES;
3025 ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
3026 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
3027 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
3030 if(s->data_partitioning){
3031 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
3032 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
3033 av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3039 s->mb_y = mb_y; // moved into loop, can get changed by H.261
3040 ff_update_block_index(s);
3042 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3043 ff_h261_reorder_mb_index(s);
3044 xy= s->mb_y*s->mb_stride + s->mb_x;
3045 mb_type= s->mb_type[xy];
3048 /* write gob / video packet header */
3050 int current_packet_size, is_gob_start;
3052 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
3054 is_gob_start = s->rtp_payload_size &&
3055 current_packet_size >= s->rtp_payload_size &&
3058 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3060 switch(s->codec_id){
3061 case AV_CODEC_ID_H263:
3062 case AV_CODEC_ID_H263P:
3063 if(!s->h263_slice_structured)
3064 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3066 case AV_CODEC_ID_MPEG2VIDEO:
3067 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3068 case AV_CODEC_ID_MPEG1VIDEO:
3069 if(s->mb_skip_run) is_gob_start=0;
3071 case AV_CODEC_ID_MJPEG:
3072 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3077 if(s->start_mb_y != mb_y || mb_x!=0){
3080 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3081 ff_mpeg4_init_partitions(s);
3085 av_assert2((put_bits_count(&s->pb)&7) == 0);
3086 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3088 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3089 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3090 int d = 100 / s->error_rate;
3092 current_packet_size=0;
3093 s->pb.buf_ptr= s->ptr_lastgob;
3094 av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3098 #if FF_API_RTP_CALLBACK
3099 FF_DISABLE_DEPRECATION_WARNINGS
3100 if (s->avctx->rtp_callback){
3101 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3102 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3104 FF_ENABLE_DEPRECATION_WARNINGS
3106 update_mb_info(s, 1);
3108 switch(s->codec_id){
3109 case AV_CODEC_ID_MPEG4:
3110 if (CONFIG_MPEG4_ENCODER) {
3111 ff_mpeg4_encode_video_packet_header(s);
3112 ff_mpeg4_clean_buffers(s);
3115 case AV_CODEC_ID_MPEG1VIDEO:
3116 case AV_CODEC_ID_MPEG2VIDEO:
3117 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3118 ff_mpeg1_encode_slice_header(s);
3119 ff_mpeg1_clean_buffers(s);
3122 case AV_CODEC_ID_H263:
3123 case AV_CODEC_ID_H263P:
3124 if (CONFIG_H263_ENCODER)
3125 ff_h263_encode_gob_header(s, mb_y);
3129 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3130 int bits= put_bits_count(&s->pb);
3131 s->misc_bits+= bits - s->last_bits;
3135 s->ptr_lastgob += current_packet_size;
3136 s->first_slice_line=1;
3137 s->resync_mb_x=mb_x;
3138 s->resync_mb_y=mb_y;
3142 if( (s->resync_mb_x == s->mb_x)
3143 && s->resync_mb_y+1 == s->mb_y){
3144 s->first_slice_line=0;
3148 s->dquant=0; //only for QP_RD
3150 update_mb_info(s, 0);
3152 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3154 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3156 copy_context_before_encode(&backup_s, s, -1);
3158 best_s.data_partitioning= s->data_partitioning;
3159 best_s.partitioned_frame= s->partitioned_frame;
3160 if(s->data_partitioning){
3161 backup_s.pb2= s->pb2;
3162 backup_s.tex_pb= s->tex_pb;
3165 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3166 s->mv_dir = MV_DIR_FORWARD;
3167 s->mv_type = MV_TYPE_16X16;
3169 s->mv[0][0][0] = s->p_mv_table[xy][0];
3170 s->mv[0][0][1] = s->p_mv_table[xy][1];
3171 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3172 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3174 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3175 s->mv_dir = MV_DIR_FORWARD;
3176 s->mv_type = MV_TYPE_FIELD;
3179 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3180 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3181 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3183 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3184 &dmin, &next_block, 0, 0);
3186 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3187 s->mv_dir = MV_DIR_FORWARD;
3188 s->mv_type = MV_TYPE_16X16;
3192 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3193 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3195 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3196 s->mv_dir = MV_DIR_FORWARD;
3197 s->mv_type = MV_TYPE_8X8;
3200 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3201 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3203 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3204 &dmin, &next_block, 0, 0);
3206 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3207 s->mv_dir = MV_DIR_FORWARD;
3208 s->mv_type = MV_TYPE_16X16;
3210 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3211 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3212 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3213 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3215 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3216 s->mv_dir = MV_DIR_BACKWARD;
3217 s->mv_type = MV_TYPE_16X16;
3219 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3220 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3221 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3222 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3224 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3225 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3226 s->mv_type = MV_TYPE_16X16;
3228 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3229 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3230 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3231 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3232 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3233 &dmin, &next_block, 0, 0);
3235 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3236 s->mv_dir = MV_DIR_FORWARD;
3237 s->mv_type = MV_TYPE_FIELD;
3240 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3241 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3242 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3244 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3245 &dmin, &next_block, 0, 0);
3247 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3248 s->mv_dir = MV_DIR_BACKWARD;
3249 s->mv_type = MV_TYPE_FIELD;
3252 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3253 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3254 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3256 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3257 &dmin, &next_block, 0, 0);
3259 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3260 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3261 s->mv_type = MV_TYPE_FIELD;
3263 for(dir=0; dir<2; dir++){
3265 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3266 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3267 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3270 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3271 &dmin, &next_block, 0, 0);
3273 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3275 s->mv_type = MV_TYPE_16X16;
3279 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3280 &dmin, &next_block, 0, 0);
3281 if(s->h263_pred || s->h263_aic){
3283 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3285 ff_clean_intra_table_entries(s); //old mode?
3289 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3290 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3291 const int last_qp= backup_s.qscale;
3294 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3295 static const int dquant_tab[4]={-1,1,-2,2};
3296 int storecoefs = s->mb_intra && s->dc_val[0];
3298 av_assert2(backup_s.dquant == 0);
3301 s->mv_dir= best_s.mv_dir;
3302 s->mv_type = MV_TYPE_16X16;
3303 s->mb_intra= best_s.mb_intra;
3304 s->mv[0][0][0] = best_s.mv[0][0][0];
3305 s->mv[0][0][1] = best_s.mv[0][0][1];
3306 s->mv[1][0][0] = best_s.mv[1][0][0];
3307 s->mv[1][0][1] = best_s.mv[1][0][1];
3309 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3310 for(; qpi<4; qpi++){
3311 int dquant= dquant_tab[qpi];
3312 qp= last_qp + dquant;
3313 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3315 backup_s.dquant= dquant;
3318 dc[i]= s->dc_val[0][ s->block_index[i] ];
3319 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3323 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3324 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3325 if(best_s.qscale != qp){
3328 s->dc_val[0][ s->block_index[i] ]= dc[i];
3329 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3336 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3337 int mx= s->b_direct_mv_table[xy][0];
3338 int my= s->b_direct_mv_table[xy][1];
3340 backup_s.dquant = 0;
3341 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3343 ff_mpeg4_set_direct_mv(s, mx, my);
3344 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3345 &dmin, &next_block, mx, my);
3347 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3348 backup_s.dquant = 0;
3349 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3351 ff_mpeg4_set_direct_mv(s, 0, 0);
3352 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3353 &dmin, &next_block, 0, 0);
3355 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3358 coded |= s->block_last_index[i];
3361 memcpy(s->mv, best_s.mv, sizeof(s->mv));
3362 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3363 mx=my=0; //FIXME find the one we actually used
3364 ff_mpeg4_set_direct_mv(s, mx, my);
3365 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3373 s->mv_dir= best_s.mv_dir;
3374 s->mv_type = best_s.mv_type;
3376 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3377 s->mv[0][0][1] = best_s.mv[0][0][1];
3378 s->mv[1][0][0] = best_s.mv[1][0][0];
3379 s->mv[1][0][1] = best_s.mv[1][0][1];*/
3382 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3383 &dmin, &next_block, mx, my);
3388 s->current_picture.qscale_table[xy] = best_s.qscale;
3390 copy_context_after_encode(s, &best_s, -1);
3392 pb_bits_count= put_bits_count(&s->pb);
3393 flush_put_bits(&s->pb);
3394 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3397 if(s->data_partitioning){
3398 pb2_bits_count= put_bits_count(&s->pb2);
3399 flush_put_bits(&s->pb2);
3400 ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3401 s->pb2= backup_s.pb2;
3403 tex_pb_bits_count= put_bits_count(&s->tex_pb);
3404 flush_put_bits(&s->tex_pb);
3405 ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3406 s->tex_pb= backup_s.tex_pb;
3408 s->last_bits= put_bits_count(&s->pb);
3410 if (CONFIG_H263_ENCODER &&
3411 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3412 ff_h263_update_motion_val(s);
3414 if(next_block==0){ //FIXME 16 vs linesize16
3415 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3416 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3417 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3420 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3421 ff_mpv_reconstruct_mb(s, s->block);
3423 int motion_x = 0, motion_y = 0;
3424 s->mv_type=MV_TYPE_16X16;
3425 // only one MB-Type possible
3428 case CANDIDATE_MB_TYPE_INTRA:
3431 motion_x= s->mv[0][0][0] = 0;
3432 motion_y= s->mv[0][0][1] = 0;
3434 case CANDIDATE_MB_TYPE_INTER:
3435 s->mv_dir = MV_DIR_FORWARD;
3437 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3438 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3440 case CANDIDATE_MB_TYPE_INTER_I:
3441 s->mv_dir = MV_DIR_FORWARD;
3442 s->mv_type = MV_TYPE_FIELD;
3445 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3446 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3447 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3450 case CANDIDATE_MB_TYPE_INTER4V:
3451 s->mv_dir = MV_DIR_FORWARD;
3452 s->mv_type = MV_TYPE_8X8;
3455 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3456 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3459 case CANDIDATE_MB_TYPE_DIRECT:
3460 if (CONFIG_MPEG4_ENCODER) {
3461 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3463 motion_x=s->b_direct_mv_table[xy][0];
3464 motion_y=s->b_direct_mv_table[xy][1];
3465 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3468 case CANDIDATE_MB_TYPE_DIRECT0:
3469 if (CONFIG_MPEG4_ENCODER) {
3470 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3472 ff_mpeg4_set_direct_mv(s, 0, 0);
3475 case CANDIDATE_MB_TYPE_BIDIR:
3476 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3478 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3479 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3480 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3481 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3483 case CANDIDATE_MB_TYPE_BACKWARD:
3484 s->mv_dir = MV_DIR_BACKWARD;
3486 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3487 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3489 case CANDIDATE_MB_TYPE_FORWARD:
3490 s->mv_dir = MV_DIR_FORWARD;
3492 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3493 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3495 case CANDIDATE_MB_TYPE_FORWARD_I:
3496 s->mv_dir = MV_DIR_FORWARD;
3497 s->mv_type = MV_TYPE_FIELD;
3500 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3501 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3502 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3505 case CANDIDATE_MB_TYPE_BACKWARD_I:
3506 s->mv_dir = MV_DIR_BACKWARD;
3507 s->mv_type = MV_TYPE_FIELD;
3510 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3511 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3512 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3515 case CANDIDATE_MB_TYPE_BIDIR_I:
3516 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3517 s->mv_type = MV_TYPE_FIELD;
3519 for(dir=0; dir<2; dir++){
3521 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3522 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3523 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3528 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3531 encode_mb(s, motion_x, motion_y);
3533 // RAL: Update last macroblock type
3534 s->last_mv_dir = s->mv_dir;
3536 if (CONFIG_H263_ENCODER &&
3537 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3538 ff_h263_update_motion_val(s);
3540 ff_mpv_reconstruct_mb(s, s->block);
3543 /* clean the MV table in IPS frames for direct mode in B-frames */
3544 if(s->mb_intra /* && I,P,S_TYPE */){
3545 s->p_mv_table[xy][0]=0;
3546 s->p_mv_table[xy][1]=0;
3549 if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3553 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3554 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3556 s->current_picture.encoding_error[0] += sse(
3557 s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3558 s->dest[0], w, h, s->linesize);
3559 s->current_picture.encoding_error[1] += sse(
3560 s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3561 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3562 s->current_picture.encoding_error[2] += sse(
3563 s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3564 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3567 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3568 ff_h263_loop_filter(s);
3570 ff_dlog(s->avctx, "MB %d %d bits\n",
3571 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3575 //not beautiful here but we must write it before flushing so it has to be here
3576 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3577 ff_msmpeg4_encode_ext_header(s);
3581 #if FF_API_RTP_CALLBACK
3582 FF_DISABLE_DEPRECATION_WARNINGS
3583 /* Send the last GOB if RTP */
3584 if (s->avctx->rtp_callback) {
3585 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3586 int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3587 /* Call the RTP callback to send the last GOB */
3589 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3591 FF_ENABLE_DEPRECATION_WARNINGS
/* Add a statistics field from a slice (worker) context into the main
 * context and zero it in the source, so a later merge cannot count it
 * twice.  NOTE(review): the macro expands to two statements without a
 * do{}while(0) wrapper — safe only while every call site uses it as a
 * full standalone statement, as all visible call sites here do. */
3597 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold per-slice motion-estimation statistics (scene-change score and
 * MB variance accumulators) from a worker thread context back into the
 * main encoder context after the ME pass. */
3598 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3599 MERGE(me.scene_change_score);
3600 MERGE(me.mc_mb_var_sum_temp);
3601 MERGE(me.mb_var_sum_temp);
/* Merge a worker slice context into the main context after encoding:
 * accumulates bit/statistics counters, error counts and PSNR error sums,
 * optionally the noise-reduction DCT error sums, and finally appends the
 * slice's byte-aligned bitstream onto the main PutBitContext. */
3604 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3607 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3608 MERGE(dct_count[1]);
3617 MERGE(er.error_count);
3618 MERGE(padding_bug_score);
3619 MERGE(current_picture.encoding_error[0]);
3620 MERGE(current_picture.encoding_error[1]);
3621 MERGE(current_picture.encoding_error[2]);
3623 if (dst->noise_reduction){
3624 for(i=0; i<64; i++){
3625 MERGE(dct_error_sum[0][i]);
3626 MERGE(dct_error_sum[1][i]);
/* Both bitstreams must be byte-aligned before the raw copy below. */
3630 av_assert1(put_bits_count(&src->pb) % 8 ==0);
3631 av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3632 ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3633 flush_put_bits(&dst->pb);
/* Choose the frame quality (lambda / qscale) for the current picture.
 * Priority: an explicitly queued next_lambda, then the rate controller
 * (unless fixed_qscale), consumed only when this is not a dry run.
 * With adaptive quantization the per-MB qscale tables are cleaned for
 * codecs that constrain qscale deltas (MPEG-4 / H.263 family) and the
 * qscale table is (re)initialized.  Returns <0 on rate-control failure
 * (error path partly outside this excerpt). */
3636 static int estimate_qp(MpegEncContext *s, int dry_run){
3637 if (s->next_lambda){
3638 s->current_picture_ptr->f->quality =
3639 s->current_picture.f->quality = s->next_lambda;
3640 if(!dry_run) s->next_lambda= 0;
3641 } else if (!s->fixed_qscale) {
3642 int quality = ff_rate_estimate_qscale(s, dry_run);
3643 s->current_picture_ptr->f->quality =
3644 s->current_picture.f->quality = quality;
3645 if (s->current_picture.f->quality < 0)
3649 if(s->adaptive_quant){
3650 switch(s->codec_id){
3651 case AV_CODEC_ID_MPEG4:
3652 if (CONFIG_MPEG4_ENCODER)
3653 ff_clean_mpeg4_qscales(s);
3655 case AV_CODEC_ID_H263:
3656 case AV_CODEC_ID_H263P:
3657 case AV_CODEC_ID_FLV1:
3658 if (CONFIG_H263_ENCODER)
3659 ff_clean_h263_qscales(s);
3662 ff_init_qscale_tab(s);
/* With adaptive quant the frame lambda starts from the first MB's entry. */
3665 s->lambda= s->lambda_table[0];
3668 s->lambda = s->current_picture.f->quality;
3673 /* must be called before writing the header */
/* Derive temporal distances from the frame PTS: s->time for the current
 * frame, pp_time = distance between the two surrounding non-B frames,
 * and for B-frames pb_time = distance from the previous non-B frame. */
3674 static void set_frame_distances(MpegEncContext * s){
3675 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3676 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3678 if(s->pict_type==AV_PICTURE_TYPE_B){
3679 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3680 av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
3682 s->pp_time= s->time - s->last_non_b_time;
3683 s->last_non_b_time= s->time;
3684 av_assert1(s->picture_number==0 || s->pp_time > 0);
/* Per-frame encoding driver.  Sequence (as visible here): reset ME
 * statistics, set temporal distances, pick the rounding mode, estimate
 * qp (2-pass or last-lambda fallback), run motion estimation across the
 * slice thread contexts, merge their statistics, optionally promote a
 * P-frame to I on scene change, select f_code/b_code and clamp long MVs,
 * build the quantization matrices (MJPEG/AMV have special handling),
 * write the codec-specific picture header, then run encode_thread over
 * all slice contexts and merge the resulting bitstreams.
 * Returns <0 on error (several error paths are on lines elided from
 * this excerpt). */
3688 static int encode_picture(MpegEncContext *s, int picture_number)
3692 int context_count = s->slice_context_count;
3694 s->picture_number = picture_number;
3696 /* Reset the average MB variance */
3697 s->me.mb_var_sum_temp =
3698 s->me.mc_mb_var_sum_temp = 0;
3700 /* we need to initialize some time vars before we can encode B-frames */
3701 // RAL: Condition added for MPEG1VIDEO
3702 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3703 set_frame_distances(s);
3704 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3705 ff_set_mpeg4_time(s);
3707 s->me.scene_change_score=0;
3709 //   s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* Rounding control: I-frames reset it; non-B frames may toggle it
 * ("flipflop") for codecs that signal per-frame rounding. */
3711 if(s->pict_type==AV_PICTURE_TYPE_I){
3712 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3713 else s->no_rounding=0;
3714 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3715 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3716 s->no_rounding ^= 1;
3719 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3720 if (estimate_qp(s,1) < 0)
3722 ff_get_2pass_fcode(s);
3723 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3724 if(s->pict_type==AV_PICTURE_TYPE_B)
3725 s->lambda= s->last_lambda_for[s->pict_type];
3727 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* Outside MJPEG/AMV the chroma intra matrices alias the luma ones;
 * free any previously separate chroma tables before aliasing. */
3731 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3732 if(s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
3733 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3734 s->q_chroma_intra_matrix   = s->q_intra_matrix;
3735 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3738 s->mb_intra=0; //for the rate distortion & bit compare functions
3739 for(i=1; i<context_count; i++){
3740 ret = ff_update_duplicate_context(s->thread_context[i], s);
3748 /* Estimate motion for every MB */
3749 if(s->pict_type != AV_PICTURE_TYPE_I){
/* me_penalty_compensation scales lambda in fixed point (<<8). */
3750 s->lambda  = (s->lambda  * s->me_penalty_compensation + 128) >> 8;
3751 s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3752 if (s->pict_type != AV_PICTURE_TYPE_B) {
3753 if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3755 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3759 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3760 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3762 for(i=0; i<s->mb_stride*s->mb_height; i++)
3763 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3765 if(!s->fixed_qscale){
3766 /* finding spatial complexity for I-frame rate control */
3767 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3770 for(i=1; i<context_count; i++){
3771 merge_context_after_me(s, s->thread_context[i]);
3773 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3774 s->current_picture.   mb_var_sum= s->current_picture_ptr->   mb_var_sum= s->me.   mb_var_sum_temp;
/* Scene change on a P-frame: re-encode the whole frame as intra. */
3777 if (s->me.scene_change_score > s->scenechange_threshold &&
3778 s->pict_type == AV_PICTURE_TYPE_P) {
3779 s->pict_type= AV_PICTURE_TYPE_I;
3780 for(i=0; i<s->mb_stride*s->mb_height; i++)
3781 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3782 if(s->msmpeg4_version >= 3)
3784 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3785 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* Pick the smallest f_code covering the P-frame MV range, then clamp
 * any remaining out-of-range vectors. */
3789 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3790 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3792 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3794 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3795 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3796 s->f_code= FFMAX3(s->f_code, a, b);
3799 ff_fix_long_p_mvs(s, s->intra_penalty ? CANDIDATE_MB_TYPE_INTER : CANDIDATE_MB_TYPE_INTRA);
3800 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3801 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3805 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3806 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
/* B-frames: forward range sets f_code, backward range sets b_code. */
3811 if(s->pict_type==AV_PICTURE_TYPE_B){
3814 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3815 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3816 s->f_code = FFMAX(a, b);
3818 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3819 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3820 s->b_code = FFMAX(a, b);
3822 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3823 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3824 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3825 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3826 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3828 for(dir=0; dir<2; dir++){
3831 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3832 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3833 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3834 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3842 if (estimate_qp(s, 0) < 0)
3845 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3846 s->pict_type == AV_PICTURE_TYPE_I &&
3847 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3848 s->qscale= 3; //reduce clipping problems
/* MJPEG bakes qscale directly into the quant matrices. */
3850 if (s->out_format == FMT_MJPEG) {
3851 const uint16_t *  luma_matrix = ff_mpeg1_default_intra_matrix;
3852 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3854 if (s->avctx->intra_matrix) {
3856 luma_matrix = s->avctx->intra_matrix;
3858 if (s->avctx->chroma_intra_matrix)
3859 chroma_matrix = s->avctx->chroma_intra_matrix;
3861 /* for mjpeg, we do include qscale in the matrix */
3863 int j = s->idsp.idct_permutation[i];
3865 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3866 s->       intra_matrix[j] = av_clip_uint8((  luma_matrix[i] * s->qscale) >> 3);
3868 s->y_dc_scale_table=
3869 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3870 s->chroma_intra_matrix[0] =
3871 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3872 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3873 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3874 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3875 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV uses fixed DC scales (13/14) and the sp5x quant tables. */
3878 if(s->codec_id == AV_CODEC_ID_AMV){
3879 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3880 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3882 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3884 s->intra_matrix[j]        = sp5x_qscale_five_quant_table[0][i];
3885 s->chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3887 s->y_dc_scale_table= y;
3888 s->c_dc_scale_table= c;
3889 s->intra_matrix[0] = 13;
3890 s->chroma_intra_matrix[0] = 14;
3891 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3892 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3893 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3894 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3898 if (s->out_format == FMT_SPEEDHQ) {
3899 s->y_dc_scale_table=
3900 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[3];
3903 //FIXME var duplication
3904 s->current_picture_ptr->f->key_frame =
3905 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3906 s->current_picture_ptr->f->pict_type =
3907 s->current_picture.f->pict_type = s->pict_type;
3909 if (s->current_picture.f->key_frame)
3910 s->picture_in_gop_number=0;
3912 s->mb_x = s->mb_y = 0;
3913 s->last_bits= put_bits_count(&s->pb);
/* Emit the codec-family-specific picture header. */
3914 switch(s->out_format) {
3916 if (CONFIG_MJPEG_ENCODER && s->huffman != HUFFMAN_TABLE_OPTIMAL)
3917 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3918 s->pred, s->intra_matrix, s->chroma_intra_matrix);
3921 if (CONFIG_SPEEDHQ_ENCODER)
3922 ff_speedhq_encode_picture_header(s);
3925 if (CONFIG_H261_ENCODER)
3926 ff_h261_encode_picture_header(s, picture_number);
3929 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3930 ff_wmv2_encode_picture_header(s, picture_number);
3931 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3932 ff_msmpeg4_encode_picture_header(s, picture_number);
3933 else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3934 ret = ff_mpeg4_encode_picture_header(s, picture_number);
3937 } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3938 ret = ff_rv10_encode_picture_header(s, picture_number);
3942 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3943 ff_rv20_encode_picture_header(s, picture_number);
3944 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3945 ff_flv_encode_picture_header(s, picture_number);
3946 else if (CONFIG_H263_ENCODER)
3947 ff_h263_encode_picture_header(s, picture_number);
3950 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3951 ff_mpeg1_encode_picture_header(s, picture_number);
3956 bits= put_bits_count(&s->pb);
3957 s->header_bits= bits - s->last_bits;
3959 for(i=1; i<context_count; i++){
3960 update_duplicate_context_after_me(s->thread_context[i], s);
/* Encode all slices in parallel, then splice each slice's bitstream
 * (growing the main buffer view when they are contiguous) and merge
 * the per-slice statistics. */
3962 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3963 for(i=1; i<context_count; i++){
3964 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3965 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3966 merge_context_after_encode(s, s->thread_context[i]);
/* Noise-reduction filter applied to a DCT coefficient block before
 * quantization: each coefficient is accumulated into dct_error_sum and
 * shrunk towards zero by dct_offset, clamping at zero so a coefficient
 * never changes sign.  Separate statistics are kept for intra vs inter
 * blocks (indexed by s->mb_intra). */
3972 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3973 const int intra= s->mb_intra;
3976 s->dct_count[intra]++;
3978 for(i=0; i<64; i++){
3979 int level= block[i];
/* positive branch: subtract the offset, floor at 0 */
3983 s->dct_error_sum[intra][i] += level;
3984 level -= s->dct_offset[intra][i];
3985 if(level<0) level=0;
/* negative branch: add the offset, cap at 0 */
3987 s->dct_error_sum[intra][i] -= level;
3988 level += s->dct_offset[intra][i];
3989 if(level>0) level=0;
/* Rate-distortion optimal (trellis) quantization of one 8x8 block.
 * Forward-DCTs the block, builds up to two candidate quantized levels
 * per coefficient, then runs a Viterbi-style search over "survivor"
 * positions minimizing distortion + lambda * bits, using the codec's
 * VLC length tables.  Writes the chosen levels back into block[] in
 * permuted scan order and returns the index of the last nonzero
 * coefficient (or -1 if the block quantizes to all zero).
 * *overflow is set when a level exceeded s->max_qcoeff. */
3996 static int dct_quantize_trellis_c(MpegEncContext *s,
3997 int16_t *block, int n,
3998 int qscale, int *overflow){
4000 const uint16_t *matrix;
4001 const uint8_t *scantable;
4002 const uint8_t *perm_scantable;
4004 unsigned int threshold1, threshold2;
4016 int coeff_count[64];
4017 int qmul, qadd, start_i, last_non_zero, i, dc;
4018 const int esc_length= s->ac_esc_length;
4020 uint8_t * last_length;
4021 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
4024 s->fdsp.fdct(block);
4026 if(s->dct_error_sum)
4027 s->denoise_dct(s, block);
4029 qadd= ((qscale-1)|1)*8;
/* MPEG-2 can use a non-linear qscale mapping. */
4031 if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
4032 else                 mpeg2_qscale = qscale << 1;
/* Intra path: select intra scan tables, matrices and VLC lengths. */
4036 scantable= s->intra_scantable.scantable;
4037 perm_scantable= s->intra_scantable.permutated;
4045 /* For AIC we skip quant/dequant of INTRADC */
4050 /* note: block[0] is assumed to be positive */
4051 block[0] = (block[0] + (q >> 1)) / q;
4054 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4055 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4056 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4057 bias= 1<<(QMAT_SHIFT-1);
4059 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4060 length     = s->intra_chroma_ac_vlc_length;
4061 last_length= s->intra_chroma_ac_vlc_last_length;
4063 length     = s->intra_ac_vlc_length;
4064 last_length= s->intra_ac_vlc_last_length;
/* Inter path. */
4067 scantable= s->inter_scantable.scantable;
4068 perm_scantable= s->inter_scantable.permutated;
4071 qmat = s->q_inter_matrix[qscale];
4072 matrix = s->inter_matrix;
4073 length     = s->inter_ac_vlc_length;
4074 last_length= s->inter_ac_vlc_last_length;
4078 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4079 threshold2= (threshold1<<1);
/* Scan backwards to find the last coefficient above the zero threshold. */
4081 for(i=63; i>=start_i; i--) {
4082 const int j = scantable[i];
4083 int level = block[j] * qmat[j];
4085 if(((unsigned)(level+threshold1))>threshold2){
/* Build candidate levels (level and level-1) per coefficient. */
4091 for(i=start_i; i<=last_non_zero; i++) {
4092 const int j = scantable[i];
4093 int level = block[j] * qmat[j];
4095 //        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
4096 //           || bias-level >= (1<<(QMAT_SHIFT - 3))){
4097 if(((unsigned)(level+threshold1))>threshold2){
4099 level= (bias + level)>>QMAT_SHIFT;
4101 coeff[1][i]= level-1;
4102 //                coeff[2][k]= level-2;
4104 level= (bias - level)>>QMAT_SHIFT;
4105 coeff[0][i]= -level;
4106 coeff[1][i]= -level+1;
4107 //                coeff[2][k]= -level+2;
4109 coeff_count[i]= FFMIN(level, 2);
4110 av_assert2(coeff_count[i]);
4113 coeff[0][i]= (level>>31)|1;
4118 *overflow= s->max_qcoeff < max; //overflow might have happened
4120 if(last_non_zero < start_i){
4121 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4122 return last_non_zero;
/* Dynamic programming over coefficient positions. */
4125 score_tab[start_i]= 0;
4126 survivor[0]= start_i;
4129 for(i=start_i; i<=last_non_zero; i++){
4130 int level_index, j, zero_distortion;
4131 int dct_coeff= FFABS(block[ scantable[i] ]);
4132 int best_score=256*256*256*120;
/* ifast FDCT has AAN scaling folded in; undo it for distortion calc. */
4134 if (s->fdsp.fdct == ff_fdct_ifast)
4135 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4136 zero_distortion= dct_coeff*dct_coeff;
4138 for(level_index=0; level_index < coeff_count[i]; level_index++){
4140 int level= coeff[level_index][i];
4141 const int alevel= FFABS(level);
/* Reconstruct the coefficient the decoder would see, per format. */
4146 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4147 unquant_coeff= alevel*qmul + qadd;
4148 } else if(s->out_format == FMT_MJPEG) {
4149 j = s->idsp.idct_permutation[scantable[i]];
4150 unquant_coeff = alevel * matrix[j] * 8;
4152 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4154 unquant_coeff = (int)(  alevel  * mpeg2_qscale * matrix[j]) >> 4;
4155 unquant_coeff =   (unquant_coeff - 1) | 1;
4157 unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4158 unquant_coeff =   (unquant_coeff - 1) | 1;
4163 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* Small levels: exact VLC length from the table; |level|>=128 uses escape. */
4165 if((level&(~127)) == 0){
4166 for(j=survivor_count-1; j>=0; j--){
4167 int run= i - survivor[j];
4168 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4169 score += score_tab[i-run];
4171 if(score < best_score){
4174 level_tab[i+1]= level-64;
4178 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4179 for(j=survivor_count-1; j>=0; j--){
4180 int run= i - survivor[j];
4181 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4182 score += score_tab[i-run];
4183 if(score < last_score){
4186 last_level= level-64;
4192 distortion += esc_length*lambda;
4193 for(j=survivor_count-1; j>=0; j--){
4194 int run= i - survivor[j];
4195 int score= distortion + score_tab[i-run];
4197 if(score < best_score){
4200 level_tab[i+1]= level-64;
4204 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4205 for(j=survivor_count-1; j>=0; j--){
4206 int run= i - survivor[j];
4207 int score= distortion + score_tab[i-run];
4208 if(score < last_score){
4211 last_level= level-64;
4219 score_tab[i+1]= best_score;
4221 // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
/* Prune survivors that can no longer lead to a better path. */
4222 if(last_non_zero <= 27){
4223 for(; survivor_count; survivor_count--){
4224 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4228 for(; survivor_count; survivor_count--){
4229 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4234 survivor[ survivor_count++ ]= i+1;
/* Non-H.263 formats: pick the cheapest end position including EOB cost. */
4237 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4238 last_score= 256*256*256*120;
4239 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4240 int score= score_tab[i];
4242 score += lambda * 2; // FIXME more exact?
4244 if(score < last_score){
4247 last_level= level_tab[i];
4248 last_run= run_tab[i];
4253 s->coded_score[n] = last_score;
4255 dc= FFABS(block[0]);
4256 last_non_zero= last_i - 1;
4257 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4259 if(last_non_zero < start_i)
4260 return last_non_zero;
/* Special case: only the DC-position coefficient survived; decide
 * between coding it and dropping it entirely. */
4262 if(last_non_zero == 0 && start_i == 0){
4264 int best_score= dc * dc;
4266 for(i=0; i<coeff_count[0]; i++){
4267 int level= coeff[i][0];
4268 int alevel= FFABS(level);
4269 int unquant_coeff, score, distortion;
4271 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4272 unquant_coeff= (alevel*qmul + qadd)>>3;
4274 unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4275 unquant_coeff =   (unquant_coeff - 1) | 1;
4277 unquant_coeff = (unquant_coeff + 4) >> 3;
4278 unquant_coeff<<= 3 + 3;
4280 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4282 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4283 else                    score= distortion + esc_length*lambda;
4285 if(score < best_score){
4287 best_level= level - 64;
4290 block[0]= best_level;
4291 s->coded_score[n] = best_score - dc*dc;
4292 if(best_level == 0) return -1;
4293 else                return last_non_zero;
/* Back-track the chosen path and write levels into the block. */
4297 av_assert2(last_level);
4299 block[ perm_scantable[last_non_zero] ]= last_level;
4302 for(; i>start_i; i -= run_tab[i] + 1){
4303 block[ perm_scantable[i-1] ]= level_tab[i];
4306 return last_non_zero;
/* IDCT basis functions in fixed point (BASIS_SHIFT), indexed by
 * permuted coefficient index; lazily filled by build_basis(). */
4309 static int16_t basis[64][64];
/* Precompute the 2-D DCT basis, stored under the IDCT permutation so
 * dct_quantize_refine() can index it with permuted coefficients.
 * The sqrt(0.5) factors are the DC normalization of the DCT-II. */
4311 static void build_basis(uint8_t *perm){
4318 double s= 0.25*(1<<BASIS_SHIFT);
4320 int perm_index= perm[index];
4321 if(i==0) s*= sqrt(0.5);
4322 if(j==0) s*= sqrt(0.5);
4323 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4330 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4331 int16_t *block, int16_t *weight, int16_t *orig,
/* Iteratively refine an already-quantized block (quantizer noise
 * shaping): compute the spatial-domain reconstruction error ("rem"),
 * then repeatedly try +/-1 changes to individual coefficients, scoring
 * each by basis-weighted distortion plus the VLC bit-cost delta, and
 * apply the best change until no change improves the score.  Returns
 * the (possibly changed) last nonzero coefficient index. */
4334 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4335 const uint8_t *scantable;
4336 const uint8_t *perm_scantable;
4337 //    unsigned int threshold1, threshold2;
4342 int qmul, qadd, start_i, last_non_zero, i, dc;
4344 uint8_t * last_length;
4346 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* Build the DCT basis table on first use. */
4348 if(basis[0][0] == 0)
4349 build_basis(s->idsp.idct_permutation);
4354 scantable= s->intra_scantable.scantable;
4355 perm_scantable= s->intra_scantable.permutated;
4362 /* For AIC we skip quant/dequant of INTRADC */
4366 q <<= RECON_SHIFT-3;
4367 /* note: block[0] is assumed to be positive */
4369 //            block[0] = (block[0] + (q >> 1)) / q;
4371 //        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4372 //            bias= 1<<(QMAT_SHIFT-1);
4373 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4374 length     = s->intra_chroma_ac_vlc_length;
4375 last_length= s->intra_chroma_ac_vlc_last_length;
4377 length     = s->intra_ac_vlc_length;
4378 last_length= s->intra_ac_vlc_last_length;
4381 scantable= s->inter_scantable.scantable;
4382 perm_scantable= s->inter_scantable.permutated;
4385 length     = s->inter_ac_vlc_length;
4386 last_length= s->inter_ac_vlc_last_length;
4388 last_non_zero = s->block_last_index[n];
/* rem[] = (DC prediction) - original, in RECON_SHIFT fixed point. */
4390 dc += (1<<(RECON_SHIFT-1));
4391 for(i=0; i<64; i++){
4392 rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME  use orig directly instead of copying to rem[]
/* Map the perceptual weights into a 16..63 range and derive lambda. */
4396 for(i=0; i<64; i++){
4401 w= FFABS(weight[i]) + qns*one;
4402 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4405 //            w=weight[i] = (63*qns + (w/2)) / w;
4408 av_assert2(w<(1<<6));
4411 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* Subtract the current reconstruction from rem[] and record the RLE runs. */
4415 for(i=start_i; i<=last_non_zero; i++){
4416 int j= perm_scantable[i];
4417 const int level= block[j];
4421 if(level<0) coeff= qmul*level - qadd;
4422 else        coeff= qmul*level + qadd;
4423 run_tab[rle_index++]=run;
4426 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
/* Main refinement loop: find the single +/-1 coefficient change with
 * the best (distortion + bits) score; repeat until none improves. */
4433 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4436 int run2, best_unquant_change=0, analyze_gradient;
4437 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
4439 if(analyze_gradient){
4440 for(i=0; i<64; i++){
4443 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
/* Intra DC is handled separately (fixed quantizer q, no VLC run cost). */
4449 const int level= block[0];
4450 int change, old_coeff;
4452 av_assert2(s->mb_intra);
4456 for(change=-1; change<=1; change+=2){
4457 int new_level= level + change;
4458 int score, new_coeff;
4460 new_coeff= q*new_level;
4461 if(new_coeff >= 2048 || new_coeff < 0)
4464 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4465 new_coeff - old_coeff);
4466 if(score<best_score){
4469 best_change= change;
4470 best_unquant_change= new_coeff - old_coeff;
4477 run2= run_tab[rle_index++];
/* AC coefficients: try +/-1 on each position, accounting for how the
 * change alters run lengths and VLC lengths of neighbors. */
4481 for(i=start_i; i<64; i++){
4482 int j= perm_scantable[i];
4483 const int level= block[j];
4484 int change, old_coeff;
4486 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4490 if(level<0) old_coeff= qmul*level - qadd;
4491 else        old_coeff= qmul*level + qadd;
4492 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4496 av_assert2(run2>=0 || i >= last_non_zero );
4499 for(change=-1; change<=1; change+=2){
4500 int new_level= level + change;
4501 int score, new_coeff, unquant_change;
4504 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4508 if(new_level<0) new_coeff= qmul*new_level - qadd;
4509 else            new_coeff= qmul*new_level + qadd;
4510 if(new_coeff >= 2048 || new_coeff <= -2048)
4512 //FIXME check for overflow
4515 if(level < 63 && level > -63){
4516 if(i < last_non_zero)
4517 score +=  length[UNI_AC_ENC_INDEX(run, new_level+64)]
4518 - length[UNI_AC_ENC_INDEX(run, level+64)];
4520 score +=  last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4521 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
4524 av_assert2(FFABS(new_level)==1);
/* Gradient check: only create a coefficient whose sign matches the
 * weighted residual direction. */
4526 if(analyze_gradient){
4527 int g= d1[ scantable[i] ];
4528 if(g && (g^new_level) >= 0)
/* Creating a coefficient splits an existing run in two. */
4532 if(i < last_non_zero){
4533 int next_i= i + run2 + 1;
4534 int next_level= block[ perm_scantable[next_i] ] + 64;
4536 if(next_level&(~127))
4539 if(next_i < last_non_zero)
4540 score +=   length[UNI_AC_ENC_INDEX(run, 65)]
4541 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4542 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4544 score +=  length[UNI_AC_ENC_INDEX(run, 65)]
4545 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4546 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4548 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4550 score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4551 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* Removing a coefficient merges two runs into one. */
4557 av_assert2(FFABS(level)==1);
4559 if(i < last_non_zero){
4560 int next_i= i + run2 + 1;
4561 int next_level= block[ perm_scantable[next_i] ] + 64;
4563 if(next_level&(~127))
4566 if(next_i < last_non_zero)
4567 score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4568 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4569 - length[UNI_AC_ENC_INDEX(run, 65)];
4571 score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4572 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4573 - length[UNI_AC_ENC_INDEX(run, 65)];
4575 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4577 score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4578 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4585 unquant_change= new_coeff - old_coeff;
4586 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4588 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4590 if(score<best_score){
4593 best_change= change;
4594 best_unquant_change= unquant_change;
4598 prev_level= level + 64;
4599 if(prev_level&(~127))
/* Apply the winning change and maintain last_non_zero / run_tab. */
4609 int j= perm_scantable[ best_coeff ];
4611 block[j] += best_change;
4613 if(best_coeff > last_non_zero){
4614 last_non_zero= best_coeff;
4615 av_assert2(block[j]);
4617 for(; last_non_zero>=start_i; last_non_zero--){
4618 if(block[perm_scantable[last_non_zero]])
4625 for(i=start_i; i<=last_non_zero; i++){
4626 int j= perm_scantable[i];
4627 const int level= block[j];
4630 run_tab[rle_index++]=run;
4637 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4643 return last_non_zero;
4647  * Permute an 8x8 block according to permutation.
4648  * @param block the block which will be permuted according to
4649  *              the given permutation vector
4650  * @param permutation the permutation vector
4651  * @param last the last non zero coefficient in scantable order, used to
4652  *             speed the permutation up
4653  * @param scantable the used scantable, this is only used to speed the
4654  *                  permutation up, the block is not (inverse) permutated
4655  *                  to scantable order!
4657 void ff_block_permute(int16_t *block, uint8_t *permutation,
4658 const uint8_t *scantable, int last)
4665 //FIXME it is ok but not clean and might fail for some permutations
4666 //   if (permutation[1] == 1)
/* Copy the (at most last+1) nonzero coefficients out first, since the
 * permutation cannot be applied in place. */
4669 for (i = 0; i <= last; i++) {
4670 const int j = scantable[i];
/* Scatter them back through the permutation. */
4675 for (i = 0; i <= last; i++) {
4676 const int j = scantable[i];
4677 const int perm_j = permutation[j];
4678 block[perm_j] = temp[j];
/* Plain (non-trellis) quantization of one 8x8 block: forward DCT,
 * optional denoising, then fixed-point multiplication by the
 * (pre-inverted) quant matrix with an intra/inter rounding bias.
 * Writes quantized levels back into block[], sets *overflow when a
 * level exceeded s->max_qcoeff, and returns the last nonzero index
 * (in scan order).  n selects the plane (n<4: luma matrices). */
4682 int ff_dct_quantize_c(MpegEncContext *s,
4683 int16_t *block, int n,
4684 int qscale, int *overflow)
4686 int i, j, level, last_non_zero, q, start_i;
4688 const uint8_t *scantable;
4691 unsigned int threshold1, threshold2;
4693 s->fdsp.fdct(block);
4695 if(s->dct_error_sum)
4696 s->denoise_dct(s, block);
4699 scantable= s->intra_scantable.scantable;
4707 /* For AIC we skip quant/dequant of INTRADC */
4710 /* note: block[0] is assumed to be positive */
4711 block[0] = (block[0] + (q >> 1)) / q;
4714 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4715 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4717 scantable= s->inter_scantable.scantable;
4720 qmat = s->q_inter_matrix[qscale];
4721 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4723 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4724 threshold2= (threshold1<<1);
/* Backward scan: find the last coefficient that quantizes nonzero. */
4725 for(i=63;i>=start_i;i--) {
4727 level = block[j] * qmat[j];
4729 if(((unsigned)(level+threshold1))>threshold2){
/* Forward pass: quantize everything up to last_non_zero; the unsigned
 * range test zeroes near-zero coefficients in a single compare. */
4736 for(i=start_i; i<=last_non_zero; i++) {
4738 level = block[j] * qmat[j];
4740 //        if(   bias+level >= (1<<QMAT_SHIFT)
4741 //           || bias-level >= (1<<QMAT_SHIFT)){
4742 if(((unsigned)(level+threshold1))>threshold2){
4744 level= (bias + level)>>QMAT_SHIFT;
4747 level= (bias - level)>>QMAT_SHIFT;
4755 *overflow= s->max_qcoeff < max; //overflow might have happened
4757 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4758 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4759 ff_block_permute(block, s->idsp.idct_permutation,
4760 scantable, last_non_zero);
4762 return last_non_zero;
/* Shorthand for AVOption tables below: offset of a field inside
 * MpegEncContext, and the common flag set (video + encoding param). */
4765 #define OFFSET(x) offsetof(MpegEncContext, x)
4766 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options of the plain H.263 encoder (uses OFFSET/VE above). */
4767 static const AVOption h263_options[] = {
4768 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4769 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass exposing h263_options on the H.263 encoder's private context. */
4774 static const AVClass h263_class = {
4775 .class_name = "H.263 encoder",
4776 .item_name = av_default_item_name,
4777 .option = h263_options,
4778 .version = LIBAVUTIL_VERSION_INT,
/* Registration entry for the H.263 / H.263-1996 encoder; all entry points
 * are the shared mpegvideo init/encode/close functions. */
4781 AVCodec ff_h263_encoder = {
4783 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4784 .type = AVMEDIA_TYPE_VIDEO,
4785 .id = AV_CODEC_ID_H263,
4786 .priv_data_size = sizeof(MpegEncContext),
4787 .init = ff_mpv_encode_init,
4788 .encode2 = ff_mpv_encode_picture,
4789 .close = ff_mpv_encode_end,
4790 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4791 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4792 .priv_class = &h263_class,
/* Private options of the H.263+ (H.263-1998) encoder. */
4795 static const AVOption h263p_options[] = {
4796 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4797 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4798 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4799 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
/* AVClass exposing h263p_options on the H.263+ encoder's private context. */
4803 static const AVClass h263p_class = {
4804 .class_name = "H.263p encoder",
4805 .item_name = av_default_item_name,
4806 .option = h263p_options,
4807 .version = LIBAVUTIL_VERSION_INT,
/* Registration entry for the H.263+ encoder; unlike plain H.263 it also
 * advertises slice-threading capability. */
4810 AVCodec ff_h263p_encoder = {
4812 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4813 .type = AVMEDIA_TYPE_VIDEO,
4814 .id = AV_CODEC_ID_H263P,
4815 .priv_data_size = sizeof(MpegEncContext),
4816 .init = ff_mpv_encode_init,
4817 .encode2 = ff_mpv_encode_picture,
4818 .close = ff_mpv_encode_end,
4819 .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4820 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4821 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4822 .priv_class = &h263p_class,
/* AVClass for the MS-MPEG4 v2 encoder; no codec-specific options,
 * only the generic mpegvideo option table. */
4825 static const AVClass msmpeg4v2_class = {
4826 .class_name = "msmpeg4v2 encoder",
4827 .item_name = av_default_item_name,
4828 .option = ff_mpv_generic_options,
4829 .version = LIBAVUTIL_VERSION_INT,
/* Registration entry for the Microsoft MPEG-4 variant v2 encoder. */
4832 AVCodec ff_msmpeg4v2_encoder = {
4833 .name = "msmpeg4v2",
4834 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4835 .type = AVMEDIA_TYPE_VIDEO,
4836 .id = AV_CODEC_ID_MSMPEG4V2,
4837 .priv_data_size = sizeof(MpegEncContext),
4838 .init = ff_mpv_encode_init,
4839 .encode2 = ff_mpv_encode_picture,
4840 .close = ff_mpv_encode_end,
4841 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4842 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4843 .priv_class = &msmpeg4v2_class,
/* AVClass for the MS-MPEG4 v3 encoder; generic options only. */
4846 static const AVClass msmpeg4v3_class = {
4847 .class_name = "msmpeg4v3 encoder",
4848 .item_name = av_default_item_name,
4849 .option = ff_mpv_generic_options,
4850 .version = LIBAVUTIL_VERSION_INT,
/* Registration entry for the Microsoft MPEG-4 variant v3 encoder. */
4853 AVCodec ff_msmpeg4v3_encoder = {
4855 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4856 .type = AVMEDIA_TYPE_VIDEO,
4857 .id = AV_CODEC_ID_MSMPEG4V3,
4858 .priv_data_size = sizeof(MpegEncContext),
4859 .init = ff_mpv_encode_init,
4860 .encode2 = ff_mpv_encode_picture,
4861 .close = ff_mpv_encode_end,
4862 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4863 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4864 .priv_class = &msmpeg4v3_class,
/* AVClass for the WMV1 encoder; generic options only. */
4867 static const AVClass wmv1_class = {
4868 .class_name = "wmv1 encoder",
4869 .item_name = av_default_item_name,
4870 .option = ff_mpv_generic_options,
4871 .version = LIBAVUTIL_VERSION_INT,
4874 AVCodec ff_wmv1_encoder = {
4876 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4877 .type = AVMEDIA_TYPE_VIDEO,
4878 .id = AV_CODEC_ID_WMV1,
4879 .priv_data_size = sizeof(MpegEncContext),
4880 .init = ff_mpv_encode_init,
4881 .encode2 = ff_mpv_encode_picture,
4882 .close = ff_mpv_encode_end,
4883 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4884 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4885 .priv_class = &wmv1_class,