2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/mem_internal.h"
40 #include "libavutil/pixdesc.h"
41 #include "libavutil/opt.h"
42 #include "libavutil/thread.h"
47 #include "mpegvideo.h"
48 #include "mpegvideodata.h"
52 #include "mjpegenc_common.h"
54 #include "mpegutils.h"
56 #include "speedhqenc.h"
58 #include "pixblockdsp.h"
62 #include "aandcttab.h"
64 #include "mpeg4video.h"
66 #include "bytestream.h"
69 #include "packet_internal.h"
73 #define QUANT_BIAS_SHIFT 8
75 #define QMAT_SHIFT_MMX 16
78 static int encode_picture(MpegEncContext *s, int picture_number);
79 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
80 static int sse_mb(MpegEncContext *s);
81 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
82 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
84 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
85 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
87 const AVOption ff_mpv_generic_options[] = {
/**
 * Precompute per-qscale quantization tables from a raw quant matrix.
 *
 * For each qscale in [qmin, qmax] this fills qmat[qscale] (32-bit reciprocal
 * table) and qmat16[qscale] (16-bit reciprocal + bias table for the MMX/SIMD
 * path), selecting the scaling according to which FDCT implementation is in
 * use (islow/faandct need no AAN post-scale; ifast folds ff_aanscales in).
 *
 * NOTE(review): this extract is missing physical lines (the embedded original
 * line numbers are non-contiguous), so some braces, declarations (qscale, i,
 * qscale2, shift, max) and the overflow-shift loop body are not visible here.
 */
92 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
93                        uint16_t (*qmat16)[2][64],
94                        const uint16_t *quant_matrix,
95                        int bias, int qmin, int qmax, int intra)
97     FDCTDSPContext *fdsp = &s->fdsp;
101     for (qscale = qmin; qscale <= qmax; qscale++) {
            /* MPEG-2 non-linear q_scale_type maps qscale through a table;
             * otherwise the effective scale is simply 2*qscale. */
105         if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
106         else qscale2 = qscale << 1;
            /* Case 1: "slow"/exact FDCTs — reciprocal without AAN scales. */
108         if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
110             fdsp->fdct == ff_faandct ||
111 #endif /* CONFIG_FAANDCT */
112             fdsp->fdct == ff_jpeg_fdct_islow_10) {
113             for (i = 0; i < 64; i++) {
                    /* j: position after the IDCT permutation, so the table is
                     * indexed the same way the coefficients are stored. */
114                 const int j = s->idsp.idct_permutation[i];
115                 int64_t den = (int64_t) qscale2 * quant_matrix[j];
116                 /* 16 <= qscale * quant_matrix[i] <= 7905
117                  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
118                  * 19952 <= x <= 249205026
119                  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
120                  * 3444240 >= (1 << 36) / (x) >= 275 */
122                 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
            /* Case 2: AAN "ifast" FDCT — its output is pre-scaled, so the
             * reciprocal must also divide by ff_aanscales (hence the +14). */
124         } else if (fdsp->fdct == ff_fdct_ifast) {
125             for (i = 0; i < 64; i++) {
126                 const int j = s->idsp.idct_permutation[i];
127                 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
128                 /* 16 <= qscale * quant_matrix[i] <= 7905
129                  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
130                  * 19952 <= x <= 249205026
131                  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
132                  * 3444240 >= (1 << 36) / (x) >= 275 */
134                 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
            /* Case 3 (presumably the else branch — the `} else {` line is not
             * visible in this extract): generic path that also fills the
             * 16-bit qmat16 tables used by the SIMD quantizer. */
137             for (i = 0; i < 64; i++) {
138                 const int j = s->idsp.idct_permutation[i];
139                 int64_t den = (int64_t) qscale2 * quant_matrix[j];
140                 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
141                  * Assume x = qscale * quant_matrix[i]
143                  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
144                  * so 32768 >= (1 << 19) / (x) >= 67 */
145                 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
146                 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
147                 //                   (qscale * quant_matrix[i]);
148                 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
                    /* Avoid 0 and the 0x8000 sentinel in the 16-bit table. */
150                 if (qmat16[qscale][0][i] == 0 ||
151                     qmat16[qscale][0][i] == 128 * 256)
152                     qmat16[qscale][0][i] = 128 * 256 - 1;
153                 qmat16[qscale][1][i] =
154                     ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
155                                 qmat16[qscale][0][i]);
            /* Overflow guard: shrink the effective shift while the largest
             * possible product would overflow int (loop body not visible). */
159         for (i = intra; i < 64; i++) {
161             if (fdsp->fdct == ff_fdct_ifast) {
162                 max = (8191LL * ff_aanscales[i]) >> 14;
164             while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
170         av_log(s->avctx, AV_LOG_INFO,
171                "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/**
 * Derive s->qscale (and s->lambda2) from the current lambda.
 *
 * NOTE(review): the first branch condition is `s->q_scale_type == 1 && 0`,
 * i.e. deliberately dead code — the non-linear table search below it is
 * disabled and the linear mapping at the bottom always runs. Several lines
 * (brace closures, `continue;`, the bestdiff assignment) are missing from
 * this extract.
 */
176 static inline void update_qscale(MpegEncContext *s)
178     if (s->q_scale_type == 1 && 0) {
180         int bestdiff=INT_MAX;
            /* Disabled search for the ff_mpeg2_non_linear_qscale entry whose
             * implied lambda is closest to the current s->lambda. */
183         for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
184             int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
                /* Skip entries outside [qmin, qmax] (qmax is waived while
                 * vbv_ignore_qmax is set). */
185             if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
186                 (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
188             if (diff < bestdiff) {
        /* Live path: linear lambda -> qscale mapping (factor 139 / 2^7),
         * clipped to the configured range; VBV overshoot may lift qmax to 31. */
195         s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
196                     (FF_LAMBDA_SHIFT + 7);
197     s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
        /* Keep lambda2 consistent with lambda (rounded square). */
200     s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/**
 * Write all 64 quant-matrix entries to the bitstream, 8 bits each,
 * in zigzag scan order.
 * NOTE(review): surrounding braces and an apparent "matrix present" flag
 * path are missing from this extract.
 */
204 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
210         for (i = 0; i < 64; i++) {
211             put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
218 * init s->current_picture.qscale_table from s->lambda_table
/**
 * Fill s->current_picture.qscale_table from s->lambda_table: for every
 * macroblock, convert its lambda to a qscale (same 139/2^7 linear mapping
 * as update_qscale) and clip to [qmin, ...] (upper clip bound not visible
 * in this extract).
 */
220 void ff_init_qscale_tab(MpegEncContext *s)
222     int8_t * const qscale_table = s->current_picture.qscale_table;
225     for (i = 0; i < s->mb_num; i++) {
            /* mb_index2xy maps sequential MB index to the table layout. */
226         unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
227         int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
228         qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/**
 * Copy the fields that motion estimation may have changed from the main
 * context back into a duplicate (slice-thread) context, via the COPY macro.
 * NOTE(review): additional COPY lines and the #undef/closing brace are
 * missing from this extract.
 */
233 static void update_duplicate_context_after_me(MpegEncContext *dst,
236 #define COPY(a) dst->a= src->a
238     COPY(current_picture);
244     COPY(picture_in_gop_number);
245     COPY(gop_picture_number);
246     COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
247     COPY(progressive_frame);    // FIXME don't set in encode_header
248     COPY(partitioned_frame);    // FIXME don't set in encode_header
/**
 * One-time static-table initialization (run via ff_thread_once from
 * mpv_encode_defaults): mark fcode 1 as usable for small motion vectors
 * in the range [-16, 16). Other initialization lines are missing from
 * this extract.
 */
252 static void mpv_encode_init_static(void)
254     for (int i = -16; i < 16; i++)
255         default_fcode_tab[i + MAX_MV] = 1;
259 * Set the given MpegEncContext to defaults for encoding.
260 * the changed fields will not depend upon the prior state of the MpegEncContext.
/**
 * Set the given MpegEncContext to encoder defaults: common mpegvideo
 * defaults, thread-safe one-time static tables, default ME penalty /
 * fcode tables, and zeroed picture counters. Does not depend on the
 * context's prior state.
 */
262 static void mpv_encode_defaults(MpegEncContext *s)
264     static AVOnce init_static_once = AV_ONCE_INIT;
266     ff_mpv_common_defaults(s);
        /* Static tables are shared process-wide; initialize exactly once. */
268     ff_thread_once(&init_static_once, mpv_encode_init_static);
270     s->me.mv_penalty = default_mv_penalty;
271     s->fcode_tab     = default_fcode_tab;
273     s->input_picture_number  = 0;
274     s->picture_in_gop_number = 0;
/**
 * Initialize the DCT/quantization function pointers for encoding:
 * arch-specific overrides first (x86), then C fallbacks; the trellis
 * quantizer replaces the plain one when avctx->trellis is set, while
 * fast_dct_quantize keeps the non-trellis version.
 * NOTE(review): guard lines (e.g. the ARCH_X86 #if around the x86 init)
 * and the return are missing from this extract.
 */
277 av_cold int ff_dct_encode_init(MpegEncContext *s)
280         ff_dct_encode_init_x86(s);
282     if (CONFIG_H263_ENCODER)
283         ff_h263dsp_init(&s->h263dsp);
284     if (!s->dct_quantize)
285         s->dct_quantize = ff_dct_quantize_c;
287         s->denoise_dct = denoise_dct_c;
288     s->fast_dct_quantize = s->dct_quantize;
289     if (s->avctx->trellis)
290         s->dct_quantize = dct_quantize_trellis_c;
295 /* init video encoder */
/**
 * Initialize the mpegvideo encoder: validate the user's AVCodecContext
 * settings against the selected codec's constraints, copy options into the
 * MpegEncContext, set per-codec output format/flags, allocate quantization
 * and picture tables, and initialize DSP, rate control and CPB properties.
 * Returns 0 on success or a negative AVERROR code.
 *
 * NOTE(review): this extract omits many physical lines (embedded original
 * line numbers are non-contiguous) — `break;` statements, closing braces,
 * `default:` labels and some conditions are not visible. Comments below
 * describe only what the visible lines establish.
 */
296 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
298     MpegEncContext *s = avctx->priv_data;
299     AVCPBProperties *cpb_props;
300     int i, ret, format_supported;
302     mpv_encode_defaults(s);
    /* --- Pixel-format validation per codec --------------------------- */
304     switch (avctx->codec_id) {
305     case AV_CODEC_ID_MPEG2VIDEO:
306         if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
307             avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
308             av_log(avctx, AV_LOG_ERROR,
309                    "only YUV420 and YUV422 are supported\n");
310             return AVERROR(EINVAL);
313     case AV_CODEC_ID_MJPEG:
314     case AV_CODEC_ID_AMV:
315         format_supported = 0;
316         /* JPEG color space */
317         if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
318             avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
319             avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
320             (avctx->color_range == AVCOL_RANGE_JPEG &&
321              (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
322               avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
323               avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
324             format_supported = 1;
325         /* MPEG color space */
326         else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
327                  (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
328                   avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
329                   avctx->pix_fmt == AV_PIX_FMT_YUV444P))
330             format_supported = 1;
332         if (!format_supported) {
333             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
334             return AVERROR(EINVAL);
337     case AV_CODEC_ID_SPEEDHQ:
338         if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
339             avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
340             avctx->pix_fmt != AV_PIX_FMT_YUV444P) {
341             av_log(avctx, AV_LOG_ERROR,
342                    "only YUV420/YUV422/YUV444 are supported (no alpha support yet)\n");
343             return AVERROR(EINVAL);
        /* presumably the default: case for the remaining codecs — label not
         * visible in this extract */
347         if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
348             av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
349             return AVERROR(EINVAL);
    /* --- Derive chroma format from the pixel format ------------------ */
353     switch (avctx->pix_fmt) {
354     case AV_PIX_FMT_YUVJ444P:
355     case AV_PIX_FMT_YUV444P:
356         s->chroma_format = CHROMA_444;
358     case AV_PIX_FMT_YUVJ422P:
359     case AV_PIX_FMT_YUV422P:
360         s->chroma_format = CHROMA_422;
362     case AV_PIX_FMT_YUVJ420P:
363     case AV_PIX_FMT_YUV420P:
365         s->chroma_format = CHROMA_420;
369     avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
    /* --- Copy (deprecated) AVCodecContext options into the context --- */
371 #if FF_API_PRIVATE_OPT
372 FF_DISABLE_DEPRECATION_WARNINGS
373     if (avctx->rtp_payload_size)
374         s->rtp_payload_size = avctx->rtp_payload_size;
375     if (avctx->me_penalty_compensation)
376         s->me_penalty_compensation = avctx->me_penalty_compensation;
378         s->me_pre = avctx->pre_me;
379 FF_ENABLE_DEPRECATION_WARNINGS
382     s->bit_rate = avctx->bit_rate;
383     s->width    = avctx->width;
384     s->height   = avctx->height;
385     if (avctx->gop_size > 600 &&
386         avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
387         av_log(avctx, AV_LOG_WARNING,
388                "keyframe interval too large!, reducing it from %d to %d\n",
389                avctx->gop_size, 600);
390         avctx->gop_size = 600;
392     s->gop_size     = avctx->gop_size;
394     if (avctx->max_b_frames > MAX_B_FRAMES) {
395         av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
396                "is %d.\n", MAX_B_FRAMES);
397         avctx->max_b_frames = MAX_B_FRAMES;
399     s->max_b_frames = avctx->max_b_frames;
400     s->codec_id     = avctx->codec->id;
401     s->strict_std_compliance = avctx->strict_std_compliance;
402     s->quarter_sample     = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
403     s->rtp_mode           = !!s->rtp_payload_size;
404     s->intra_dc_precision = avctx->intra_dc_precision;
406     // workaround some differences between how applications specify dc precision
407     if (s->intra_dc_precision < 0) {
408         s->intra_dc_precision += 8;
409     } else if (s->intra_dc_precision >= 8)
410         s->intra_dc_precision -= 8;
412     if (s->intra_dc_precision < 0) {
413         av_log(avctx, AV_LOG_ERROR,
414                "intra dc precision must be positive, note some applications use"
415                " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
416         return AVERROR(EINVAL);
        /* body of this condition is not visible in this extract */
419     if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))
422     if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
423         av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
424         return AVERROR(EINVAL);
426     s->user_specified_pts = AV_NOPTS_VALUE;
428     if (s->gop_size <= 1) {
436     s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
    /* Adaptive quant is enabled when any masking option or QP-RD is set
     * (some condition lines are missing from this extract). */
438     s->adaptive_quant = (avctx->lumi_masking ||
439                          avctx->dark_masking ||
440                          avctx->temporal_cplx_masking ||
441                          avctx->spatial_cplx_masking  ||
444                          (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
447     s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
    /* --- Rate control / VBV buffer validation ------------------------ */
449     if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
450         switch(avctx->codec_id) {
451         case AV_CODEC_ID_MPEG1VIDEO:
452         case AV_CODEC_ID_MPEG2VIDEO:
453             avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
455         case AV_CODEC_ID_MPEG4:
456         case AV_CODEC_ID_MSMPEG4V1:
457         case AV_CODEC_ID_MSMPEG4V2:
458         case AV_CODEC_ID_MSMPEG4V3:
459             if (avctx->rc_max_rate >= 15000000) {
460                 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
461             } else if(avctx->rc_max_rate >= 2000000) {
462                 avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
463             } else if(avctx->rc_max_rate >= 384000) {
464                 avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
466                 avctx->rc_buffer_size = 40;
467             avctx->rc_buffer_size *= 16384;
470         if (avctx->rc_buffer_size) {
471             av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
475     if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
476         av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
477         return AVERROR(EINVAL);
480     if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
481         av_log(avctx, AV_LOG_INFO,
482                "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
485     if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
486         av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
487         return AVERROR(EINVAL);
490     if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
491         av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
492         return AVERROR(EINVAL);
495     if (avctx->rc_max_rate &&
496         avctx->rc_max_rate == avctx->bit_rate &&
497         avctx->rc_max_rate != avctx->rc_min_rate) {
498         av_log(avctx, AV_LOG_INFO,
499                "impossible bitrate constraints, this will fail\n");
502     if (avctx->rc_buffer_size &&
503         avctx->bit_rate * (int64_t)avctx->time_base.num >
504             avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
505         av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
506         return AVERROR(EINVAL);
509     if (!s->fixed_qscale &&
510         avctx->bit_rate * av_q2d(avctx->time_base) >
511             avctx->bit_rate_tolerance) {
512         av_log(avctx, AV_LOG_WARNING,
513                "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
514         avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
517     if (avctx->rc_max_rate &&
518         avctx->rc_min_rate == avctx->rc_max_rate &&
519         (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
520          s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
521         90000LL * (avctx->rc_buffer_size - 1) >
522             avctx->rc_max_rate * 0xFFFFLL) {
523         av_log(avctx, AV_LOG_INFO,
524                "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
525                "specified vbv buffer is too large for the given bitrate!\n");
    /* --- Feature / codec compatibility checks ------------------------ */
528     if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
529         s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
530         s->codec_id != AV_CODEC_ID_FLV1) {
531         av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
532         return AVERROR(EINVAL);
535     if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
536         av_log(avctx, AV_LOG_ERROR,
537                "OBMC is only supported with simple mb decision\n");
538         return AVERROR(EINVAL);
541     if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
542         av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
543         return AVERROR(EINVAL);
546     if (s->max_b_frames                    &&
547         s->codec_id != AV_CODEC_ID_MPEG4      &&
548         s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
549         s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
550         av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
551         return AVERROR(EINVAL);
553     if (s->max_b_frames < 0) {
554         av_log(avctx, AV_LOG_ERROR,
555                "max b frames must be 0 or positive for mpegvideo based encoders\n");
556         return AVERROR(EINVAL);
559     if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
560          s->codec_id == AV_CODEC_ID_H263  ||
561          s->codec_id == AV_CODEC_ID_H263P) &&
562         (avctx->sample_aspect_ratio.num > 255 ||
563          avctx->sample_aspect_ratio.den > 255)) {
564         av_log(avctx, AV_LOG_WARNING,
565                "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
566                avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
567         av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
568                    avctx->sample_aspect_ratio.num,  avctx->sample_aspect_ratio.den, 255);
    /* --- Resolution / alignment checks per codec --------------------- */
571     if ((s->codec_id == AV_CODEC_ID_H263 ||
572          s->codec_id == AV_CODEC_ID_H263P) &&
573         (avctx->width  > 2048 ||
574          avctx->height > 1152 )) {
575         av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
576         return AVERROR(EINVAL);
578     if ((s->codec_id == AV_CODEC_ID_H263 ||
579          s->codec_id == AV_CODEC_ID_H263P) &&
580         ((avctx->width &3) ||
581          (avctx->height&3) )) {
582         av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
583         return AVERROR(EINVAL);
586     if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
587         (avctx->width  > 4095 ||
588          avctx->height > 4095 )) {
589         av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
590         return AVERROR(EINVAL);
593     if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
594         (avctx->width  > 16383 ||
595          avctx->height > 16383 )) {
596         av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
597         return AVERROR(EINVAL);
        /* part of the RV10 condition (width check) is missing from this extract */
600     if (s->codec_id == AV_CODEC_ID_RV10 &&
602          avctx->height&15 )) {
603         av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
604         return AVERROR(EINVAL);
607     if (s->codec_id == AV_CODEC_ID_RV20 &&
610         av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
611         return AVERROR(EINVAL);
614     if ((s->codec_id == AV_CODEC_ID_WMV1 ||
615          s->codec_id == AV_CODEC_ID_WMV2) &&
617         av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
618         return AVERROR(EINVAL);
621     if ((avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
622         s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
623         av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
624         return AVERROR(EINVAL);
627 #if FF_API_PRIVATE_OPT
628     FF_DISABLE_DEPRECATION_WARNINGS
629     if (avctx->mpeg_quant)
631     FF_ENABLE_DEPRECATION_WARNINGS
634     // FIXME mpeg2 uses that too
635     if (s->mpeg_quant && (   s->codec_id != AV_CODEC_ID_MPEG4
636                           && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
637         av_log(avctx, AV_LOG_ERROR,
638                "mpeg2 style quantization not supported by codec\n");
639         return AVERROR(EINVAL);
642     if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
643         av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
644         return AVERROR(EINVAL);
647     if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
648         avctx->mb_decision != FF_MB_DECISION_RD) {
649         av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
650         return AVERROR(EINVAL);
653     if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
654         (s->codec_id == AV_CODEC_ID_AMV ||
655          s->codec_id == AV_CODEC_ID_MJPEG)) {
656         // Used to produce garbage with MJPEG.
657         av_log(avctx, AV_LOG_ERROR,
658                "QP RD is no longer compatible with MJPEG or AMV\n");
659         return AVERROR(EINVAL);
662 #if FF_API_PRIVATE_OPT
663 FF_DISABLE_DEPRECATION_WARNINGS
664     if (avctx->scenechange_threshold)
665         s->scenechange_threshold = avctx->scenechange_threshold;
666 FF_ENABLE_DEPRECATION_WARNINGS
669     if (s->scenechange_threshold < 1000000000 &&
670         (avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
671         av_log(avctx, AV_LOG_ERROR,
672                "closed gop with scene change detection are not supported yet, "
673                "set threshold to 1000000000\n");
674         return AVERROR_PATCHWELCOME;
677     if (avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
678         if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
679             s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
680             av_log(avctx, AV_LOG_ERROR,
681                    "low delay forcing is only available for mpeg2, "
682                    "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
683             return AVERROR(EINVAL);
685         if (s->max_b_frames != 0) {
686             av_log(avctx, AV_LOG_ERROR,
687                    "B-frames cannot be used with low delay\n");
688             return AVERROR(EINVAL);
692     if (s->q_scale_type == 1) {
693         if (avctx->qmax > 28) {
694             av_log(avctx, AV_LOG_ERROR,
695                    "non linear quant only supports qmax <= 28 currently\n");
696             return AVERROR_PATCHWELCOME;
700     if (avctx->slices > 1 &&
701         (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
702         av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
703         return AVERROR(EINVAL);
706     if (avctx->thread_count > 1         &&
707         s->codec_id != AV_CODEC_ID_MPEG4      &&
708         s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
709         s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
710         s->codec_id != AV_CODEC_ID_MJPEG      &&
711         (s->codec_id != AV_CODEC_ID_H263P)) {
712         av_log(avctx, AV_LOG_ERROR,
713                "multi threaded encoding not supported by codec\n");
714         return AVERROR_PATCHWELCOME;
717     if (avctx->thread_count < 1) {
718         av_log(avctx, AV_LOG_ERROR,
719                "automatic thread number detection not supported by codec, "
721         return AVERROR_PATCHWELCOME;
724     if (!avctx->time_base.den || !avctx->time_base.num) {
725         av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
726         return AVERROR(EINVAL);
729 #if FF_API_PRIVATE_OPT
730 FF_DISABLE_DEPRECATION_WARNINGS
731     if (avctx->b_frame_strategy)
732         s->b_frame_strategy = avctx->b_frame_strategy;
733     if (avctx->b_sensitivity != 40)
734         s->b_sensitivity = avctx->b_sensitivity;
735 FF_ENABLE_DEPRECATION_WARNINGS
738     if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
739         av_log(avctx, AV_LOG_INFO,
740                "notice: b_frame_strategy only affects the first pass\n");
741         s->b_frame_strategy = 0;
    /* Reduce the timebase by its gcd (the `if (i > 1)` guard is presumably
     * between these lines — not visible in this extract). */
744     i = av_gcd(avctx->time_base.den, avctx->time_base.num);
746         av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
747         avctx->time_base.den /= i;
748         avctx->time_base.num /= i;
    /* --- Quantizer bias defaults ------------------------------------- */
752     if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
753         // (a + x * 3 / 8) / x
754         s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
755         s->inter_quant_bias = 0;
757         s->intra_quant_bias = 0;
759         s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
762     if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
763         av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
764         return AVERROR(EINVAL);
767     av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
769     if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
770         avctx->time_base.den > (1 << 16) - 1) {
771         av_log(avctx, AV_LOG_ERROR,
772                "timebase %d/%d not supported by MPEG 4 standard, "
773                "the maximum admitted value for the timebase denominator "
774                "is %d\n", avctx->time_base.num, avctx->time_base.den,
776         return AVERROR(EINVAL);
778     s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
    /* --- Per-codec output format / flags (break;s not visible) ------- */
780     switch (avctx->codec->id) {
781     case AV_CODEC_ID_MPEG1VIDEO:
782         s->out_format = FMT_MPEG1;
783         s->low_delay  = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
784         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
786     case AV_CODEC_ID_MPEG2VIDEO:
787         s->out_format = FMT_MPEG1;
788         s->low_delay  = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
789         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
792 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
793     case AV_CODEC_ID_MJPEG:
794     case AV_CODEC_ID_AMV:
795         s->out_format = FMT_MJPEG;
796         s->intra_only = 1; /* force intra only for jpeg */
797         if ((ret = ff_mjpeg_encode_init(s)) < 0)
803     case AV_CODEC_ID_SPEEDHQ:
804         s->out_format = FMT_SPEEDHQ;
805         s->intra_only = 1; /* force intra only for SHQ */
806         if (!CONFIG_SPEEDHQ_ENCODER)
807             return AVERROR_ENCODER_NOT_FOUND;
808         if ((ret = ff_speedhq_encode_init(s)) < 0)
813     case AV_CODEC_ID_H261:
814         if (!CONFIG_H261_ENCODER)
815             return AVERROR_ENCODER_NOT_FOUND;
816         if (ff_h261_get_picture_format(s->width, s->height) < 0) {
817             av_log(avctx, AV_LOG_ERROR,
818                    "The specified picture size of %dx%d is not valid for the "
819                    "H.261 codec.\nValid sizes are 176x144, 352x288\n",
820                     s->width, s->height);
821             return AVERROR(EINVAL);
823         s->out_format = FMT_H261;
826         s->rtp_mode   = 0; /* Sliced encoding not supported */
828     case AV_CODEC_ID_H263:
829         if (!CONFIG_H263_ENCODER)
830             return AVERROR_ENCODER_NOT_FOUND;
831         if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
832                              s->width, s->height) == 8) {
833             av_log(avctx, AV_LOG_ERROR,
834                    "The specified picture size of %dx%d is not valid for "
835                    "the H.263 codec.\nValid sizes are 128x96, 176x144, "
836                    "352x288, 704x576, and 1408x1152. "
837                    "Try H.263+.\n", s->width, s->height);
838             return AVERROR(EINVAL);
840         s->out_format = FMT_H263;
844     case AV_CODEC_ID_H263P:
845         s->out_format = FMT_H263;
848         s->h263_aic        = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
849         s->modified_quant  = s->h263_aic;
850         s->loop_filter     = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
851         s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
854         /* These are just to be sure */
858     case AV_CODEC_ID_FLV1:
859         s->out_format      = FMT_H263;
860         s->h263_flv        = 2; /* format = 1; 11-bit codes */
861         s->unrestricted_mv = 1;
862         s->rtp_mode  = 0; /* don't allow GOB */
866     case AV_CODEC_ID_RV10:
867         s->out_format = FMT_H263;
871     case AV_CODEC_ID_RV20:
872         s->out_format      = FMT_H263;
875         s->modified_quant  = 1;
879         s->unrestricted_mv = 0;
881     case AV_CODEC_ID_MPEG4:
882         s->out_format      = FMT_H263;
884         s->unrestricted_mv = 1;
885         s->low_delay       = s->max_b_frames ? 0 : 1;
886         avctx->delay       = s->low_delay ? 0 : (s->max_b_frames + 1);
888     case AV_CODEC_ID_MSMPEG4V2:
889         s->out_format      = FMT_H263;
891         s->unrestricted_mv = 1;
892         s->msmpeg4_version = 2;
896     case AV_CODEC_ID_MSMPEG4V3:
897         s->out_format        = FMT_H263;
899         s->unrestricted_mv   = 1;
900         s->msmpeg4_version   = 3;
901         s->flipflop_rounding = 1;
905     case AV_CODEC_ID_WMV1:
906         s->out_format        = FMT_H263;
908         s->unrestricted_mv   = 1;
909         s->msmpeg4_version   = 4;
910         s->flipflop_rounding = 1;
914     case AV_CODEC_ID_WMV2:
915         s->out_format        = FMT_H263;
917         s->unrestricted_mv   = 1;
918         s->msmpeg4_version   = 5;
919         s->flipflop_rounding = 1;
924         return AVERROR(EINVAL);
927 #if FF_API_PRIVATE_OPT
928     FF_DISABLE_DEPRECATION_WARNINGS
929     if (avctx->noise_reduction)
930         s->noise_reduction = avctx->noise_reduction;
931     FF_ENABLE_DEPRECATION_WARNINGS
934     avctx->has_b_frames = !s->low_delay;
938     s->progressive_frame    =
939     s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
940                                                 AV_CODEC_FLAG_INTERLACED_ME) ||
    /* --- Allocation and sub-module initialization -------------------- */
945     if ((ret = ff_mpv_common_init(s)) < 0)
948     ff_fdctdsp_init(&s->fdsp, avctx);
949     ff_me_cmp_init(&s->mecc, avctx);
950     ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
951     ff_pixblockdsp_init(&s->pdsp, avctx);
952     ff_qpeldsp_init(&s->qdsp);
954     if (s->msmpeg4_version) {
955         int ac_stats_size = 2 * 2 * (MAX_LEVEL + 1) * (MAX_RUN + 1) * 2 * sizeof(int);
956         if (!(s->ac_stats = av_mallocz(ac_stats_size)))
957             return AVERROR(ENOMEM);
960     if (!(avctx->stats_out = av_mallocz(256))          ||
961         !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix,          32) ||
962         !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix,   32) ||
963         !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix,          32) ||
964         !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16,        32) ||
965         !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
966         !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16,        32) ||
967         !FF_ALLOCZ_TYPED_ARRAY(s->input_picture,           MAX_PICTURE_COUNT) ||
968         !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_PICTURE_COUNT))
969         return AVERROR(ENOMEM);
971     if (s->noise_reduction) {
972         if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
973             return AVERROR(ENOMEM);
976     ff_dct_encode_init(s);
978     if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
979         s->chroma_qscale_table = ff_h263_chroma_qscale_table;
981     if (s->slice_context_count > 1) {
984         if (avctx->codec_id == AV_CODEC_ID_H263P)
985             s->h263_slice_structured = 1;
988     s->quant_precision = 5;
990 #if FF_API_PRIVATE_OPT
991 FF_DISABLE_DEPRECATION_WARNINGS
992     if (avctx->frame_skip_threshold)
993         s->frame_skip_threshold = avctx->frame_skip_threshold;
994     if (avctx->frame_skip_factor)
995         s->frame_skip_factor = avctx->frame_skip_factor;
996     if (avctx->frame_skip_exp)
997         s->frame_skip_exp = avctx->frame_skip_exp;
998     if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
999         s->frame_skip_cmp = avctx->frame_skip_cmp;
1000 FF_ENABLE_DEPRECATION_WARNINGS
1003     ff_set_cmp(&s->mecc, s->mecc.ildct_cmp,      avctx->ildct_cmp);
1004     ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
1006     if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
1007         ff_h261_encode_init(s);
1008     if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
1009         ff_h263_encode_init(s);
1010     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
1011         ff_msmpeg4_encode_init(s);
1012     if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
1013         && s->out_format == FMT_MPEG1)
1014         ff_mpeg1_encode_init(s);
    /* --- Choose default intra/inter matrices (permuted), then apply any
     *     user-supplied matrices on top ------------------------------- */
1017     for (i = 0; i < 64; i++) {
1018         int j = s->idsp.idct_permutation[i];
1019         if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
1021             s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
1022             s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
1023         } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1024             s->intra_matrix[j] =
1025             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1026         } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
1027             s->intra_matrix[j] =
1028             s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1031             s->chroma_intra_matrix[j] =
1032             s->intra_matrix[j]        = ff_mpeg1_default_intra_matrix[i];
1033             s->inter_matrix[j]        = ff_mpeg1_default_non_intra_matrix[i];
1035         if (avctx->intra_matrix)
1036             s->intra_matrix[j] = avctx->intra_matrix[i];
1037         if (avctx->inter_matrix)
1038             s->inter_matrix[j] = avctx->inter_matrix[i];
1041     /* precompute matrix */
1042     /* for mjpeg, we do include qscale in the matrix */
1043     if (s->out_format != FMT_MJPEG) {
1044         ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
1045                           s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1047         ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
1048                           s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1052     if ((ret = ff_rate_control_init(s)) < 0)
1055 #if FF_API_PRIVATE_OPT
1056     FF_DISABLE_DEPRECATION_WARNINGS
1057     if (avctx->brd_scale)
1058         s->brd_scale = avctx->brd_scale;
1060     if (avctx->prediction_method)
1061         s->pred = avctx->prediction_method + 1;
1062     FF_ENABLE_DEPRECATION_WARNINGS
    /* b_frame_strategy 2: allocate downscaled temp frames used to decide
     * B-frame placement (error-path lines are not visible here). */
1065     if (s->b_frame_strategy == 2) {
1066         for (i = 0; i < s->max_b_frames + 2; i++) {
1067             s->tmp_frames[i] = av_frame_alloc();
1068             if (!s->tmp_frames[i])
1069                 return AVERROR(ENOMEM);
1071             s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1072             s->tmp_frames[i]->width  = s->width  >> s->brd_scale;
1073             s->tmp_frames[i]->height = s->height >> s->brd_scale;
1075             ret = av_frame_get_buffer(s->tmp_frames[i], 0);
    /* Export CPB properties as packet side data. */
1081     cpb_props = ff_add_cpb_side_data(avctx);
1083         return AVERROR(ENOMEM);
1084     cpb_props->max_bitrate = avctx->rc_max_rate;
1085     cpb_props->min_bitrate = avctx->rc_min_rate;
1086     cpb_props->avg_bitrate = avctx->bit_rate;
1087     cpb_props->buffer_size = avctx->rc_buffer_size;
/**
 * Free all encoder resources: rate control, common mpegvideo state,
 * MJPEG tables, extradata, temp frames, picture tables and the per-qscale
 * quantization tables allocated in ff_mpv_encode_init.
 * NOTE(review): the declaration of `i` and the return are not visible in
 * this extract.
 */
1092 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1094     MpegEncContext *s = avctx->priv_data;
1097     ff_rate_control_uninit(s);
1099     ff_mpv_common_end(s);
1100     if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
1101         s->out_format == FMT_MJPEG)
1102         ff_mjpeg_encode_close(s);
1104     av_freep(&avctx->extradata);
1106     for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1107         av_frame_free(&s->tmp_frames[i]);
1109     ff_free_picture_tables(&s->new_picture);
1110     ff_mpeg_unref_picture(avctx, &s->new_picture);
1112     av_freep(&avctx->stats_out);
1113     av_freep(&s->ac_stats);
        /* The chroma tables may alias the luma tables; only free them when
         * they are separate allocations, then clear the pointers so the
         * aliased case cannot dangle. */
1115     if(s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
1116     if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1117     s->q_chroma_intra_matrix=   NULL;
1118     s->q_chroma_intra_matrix16= NULL;
1119     av_freep(&s->q_intra_matrix);
1120     av_freep(&s->q_inter_matrix);
1121     av_freep(&s->q_intra_matrix16);
1122     av_freep(&s->q_inter_matrix16);
1123     av_freep(&s->input_picture);
1124     av_freep(&s->reordered_input_picture);
1125     av_freep(&s->dct_offset);
/* Sum of absolute errors of a 16x16 block against a constant reference
 * value 'ref' (used as a flatness/activity measure).
 * NOTE(review): accumulator declaration and return are elided in this view. */
1130 static int get_sae(uint8_t *src, int ref, int stride)
1135 for (y = 0; y < 16; y++) {
1136 for (x = 0; x < 16; x++) {
1137 acc += FFABS(src[x + y * stride] - ref);
/* Count 16x16 macroblocks for which intra coding looks cheaper than inter:
 * a block is counted when its SAE around its own mean (+500 bias) is below
 * the SAD against the reference block at the same position. */
1144 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1145 uint8_t *ref, int stride)
/* Round height down to a whole number of macroblock rows. */
1151 h = s->height & ~15;
1153 for (y = 0; y < h; y += 16) {
1154 for (x = 0; x < w; x += 16) {
1155 int offset = x + y * stride;
1156 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
/* Mean of the 16x16 block: pix_sum / 256, rounded. */
1158 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1159 int sae = get_sae(src + offset, mean, stride);
/* Comparison yields 0/1; the +500 biases the decision toward inter. */
1161 acc += sae + 500 < sad;
/* Thin wrapper: allocate an encoder Picture (with motion-estimation and
 * scratch contexts) via the shared ff_alloc_picture() helper. */
1167 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1169 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1170 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1171 s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1172 &s->linesize, &s->uvlinesize);
/* Take a user-supplied input frame, validate/derive its pts, and place it
 * into s->input_picture[] at the position implied by the encoder delay.
 * The frame is either referenced directly ("direct"/shared) when its layout
 * matches the encoder's, or copied into a newly allocated Picture.
 * NOTE(review): several lines (error paths, else-branches) are elided in
 * this extract; code tokens are kept verbatim. */
1175 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1177 Picture *pic = NULL;
1179 int i, display_picture_number = 0, ret;
/* Delay between input and output: max_b_frames, or 1 for non-low-delay. */
1180 int encoding_delay = s->max_b_frames ? s->max_b_frames
1181 : (s->low_delay ? 0 : 1);
1182 int flush_offset = 1;
1187 display_picture_number = s->input_picture_number++;
/* pts handling: user pts must be strictly increasing; otherwise guess. */
1189 if (pts != AV_NOPTS_VALUE) {
1190 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1191 int64_t last = s->user_specified_pts;
1194 av_log(s->avctx, AV_LOG_ERROR,
1195 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1197 return AVERROR(EINVAL);
1200 if (!s->low_delay && display_picture_number == 1)
1201 s->dts_delta = pts - last;
1203 s->user_specified_pts = pts;
1205 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1206 s->user_specified_pts =
1207 pts = s->user_specified_pts + 1;
1208 av_log(s->avctx, AV_LOG_INFO,
1209 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1212 pts = display_picture_number;
/* "direct" use of the caller's buffer requires matching strides and
 * alignment; the checks below disable it otherwise. */
1216 if (!pic_arg->buf[0] ||
1217 pic_arg->linesize[0] != s->linesize ||
1218 pic_arg->linesize[1] != s->uvlinesize ||
1219 pic_arg->linesize[2] != s->uvlinesize)
1221 if ((s->width & 15) || (s->height & 15))
1223 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1225 if (s->linesize & (STRIDE_ALIGN-1))
1228 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1229 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1231 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1235 pic = &s->picture[i];
1239 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1242 ret = alloc_picture(s, pic, direct);
/* If the source already sits at the expected in-place offset, no copy
 * is needed; otherwise copy plane by plane below. */
1247 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1248 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1249 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1252 int h_chroma_shift, v_chroma_shift;
1253 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1257 for (i = 0; i < 3; i++) {
1258 int src_stride = pic_arg->linesize[i];
1259 int dst_stride = i ? s->uvlinesize : s->linesize;
1260 int h_shift = i ? h_chroma_shift : 0;
1261 int v_shift = i ? v_chroma_shift : 0;
1262 int w = s->width >> h_shift;
1263 int h = s->height >> v_shift;
1264 uint8_t *src = pic_arg->data[i];
1265 uint8_t *dst = pic->f->data[i];
/* Interlaced MPEG-2 with tall alignment padding needs special care
 * (condition body elided in this extract). */
1268 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1269 && !s->progressive_sequence
1270 && FFALIGN(s->height, 32) - s->height > 16)
1273 if (!s->avctx->rc_buffer_size)
1274 dst += INPLACE_OFFSET;
/* Fast path: one memcpy when strides match, else row-by-row. */
1276 if (src_stride == dst_stride)
1277 memcpy(dst, src, src_stride * h);
1280 uint8_t *dst2 = dst;
1282 memcpy(dst2, src, w);
/* Pad edges when dimensions are not multiples of the MB/padding size. */
1287 if ((s->width & 15) || (s->height & (vpad-1))) {
1288 s->mpvencdsp.draw_edges(dst, dst_stride,
1298 ret = av_frame_copy_props(pic->f, pic_arg);
1302 pic->f->display_picture_number = display_picture_number;
1303 pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1305 /* Flushing: When we have not received enough input frames,
1306 * ensure s->input_picture[0] contains the first picture */
1307 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1308 if (s->input_picture[flush_offset])
1311 if (flush_offset <= 1)
1314 encoding_delay = encoding_delay - flush_offset + 1;
1317 /* shift buffer entries */
1318 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1319 s->input_picture[i - flush_offset] = s->input_picture[i];
1321 s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether picture p is similar enough to the reference to be skipped
 * entirely. Per-8x8-block differences are folded into a score whose formula
 * is selected by |frame_skip_exp| (max, L1, L2, L3, L4); a negative exponent
 * normalizes back by the corresponding root. Returns the skip decision
 * (return statements elided in this extract). */
1326 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1330 int64_t score64 = 0;
1332 for (plane = 0; plane < 3; plane++) {
1333 const int stride = p->f->linesize[plane];
/* Luma is iterated at 2x the macroblock grid (16x16 as four 8x8 blocks). */
1334 const int bw = plane ? 1 : 2;
1335 for (y = 0; y < s->mb_height * bw; y++) {
1336 for (x = 0; x < s->mb_width * bw; x++) {
/* Non-shared pictures carry the INPLACE offset (16). */
1337 int off = p->shared ? 0 : 16;
1338 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1339 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1340 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1342 switch (FFABS(s->frame_skip_exp)) {
1343 case 0: score = FFMAX(score, v); break;
1344 case 1: score += FFABS(v); break;
1345 case 2: score64 += v * (int64_t)v; break;
1346 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1347 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
/* Negative exponent: take the mean per MB and the |exp|-th root. */
1356 if (s->frame_skip_exp < 0)
1357 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1358 -1.0/s->frame_skip_exp);
1360 if (score64 < s->frame_skip_threshold)
1362 if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
/* Encode one frame with a helper codec context using the send/receive API;
 * used by estimate_best_b_count(). Treats EAGAIN/EOF on receive as benign
 * (return-value accumulation elided in this extract). */
1367 static int encode_frame(AVCodecContext *c, AVFrame *frame, AVPacket *pkt)
1372 ret = avcodec_send_frame(c, frame);
1377 ret = avcodec_receive_packet(c, pkt);
1380 av_packet_unref(pkt);
1381 } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
/* b_frame_strategy == 2: brute-force search for the best number of B-frames.
 * Downscaled (by brd_scale) copies of the queued input pictures are encoded
 * with a throwaway encoder for each candidate B-count j, and the candidate
 * with the lowest rate-distortion cost is returned.
 * NOTE(review): loop headers for j and several cleanup lines are elided in
 * this extract; code tokens are kept verbatim. */
1388 static int estimate_best_b_count(MpegEncContext *s)
1390 const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1392 const int scale = s->brd_scale;
1393 int width = s->width >> scale;
1394 int height = s->height >> scale;
1395 int i, j, out_size, p_lambda, b_lambda, lambda2;
1396 int64_t best_rd = INT64_MAX;
1397 int best_b_count = -1;
1400 av_assert0(scale >= 0 && scale <= 3);
1402 pkt = av_packet_alloc();
1404 return AVERROR(ENOMEM);
/* Lambdas from the last encoded P/B pictures drive the qualities below. */
1407 //s->next_picture_ptr->quality;
1408 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1409 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1410 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1411 if (!b_lambda) // FIXME we should do this somewhere else
1412 b_lambda = p_lambda;
1413 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* Shrink each available input picture (slot 0 is the last reference). */
1416 for (i = 0; i < s->max_b_frames + 2; i++) {
1417 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1418 s->next_picture_ptr;
1421 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1422 pre_input = *pre_input_ptr;
1423 memcpy(data, pre_input_ptr->f->data, sizeof(data));
/* Non-shared pictures store pixels at the INPLACE offset. */
1425 if (!pre_input.shared && i) {
1426 data[0] += INPLACE_OFFSET;
1427 data[1] += INPLACE_OFFSET;
1428 data[2] += INPLACE_OFFSET;
1431 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1432 s->tmp_frames[i]->linesize[0],
1434 pre_input.f->linesize[0],
1436 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1437 s->tmp_frames[i]->linesize[1],
1439 pre_input.f->linesize[1],
1440 width >> 1, height >> 1);
1441 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1442 s->tmp_frames[i]->linesize[2],
1444 pre_input.f->linesize[2],
1445 width >> 1, height >> 1);
/* Try each candidate B-frame count j with a fresh throwaway encoder. */
1449 for (j = 0; j < s->max_b_frames + 1; j++) {
1453 if (!s->input_picture[j])
1456 c = avcodec_alloc_context3(NULL);
1458 ret = AVERROR(ENOMEM);
1464 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1465 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1466 c->mb_decision = s->avctx->mb_decision;
1467 c->me_cmp = s->avctx->me_cmp;
1468 c->mb_cmp = s->avctx->mb_cmp;
1469 c->me_sub_cmp = s->avctx->me_sub_cmp;
1470 c->pix_fmt = AV_PIX_FMT_YUV420P;
1471 c->time_base = s->avctx->time_base;
1472 c->max_b_frames = s->max_b_frames;
1474 ret = avcodec_open2(c, codec, NULL);
/* First frame is always coded as I at the best quality. */
1479 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1480 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1482 out_size = encode_frame(c, s->tmp_frames[0], pkt);
1488 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
/* Every (j+1)-th frame (and the last) is a P, the rest are Bs. */
1490 for (i = 0; i < s->max_b_frames + 1; i++) {
1491 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1493 s->tmp_frames[i + 1]->pict_type = is_p ?
1494 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1495 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1497 out_size = encode_frame(c, s->tmp_frames[i + 1], pkt);
1503 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1506 /* get the delayed frames */
1507 out_size = encode_frame(c, NULL, pkt);
1512 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* Distortion term from the helper encoder's accumulated SSE. */
1514 rd += c->error[0] + c->error[1] + c->error[2];
1522 avcodec_free_context(&c);
1523 av_packet_unref(pkt);
1530 av_packet_free(&pkt);
1532 return best_b_count;
/* Pick the next picture to encode and decide its coding type (I/P/B):
 * applies frame-skip, forced types from 2-pass stats, and one of the
 * B-frame strategies, then reorders input pictures into coding order and
 * sets up s->new_picture / s->current_picture_ptr.
 * NOTE(review): many lines (braces, early returns) are elided in this
 * extract; code tokens are kept verbatim. */
1535 static int select_input_picture(MpegEncContext *s)
/* Shift the reorder queue down by one. */
1539 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1540 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1541 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1543 /* set next picture type & ordering */
1544 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
/* Frame skipping: drop the input if it is close enough to the last
 * reference and we are not at a GOP boundary. */
1545 if (s->frame_skip_threshold || s->frame_skip_factor) {
1546 if (s->picture_in_gop_number < s->gop_size &&
1547 s->next_picture_ptr &&
1548 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1549 // FIXME check that the gop check above is +-1 correct
1550 av_frame_unref(s->input_picture[0]->f);
1552 ff_vbv_update(s, 0);
/* No reference yet (or intra-only codec): code an I picture. */
1558 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1559 !s->next_picture_ptr || s->intra_only) {
1560 s->reordered_input_picture[0] = s->input_picture[0];
1561 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1562 s->reordered_input_picture[0]->f->coded_picture_number =
1563 s->coded_picture_number++;
/* 2nd pass: take picture types from the rate-control log. */
1567 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1568 for (i = 0; i < s->max_b_frames + 1; i++) {
1569 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1571 if (pict_num >= s->rc_context.num_entries)
1573 if (!s->input_picture[i]) {
1574 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1578 s->input_picture[i]->f->pict_type =
1579 s->rc_context.entry[pict_num].new_pict_type;
/* B-frame strategies: 0 = fixed count, 1 = intra-count heuristic,
 * 2 = brute-force search (estimate_best_b_count). */
1583 if (s->b_frame_strategy == 0) {
1584 b_frames = s->max_b_frames;
1585 while (b_frames && !s->input_picture[b_frames])
1587 } else if (s->b_frame_strategy == 1) {
1588 for (i = 1; i < s->max_b_frames + 1; i++) {
1589 if (s->input_picture[i] &&
1590 s->input_picture[i]->b_frame_score == 0) {
1591 s->input_picture[i]->b_frame_score =
1593 s->input_picture[i ]->f->data[0],
1594 s->input_picture[i - 1]->f->data[0],
1598 for (i = 0; i < s->max_b_frames + 1; i++) {
1599 if (!s->input_picture[i] ||
1600 s->input_picture[i]->b_frame_score - 1 >
1601 s->mb_num / s->b_sensitivity)
1605 b_frames = FFMAX(0, i - 1);
/* Reset scores so the next window is re-evaluated. */
1608 for (i = 0; i < b_frames + 1; i++) {
1609 s->input_picture[i]->b_frame_score = 0;
1611 } else if (s->b_frame_strategy == 2) {
1612 b_frames = estimate_best_b_count(s);
/* Respect user-forced picture types inside the B run. */
1619 for (i = b_frames - 1; i >= 0; i--) {
1620 int type = s->input_picture[i]->f->pict_type;
1621 if (type && type != AV_PICTURE_TYPE_B)
1624 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1625 b_frames == s->max_b_frames) {
1626 av_log(s->avctx, AV_LOG_ERROR,
1627 "warning, too many B-frames in a row\n");
/* GOP boundary handling: shrink the B run or force an I picture. */
1630 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1631 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1632 s->gop_size > s->picture_in_gop_number) {
1633 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1635 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1637 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1641 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1642 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
/* Coding order: the anchor (P/I) first, then the B pictures. */
1645 s->reordered_input_picture[0] = s->input_picture[b_frames];
1646 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1647 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1648 s->reordered_input_picture[0]->f->coded_picture_number =
1649 s->coded_picture_number++;
1650 for (i = 0; i < b_frames; i++) {
1651 s->reordered_input_picture[i + 1] = s->input_picture[i];
1652 s->reordered_input_picture[i + 1]->f->pict_type =
1654 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1655 s->coded_picture_number++;
1660 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1662 if (s->reordered_input_picture[0]) {
/* Only non-B pictures become references (3 = both fields). */
1663 s->reordered_input_picture[0]->reference =
1664 s->reordered_input_picture[0]->f->pict_type !=
1665 AV_PICTURE_TYPE_B ? 3 : 0;
1667 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1670 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1671 // input is a shared pix, so we can't modify it -> allocate a new
1672 // one & ensure that the shared one is reuseable
1675 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1678 pic = &s->picture[i];
1680 pic->reference = s->reordered_input_picture[0]->reference;
1681 if (alloc_picture(s, pic, 0) < 0) {
1685 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1689 /* mark us unused / free shared pic */
1690 av_frame_unref(s->reordered_input_picture[0]->f);
1691 s->reordered_input_picture[0]->shared = 0;
1693 s->current_picture_ptr = pic;
1695 // input is not a shared pix -> reuse buffer for current_pix
1696 s->current_picture_ptr = s->reordered_input_picture[0];
1697 for (i = 0; i < 4; i++) {
1698 if (s->new_picture.f->data[i])
1699 s->new_picture.f->data[i] += INPLACE_OFFSET;
1702 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1703 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1704 s->current_picture_ptr)) < 0)
1707 s->picture_number = s->new_picture.f->display_picture_number;
/* Post-encode bookkeeping: pad reference-picture edges for unrestricted MV,
 * remember last picture type/lambda, and mirror stats into the deprecated
 * coded_frame / error fields behind their FF_API guards. */
1712 static void frame_end(MpegEncContext *s)
1714 if (s->unrestricted_mv &&
1715 s->current_picture.reference &&
1717 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1718 int hshift = desc->log2_chroma_w;
1719 int vshift = desc->log2_chroma_h;
/* Replicate border pixels so out-of-frame motion vectors are valid. */
1720 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1721 s->current_picture.f->linesize[0],
1722 s->h_edge_pos, s->v_edge_pos,
1723 EDGE_WIDTH, EDGE_WIDTH,
1724 EDGE_TOP | EDGE_BOTTOM);
1725 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1726 s->current_picture.f->linesize[1],
1727 s->h_edge_pos >> hshift,
1728 s->v_edge_pos >> vshift,
1729 EDGE_WIDTH >> hshift,
1730 EDGE_WIDTH >> vshift,
1731 EDGE_TOP | EDGE_BOTTOM);
1732 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1733 s->current_picture.f->linesize[2],
1734 s->h_edge_pos >> hshift,
1735 s->v_edge_pos >> vshift,
1736 EDGE_WIDTH >> hshift,
1737 EDGE_WIDTH >> vshift,
1738 EDGE_TOP | EDGE_BOTTOM);
1743 s->last_pict_type = s->pict_type;
1744 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1745 if (s->pict_type!= AV_PICTURE_TYPE_B)
1746 s->last_non_b_pict_type = s->pict_type;
/* Deprecated public fields kept in sync while the API still exists. */
1748 #if FF_API_CODED_FRAME
1749 FF_DISABLE_DEPRECATION_WARNINGS
1750 av_frame_unref(s->avctx->coded_frame);
1751 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1752 FF_ENABLE_DEPRECATION_WARNINGS
1754 #if FF_API_ERROR_FRAME
1755 FF_DISABLE_DEPRECATION_WARNINGS
1756 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1757 sizeof(s->current_picture.encoding_error));
1758 FF_ENABLE_DEPRECATION_WARNINGS
/* Refresh the per-coefficient DCT noise-reduction offsets from the running
 * error statistics, separately for intra and inter blocks. Counters are
 * halved once they exceed 2^16 so the averages track recent frames. */
1762 static void update_noise_reduction(MpegEncContext *s)
1766 for (intra = 0; intra < 2; intra++) {
1767 if (s->dct_count[intra] > (1 << 16)) {
1768 for (i = 0; i < 64; i++) {
1769 s->dct_error_sum[intra][i] >>= 1;
1771 s->dct_count[intra] >>= 1;
/* offset = noise_reduction * count / error_sum, rounded. */
1774 for (i = 0; i < 64; i++) {
1775 s->dct_offset[intra][i] = (s->noise_reduction *
1776 s->dct_count[intra] +
1777 s->dct_error_sum[intra][i] / 2) /
1778 (s->dct_error_sum[intra][i] + 1);
/* Per-frame setup: rotate last/next/current reference pictures, adjust data
 * pointers and strides for field pictures, select the dequantizers for the
 * target codec, and refresh noise-reduction tables. */
1783 static int frame_start(MpegEncContext *s)
1787 /* mark & release old frames */
1788 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1789 s->last_picture_ptr != s->next_picture_ptr &&
1790 s->last_picture_ptr->f->buf[0]) {
1791 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1794 s->current_picture_ptr->f->pict_type = s->pict_type;
1795 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1797 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1798 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1799 s->current_picture_ptr)) < 0)
/* Non-B pictures advance the reference chain. */
1802 if (s->pict_type != AV_PICTURE_TYPE_B) {
1803 s->last_picture_ptr = s->next_picture_ptr;
1805 s->next_picture_ptr = s->current_picture_ptr;
1808 if (s->last_picture_ptr) {
1809 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1810 if (s->last_picture_ptr->f->buf[0] &&
1811 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1812 s->last_picture_ptr)) < 0)
1815 if (s->next_picture_ptr) {
1816 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1817 if (s->next_picture_ptr->f->buf[0] &&
1818 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1819 s->next_picture_ptr)) < 0)
/* Field pictures: double the strides and, for bottom field, start one
 * line down so each field is addressed as a half-height frame. */
1823 if (s->picture_structure!= PICT_FRAME) {
1825 for (i = 0; i < 4; i++) {
1826 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1827 s->current_picture.f->data[i] +=
1828 s->current_picture.f->linesize[i];
1830 s->current_picture.f->linesize[i] *= 2;
1831 s->last_picture.f->linesize[i] *= 2;
1832 s->next_picture.f->linesize[i] *= 2;
/* Pick dequantizers matching the output bitstream format. */
1836 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1837 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1838 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1839 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1840 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1841 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1843 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1844 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1847 if (s->dct_error_sum) {
1848 av_assert2(s->noise_reduction && s->encoding);
1849 update_noise_reduction(s);
/* Public encode entry point: queue the input frame, select the picture to
 * code, encode it (re-encoding at a higher lambda if the VBV would
 * overflow), write stuffing/vbv_delay for CBR MPEG-1/2, and fill the output
 * packet with pts/dts/flags and side data.
 * NOTE(review): error-path lines and several closing braces are elided in
 * this extract; code tokens are kept verbatim. */
1855 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1856 const AVFrame *pic_arg, int *got_packet)
1858 MpegEncContext *s = avctx->priv_data;
1859 int i, stuffing_count, ret;
1860 int context_count = s->slice_context_count;
1862 s->vbv_ignore_qmax = 0;
1864 s->picture_in_gop_number++;
1866 if (load_input_picture(s, pic_arg) < 0)
1869 if (select_input_picture(s) < 0) {
/* output? */
1874 if (s->new_picture.f->data[0]) {
/* Single-slice without a caller buffer can grow the internal buffer
 * instead of preallocating the worst case. */
1875 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1876 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1878 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1879 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1882 s->mb_info_ptr = av_packet_new_side_data(pkt,
1883 AV_PKT_DATA_H263_MB_INFO,
1884 s->mb_width*s->mb_height*12);
1885 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* Split the packet buffer proportionally among slice threads. */
1888 for (i = 0; i < context_count; i++) {
1889 int start_y = s->thread_context[i]->start_mb_y;
1890 int end_y = s->thread_context[i]-> end_mb_y;
1891 int h = s->mb_height;
1892 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1893 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1895 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1898 s->pict_type = s->new_picture.f->pict_type;
1900 ret = frame_start(s);
1904 ret = encode_picture(s, s->picture_number);
1905 if (growing_buffer) {
1906 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1907 pkt->data = s->pb.buf;
1908 pkt->size = avctx->internal->byte_buffer_size;
/* Mirror bit statistics into deprecated public fields. */
1913 #if FF_API_STAT_BITS
1914 FF_DISABLE_DEPRECATION_WARNINGS
1915 avctx->header_bits = s->header_bits;
1916 avctx->mv_bits = s->mv_bits;
1917 avctx->misc_bits = s->misc_bits;
1918 avctx->i_tex_bits = s->i_tex_bits;
1919 avctx->p_tex_bits = s->p_tex_bits;
1920 avctx->i_count = s->i_count;
1921 // FIXME f/b_count in avctx
1922 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1923 avctx->skip_count = s->skip_count;
1924 FF_ENABLE_DEPRECATION_WARNINGS
1929 if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->out_format == FMT_MJPEG)
1930 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
/* VBV check: if the frame is too large, raise lambda and re-encode. */
1932 if (avctx->rc_buffer_size) {
1933 RateControlContext *rcc = &s->rc_context;
1934 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1935 int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
1936 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1938 if (put_bits_count(&s->pb) > max_size &&
1939 s->lambda < s->lmax) {
1940 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1941 (s->qscale + 1) / s->qscale);
1942 if (s->adaptive_quant) {
1944 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1945 s->lambda_table[i] =
1946 FFMAX(s->lambda_table[i] + min_step,
1947 s->lambda_table[i] * (s->qscale + 1) /
1950 s->mb_skipped = 0; // done in frame_start()
1951 // done in encode_picture() so we must undo it
1952 if (s->pict_type == AV_PICTURE_TYPE_P) {
1953 if (s->flipflop_rounding ||
1954 s->codec_id == AV_CODEC_ID_H263P ||
1955 s->codec_id == AV_CODEC_ID_MPEG4)
1956 s->no_rounding ^= 1;
1958 if (s->pict_type != AV_PICTURE_TYPE_B) {
1959 s->time_base = s->last_time_base;
1960 s->last_non_b_time = s->time - s->pp_time;
/* Reset all slice bit writers before the retry. */
1962 for (i = 0; i < context_count; i++) {
1963 PutBitContext *pb = &s->thread_context[i]->pb;
1964 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1966 s->vbv_ignore_qmax = 1;
1967 av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1971 av_assert0(avctx->rc_max_rate);
1974 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1975 ff_write_pass1_stats(s);
1977 for (i = 0; i < 4; i++) {
1978 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1979 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1981 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1982 s->current_picture_ptr->encoding_error,
1983 (avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1986 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1987 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1988 s->misc_bits + s->i_tex_bits +
1990 flush_put_bits(&s->pb);
1991 s->frame_bits = put_bits_count(&s->pb);
/* VBV stuffing: pad the frame with codec-appropriate filler bits. */
1993 stuffing_count = ff_vbv_update(s, s->frame_bits);
1994 s->stuffing_bits = 8*stuffing_count;
1995 if (stuffing_count) {
1996 if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
1997 av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
2001 switch (s->codec_id) {
2002 case AV_CODEC_ID_MPEG1VIDEO:
2003 case AV_CODEC_ID_MPEG2VIDEO:
2004 while (stuffing_count--) {
2005 put_bits(&s->pb, 8, 0);
/* MPEG-4 uses a stuffing start code (0x1C3) then 0xFF bytes. */
2008 case AV_CODEC_ID_MPEG4:
2009 put_bits(&s->pb, 16, 0);
2010 put_bits(&s->pb, 16, 0x1C3);
2011 stuffing_count -= 4;
2012 while (stuffing_count--) {
2013 put_bits(&s->pb, 8, 0xFF);
2017 av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2019 flush_put_bits(&s->pb);
2020 s->frame_bits = put_bits_count(&s->pb);
2023 /* update MPEG-1/2 vbv_delay for CBR */
2024 if (avctx->rc_max_rate &&
2025 avctx->rc_min_rate == avctx->rc_max_rate &&
2026 s->out_format == FMT_MPEG1 &&
2027 90000LL * (avctx->rc_buffer_size - 1) <=
2028 avctx->rc_max_rate * 0xFFFFLL) {
2029 AVCPBProperties *props;
2032 int vbv_delay, min_delay;
2033 double inbits = avctx->rc_max_rate *
2034 av_q2d(avctx->time_base);
2035 int minbits = s->frame_bits - 8 *
2036 (s->vbv_delay_ptr - s->pb.buf - 1);
2037 double bits = s->rc_context.buffer_index + minbits - inbits;
2040 av_log(avctx, AV_LOG_ERROR,
2041 "Internal error, negative bits\n");
2043 av_assert1(s->repeat_first_field == 0);
/* vbv_delay is expressed in 90 kHz ticks. */
2045 vbv_delay = bits * 90000 / avctx->rc_max_rate;
2046 min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
2049 vbv_delay = FFMAX(vbv_delay, min_delay);
2051 av_assert0(vbv_delay < 0xFFFF);
/* Patch the 16-bit vbv_delay field in-place in the written header. */
2053 s->vbv_delay_ptr[0] &= 0xF8;
2054 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2055 s->vbv_delay_ptr[1] = vbv_delay >> 5;
2056 s->vbv_delay_ptr[2] &= 0x07;
2057 s->vbv_delay_ptr[2] |= vbv_delay << 3;
2059 props = av_cpb_properties_alloc(&props_size);
2061 return AVERROR(ENOMEM);
2062 props->vbv_delay = vbv_delay * 300;
2064 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
2065 (uint8_t*)props, props_size);
2071 #if FF_API_VBV_DELAY
2072 FF_DISABLE_DEPRECATION_WARNINGS
2073 avctx->vbv_delay = vbv_delay * 300;
2074 FF_ENABLE_DEPRECATION_WARNINGS
2077 s->total_bits += s->frame_bits;
2078 #if FF_API_STAT_BITS
2079 FF_DISABLE_DEPRECATION_WARNINGS
2080 avctx->frame_bits = s->frame_bits;
2081 FF_ENABLE_DEPRECATION_WARNINGS
/* Timestamps: dts lags pts by dts_delta when B-frames reorder output. */
2085 pkt->pts = s->current_picture.f->pts;
2086 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2087 if (!s->current_picture.f->coded_picture_number)
2088 pkt->dts = pkt->pts - s->dts_delta;
2090 pkt->dts = s->reordered_pts;
2091 s->reordered_pts = pkt->pts;
2093 pkt->dts = pkt->pts;
2094 if (s->current_picture.f->key_frame)
2095 pkt->flags |= AV_PKT_FLAG_KEY;
2097 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2102 /* release non-reference frames */
2103 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2104 if (!s->picture[i].reference)
2105 ff_mpeg_unref_picture(avctx, &s->picture[i]);
2108 av_assert1((s->frame_bits & 7) == 0);
2110 pkt->size = s->frame_bits / 8;
2111 *got_packet = !!pkt->size;
/* Zero out a block whose quantized coefficients are so sparse/small that
 * keeping them is not worth the bits: low-frequency positions (per 'tab')
 * are weighted more, and if the weighted score stays below 'threshold' the
 * whole block (optionally sparing the DC term) is cleared. */
2115 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2116 int n, int threshold)
/* Weight per scan position: DC=3, next=2, early ACs=1, rest=0. */
2118 static const char tab[64] = {
2119 3, 2, 2, 1, 1, 1, 1, 1,
2120 1, 1, 1, 1, 1, 1, 1, 1,
2121 1, 1, 1, 1, 1, 1, 1, 1,
2122 0, 0, 0, 0, 0, 0, 0, 0,
2123 0, 0, 0, 0, 0, 0, 0, 0,
2124 0, 0, 0, 0, 0, 0, 0, 0,
2125 0, 0, 0, 0, 0, 0, 0, 0,
2126 0, 0, 0, 0, 0, 0, 0, 0
2131 int16_t *block = s->block[n];
2132 const int last_index = s->block_last_index[n];
/* Negative threshold means: keep the DC coefficient. */
2135 if (threshold < 0) {
2137 threshold = -threshold;
2141 /* Are all we could set to zero already zero? */
2142 if (last_index <= skip_dc - 1)
2145 for (i = 0; i <= last_index; i++) {
2146 const int j = s->intra_scantable.permutated[i];
2147 const int level = FFABS(block[j]);
2149 if (skip_dc && i == 0)
2153 } else if (level > 1) {
/* Any coefficient with |level| > 1 makes the block worth keeping. */
2159 if (score >= threshold)
2161 for (i = skip_dc; i <= last_index; i++) {
2162 const int j = s->intra_scantable.permutated[i];
2166 s->block_last_index[n] = 0;
2168 s->block_last_index[n] = -1;
/* Clamp quantized coefficients to the codec's representable range
 * [min_qcoeff, max_qcoeff], counting clipped values; warns once per block
 * in the simple MB-decision mode. Intra DC is never clipped. */
2171 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2175 const int maxlevel = s->max_qcoeff;
2176 const int minlevel = s->min_qcoeff;
2180 i = 1; // skip clipping of intra dc
2184 for (; i <= last_index; i++) {
2185 const int j = s->intra_scantable.permutated[i];
2186 int level = block[j];
2188 if (level > maxlevel) {
2191 } else if (level < minlevel) {
2199 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2200 av_log(s->avctx, AV_LOG_INFO,
2201 "warning, clipping %d dct coefficients to %d..%d\n",
2202 overflow, minlevel, maxlevel);
/* Build an 8x8 perceptual weight table from the local variance of each
 * pixel's 3x3 neighbourhood (clipped at the block border): flat areas get
 * low weights, textured areas higher ones. Used by the trellis refiner. */
2205 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2209 for (y = 0; y < 8; y++) {
2210 for (x = 0; x < 8; x++) {
/* Accumulate sum and sum-of-squares over the clipped 3x3 window. */
2216 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2217 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2218 int v = ptr[x2 + y2 * stride];
/* weight = 36 * stddev-like term; 'count' is the window size. */
2224 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2229 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2230 int motion_x, int motion_y,
2231 int mb_block_height,
2235 int16_t weight[12][64];
2236 int16_t orig[12][64];
2237 const int mb_x = s->mb_x;
2238 const int mb_y = s->mb_y;
2241 int dct_offset = s->linesize * 8; // default for progressive frames
2242 int uv_dct_offset = s->uvlinesize * 8;
2243 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2244 ptrdiff_t wrap_y, wrap_c;
2246 for (i = 0; i < mb_block_count; i++)
2247 skip_dct[i] = s->skipdct;
2249 if (s->adaptive_quant) {
2250 const int last_qp = s->qscale;
2251 const int mb_xy = mb_x + mb_y * s->mb_stride;
2253 s->lambda = s->lambda_table[mb_xy];
2256 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2257 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2258 s->dquant = s->qscale - last_qp;
2260 if (s->out_format == FMT_H263) {
2261 s->dquant = av_clip(s->dquant, -2, 2);
2263 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2265 if (s->pict_type == AV_PICTURE_TYPE_B) {
2266 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2269 if (s->mv_type == MV_TYPE_8X8)
2275 ff_set_qscale(s, last_qp + s->dquant);
2276 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2277 ff_set_qscale(s, s->qscale + s->dquant);
2279 wrap_y = s->linesize;
2280 wrap_c = s->uvlinesize;
2281 ptr_y = s->new_picture.f->data[0] +
2282 (mb_y * 16 * wrap_y) + mb_x * 16;
2283 ptr_cb = s->new_picture.f->data[1] +
2284 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2285 ptr_cr = s->new_picture.f->data[2] +
2286 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2288 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2289 uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2290 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2291 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2292 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2294 16, 16, mb_x * 16, mb_y * 16,
2295 s->width, s->height);
2297 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2299 mb_block_width, mb_block_height,
2300 mb_x * mb_block_width, mb_y * mb_block_height,
2302 ptr_cb = ebuf + 16 * wrap_y;
2303 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2305 mb_block_width, mb_block_height,
2306 mb_x * mb_block_width, mb_y * mb_block_height,
2308 ptr_cr = ebuf + 16 * wrap_y + 16;
2312 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2313 int progressive_score, interlaced_score;
2315 s->interlaced_dct = 0;
2316 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2317 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2318 NULL, wrap_y, 8) - 400;
2320 if (progressive_score > 0) {
2321 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2322 NULL, wrap_y * 2, 8) +
2323 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2324 NULL, wrap_y * 2, 8);
2325 if (progressive_score > interlaced_score) {
2326 s->interlaced_dct = 1;
2328 dct_offset = wrap_y;
2329 uv_dct_offset = wrap_c;
2331 if (s->chroma_format == CHROMA_422 ||
2332 s->chroma_format == CHROMA_444)
2338 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2339 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2340 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2341 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2343 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2347 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2348 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2349 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2350 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2351 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2352 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2353 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2354 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2355 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2356 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2357 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2358 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2362 op_pixels_func (*op_pix)[4];
2363 qpel_mc_func (*op_qpix)[16];
2364 uint8_t *dest_y, *dest_cb, *dest_cr;
2366 dest_y = s->dest[0];
2367 dest_cb = s->dest[1];
2368 dest_cr = s->dest[2];
2370 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2371 op_pix = s->hdsp.put_pixels_tab;
2372 op_qpix = s->qdsp.put_qpel_pixels_tab;
2374 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2375 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2378 if (s->mv_dir & MV_DIR_FORWARD) {
2379 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2380 s->last_picture.f->data,
2382 op_pix = s->hdsp.avg_pixels_tab;
2383 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2385 if (s->mv_dir & MV_DIR_BACKWARD) {
2386 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2387 s->next_picture.f->data,
2391 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2392 int progressive_score, interlaced_score;
2394 s->interlaced_dct = 0;
2395 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2396 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2400 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2401 progressive_score -= 400;
2403 if (progressive_score > 0) {
2404 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2406 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2410 if (progressive_score > interlaced_score) {
2411 s->interlaced_dct = 1;
2413 dct_offset = wrap_y;
2414 uv_dct_offset = wrap_c;
2416 if (s->chroma_format == CHROMA_422)
2422 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2423 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2424 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2425 dest_y + dct_offset, wrap_y);
2426 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2427 dest_y + dct_offset + 8, wrap_y);
2429 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2433 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2434 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2435 if (!s->chroma_y_shift) { /* 422 */
2436 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2437 dest_cb + uv_dct_offset, wrap_c);
2438 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2439 dest_cr + uv_dct_offset, wrap_c);
2442 /* pre quantization */
2443 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2444 2 * s->qscale * s->qscale) {
2446 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2448 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2450 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2451 wrap_y, 8) < 20 * s->qscale)
2453 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2454 wrap_y, 8) < 20 * s->qscale)
2456 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2458 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2460 if (!s->chroma_y_shift) { /* 422 */
2461 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2462 dest_cb + uv_dct_offset,
2463 wrap_c, 8) < 20 * s->qscale)
2465 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2466 dest_cr + uv_dct_offset,
2467 wrap_c, 8) < 20 * s->qscale)
2473 if (s->quantizer_noise_shaping) {
2475 get_visual_weight(weight[0], ptr_y , wrap_y);
2477 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2479 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2481 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2483 get_visual_weight(weight[4], ptr_cb , wrap_c);
2485 get_visual_weight(weight[5], ptr_cr , wrap_c);
2486 if (!s->chroma_y_shift) { /* 422 */
2488 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2491 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2494 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2497 /* DCT & quantize */
2498 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2500 for (i = 0; i < mb_block_count; i++) {
2503 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2504 // FIXME we could decide to change to quantizer instead of
2506 // JS: I don't think that would be a good idea it could lower
2507 // quality instead of improve it. Just INTRADC clipping
2508 // deserves changes in quantizer
2510 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2512 s->block_last_index[i] = -1;
2514 if (s->quantizer_noise_shaping) {
2515 for (i = 0; i < mb_block_count; i++) {
2517 s->block_last_index[i] =
2518 dct_quantize_refine(s, s->block[i], weight[i],
2519 orig[i], i, s->qscale);
2524 if (s->luma_elim_threshold && !s->mb_intra)
2525 for (i = 0; i < 4; i++)
2526 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2527 if (s->chroma_elim_threshold && !s->mb_intra)
2528 for (i = 4; i < mb_block_count; i++)
2529 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2531 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2532 for (i = 0; i < mb_block_count; i++) {
2533 if (s->block_last_index[i] == -1)
2534 s->coded_score[i] = INT_MAX / 256;
2539 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2540 s->block_last_index[4] =
2541 s->block_last_index[5] = 0;
2543 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2544 if (!s->chroma_y_shift) { /* 422 / 444 */
2545 for (i=6; i<12; i++) {
2546 s->block_last_index[i] = 0;
2547 s->block[i][0] = s->block[4][0];
2552 // non c quantize code returns incorrect block_last_index FIXME
2553 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2554 for (i = 0; i < mb_block_count; i++) {
2556 if (s->block_last_index[i] > 0) {
2557 for (j = 63; j > 0; j--) {
2558 if (s->block[i][s->intra_scantable.permutated[j]])
2561 s->block_last_index[i] = j;
2566 /* huffman encode */
2567 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2568 case AV_CODEC_ID_MPEG1VIDEO:
2569 case AV_CODEC_ID_MPEG2VIDEO:
2570 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2571 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2573 case AV_CODEC_ID_MPEG4:
2574 if (CONFIG_MPEG4_ENCODER)
2575 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2577 case AV_CODEC_ID_MSMPEG4V2:
2578 case AV_CODEC_ID_MSMPEG4V3:
2579 case AV_CODEC_ID_WMV1:
2580 if (CONFIG_MSMPEG4_ENCODER)
2581 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2583 case AV_CODEC_ID_WMV2:
2584 if (CONFIG_WMV2_ENCODER)
2585 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2587 case AV_CODEC_ID_H261:
2588 if (CONFIG_H261_ENCODER)
2589 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2591 case AV_CODEC_ID_H263:
2592 case AV_CODEC_ID_H263P:
2593 case AV_CODEC_ID_FLV1:
2594 case AV_CODEC_ID_RV10:
2595 case AV_CODEC_ID_RV20:
2596 if (CONFIG_H263_ENCODER)
2597 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2599 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
2600 case AV_CODEC_ID_MJPEG:
2601 case AV_CODEC_ID_AMV:
2602 ff_mjpeg_encode_mb(s, s->block);
2605 case AV_CODEC_ID_SPEEDHQ:
2606 if (CONFIG_SPEEDHQ_ENCODER)
2607 ff_speedhq_encode_mb(s, s->block);
/**
 * Encode one macroblock, dispatching on the chroma subsampling format.
 * 4:2:0 -> 8x8 chroma blocks, 6 blocks per MB;
 * 4:2:2 -> 16x8 chroma, 8 blocks per MB;
 * otherwise (4:4:4) -> 16x16 chroma, 12 blocks per MB.
 */
2614 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2616     if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2617     else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2618     else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/**
 * Snapshot encoder state from s into d before a trial macroblock encoding,
 * so that a candidate coding mode can be encoded and later rolled back.
 * Counterpart of copy_context_after_encode().
 */
2621 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2624     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2627     d->mb_skip_run= s->mb_skip_run;
     /* DC predictors (one per component) */
2629         d->last_dc[i] = s->last_dc[i];
     /* per-category bit-usage statistics */
2632     d->mv_bits= s->mv_bits;
2633     d->i_tex_bits= s->i_tex_bits;
2634     d->p_tex_bits= s->p_tex_bits;
2635     d->i_count= s->i_count;
2636     d->f_count= s->f_count;
2637     d->b_count= s->b_count;
2638     d->skip_count= s->skip_count;
2639     d->misc_bits= s->misc_bits;
     /* quantizer state */
2643     d->qscale= s->qscale;
2644     d->dquant= s->dquant;
     /* MPEG-4 escape-3 level length (affects coefficient coding) */
2646     d->esc3_level_length= s->esc3_level_length;
/**
 * Commit encoder state from the trial context s into d after a candidate
 * macroblock encoding: motion vectors, DC predictors, bit statistics and
 * the coding decisions made for this MB. Counterpart of
 * copy_context_before_encode().
 */
2649 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
     /* current and last motion vectors */
2652     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2653     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2656     d->mb_skip_run= s->mb_skip_run;
2658         d->last_dc[i] = s->last_dc[i];
     /* per-category bit-usage statistics */
2661     d->mv_bits= s->mv_bits;
2662     d->i_tex_bits= s->i_tex_bits;
2663     d->p_tex_bits= s->p_tex_bits;
2664     d->i_count= s->i_count;
2665     d->f_count= s->f_count;
2666     d->b_count= s->b_count;
2667     d->skip_count= s->skip_count;
2668     d->misc_bits= s->misc_bits;
     /* coding decisions taken during the trial encode */
2670     d->mb_intra= s->mb_intra;
2671     d->mb_skipped= s->mb_skipped;
2672     d->mv_type= s->mv_type;
2673     d->mv_dir= s->mv_dir;
     /* with data partitioning the texture bitstream lives in a separate
      * PutBitContext that must be carried over as well */
2675     if(s->data_partitioning){
2677         d->tex_pb= s->tex_pb;
2681         d->block_last_index[i]= s->block_last_index[i];
2682     d->interlaced_dct= s->interlaced_dct;
2683     d->qscale= s->qscale;
2685     d->esc3_level_length= s->esc3_level_length;
/**
 * Trial-encode the current macroblock with one candidate coding mode
 * (given by type and motion_x/motion_y) into a scratch bit buffer,
 * compute its cost (bits, or rate-distortion when mb_decision is RD),
 * and keep it as the best candidate if it beats *dmin.
 *
 * Double-buffered: *next_block selects which of the two bit buffers /
 * block arrays the trial is written into, so the best candidate so far
 * is never overwritten.
 */
2688 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2689                            PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2690                            int *dmin, int *next_block, int motion_x, int motion_y)
2693     uint8_t *dest_backup[3];
     /* restore pre-trial state, then point the output at the scratch side */
2695     copy_context_before_encode(s, backup, type);
2697     s->block= s->blocks[*next_block];
2698     s->pb= pb[*next_block];
2699     if(s->data_partitioning){
2700         s->pb2   = pb2   [*next_block];
2701         s->tex_pb= tex_pb[*next_block];
     /* redirect reconstruction into the RD scratchpad so the real
      * picture is not modified by a trial that may be discarded */
2705         memcpy(dest_backup, s->dest, sizeof(s->dest));
2706         s->dest[0] = s->sc.rd_scratchpad;
2707         s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2708         s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2709         av_assert0(s->linesize >= 32); //FIXME
2712     encode_mb(s, motion_x, motion_y);
     /* base cost: number of bits produced (all partitions) */
2714     score= put_bits_count(&s->pb);
2715     if(s->data_partitioning){
2716         score+= put_bits_count(&s->pb2);
2717         score+= put_bits_count(&s->tex_pb);
     /* full RD: reconstruct and add lambda-weighted SSE distortion */
2720     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2721         ff_mpv_reconstruct_mb(s, s->block);
2723         score *= s->lambda2;
2724         score += sse_mb(s) << FF_LAMBDA_SHIFT;
2728         memcpy(s->dest, dest_backup, sizeof(s->dest));
     /* this trial won: remember its state as the best so far */
2735         copy_context_after_encode(best, s, type);
/**
 * Sum of squared errors between two pixel blocks of size w x h.
 * 16x16 and 8x8 use the (possibly SIMD) mecc.sse functions; other sizes
 * fall back to a scalar loop using the squared-value lookup table.
 * The table pointer is offset by 256 so negative differences
 * (src1 - src2 in [-255, 255]) index it correctly.
 */
2739 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2740     const uint32_t *sq = ff_square_tab + 256;
2745         return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2746     else if(w==8 && h==8)
2747         return s->mecc.sse[1](NULL, src1, src2, stride, 8);
     /* generic fallback: accumulate squared differences pixel by pixel */
2751             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/**
 * Distortion (SSE, or NSSE if selected via mb_cmp) of the current
 * macroblock: reconstructed s->dest vs. the source picture, summed over
 * luma and both chroma planes. The MB is clipped at the right/bottom
 * picture edges; the clipped path uses the generic sse() helper.
 */
2760 static int sse_mb(MpegEncContext *s){
     /* clip MB dimensions at the picture border */
2764     if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2765     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
     /* full 16x16 MB: use the fast (possibly SIMD) comparators */
2768         if(s->avctx->mb_cmp == FF_CMP_NSSE){
2769             return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize   * 16, s->dest[0], s->linesize,   16) +
2770                    s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[1], s->uvlinesize,  8) +
2771                    s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[2], s->uvlinesize,  8);
2773             return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize   * 16, s->dest[0], s->linesize,   16) +
2774                    s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[1], s->uvlinesize,  8) +
2775                    s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[2], s->uvlinesize,  8);
     /* clipped (edge) MB: generic SSE; chroma planes use half dimensions
      * (4:2:0 assumed on this path) */
2778         return  sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2779                +sse(s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2780                +sse(s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/**
 * Slice-thread worker: pre-pass P-frame motion estimation over this
 * thread's MB rows, scanning bottom-to-top / right-to-left.
 * Uses the pre-pass diamond size (pre_dia_size) rather than dia_size.
 */
2783 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2784     MpegEncContext *s= *(void**)arg;
2788     s->me.dia_size= s->avctx->pre_dia_size;
2789     s->first_slice_line=1;
     /* reverse scan order: rows end_mb_y-1 .. start_mb_y, MBs right to left */
2790     for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2791         for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2792             ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2794         s->first_slice_line=0;
/**
 * Slice-thread worker: full motion estimation for this thread's MB rows.
 * For each macroblock, computes motion vector(s) and mb_type and stores
 * them in the context (B-frame vs. P-frame estimation as appropriate).
 */
2802 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2803     MpegEncContext *s= *(void**)arg;
2805     s->me.dia_size= s->avctx->dia_size;
2806     s->first_slice_line=1;
2807     for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2808         s->mb_x=0; //for block init below
2809         ff_init_block_index(s);
2810         for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
             /* advance the four luma block indices by one MB (2 blocks wide) */
2811             s->block_index[0]+=2;
2812             s->block_index[1]+=2;
2813             s->block_index[2]+=2;
2814             s->block_index[3]+=2;
2816             /* compute motion vector & mb_type and store in context */
2817             if(s->pict_type==AV_PICTURE_TYPE_B)
2818                 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2820                 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2822         s->first_slice_line=0;
/**
 * Slice-thread worker: compute per-macroblock luma variance and mean
 * over this thread's MB rows, storing them in current_picture.mb_var /
 * mb_mean and accumulating the variance sum for rate control.
 */
2827 static int mb_var_thread(AVCodecContext *c, void *arg){
2828     MpegEncContext *s= *(void**)arg;
2831     for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2832         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2835             uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2837             int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
             /* variance = E[x^2] - E[x]^2 over the 16x16 luma block
              * (256 pixels -> the >>8 shifts; +500+128 for rounding) */
2839             varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2840                     (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2842             s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2843             s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2844             s->me.mb_var_sum_temp    += varc;
/**
 * Finish the current slice: codec-specific partition merging / stuffing,
 * then byte-align the bitstream and (for pass-1 rate control) account
 * the trailing bits as misc_bits.
 */
2850 static void write_slice_end(MpegEncContext *s){
2851     if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2852         if(s->partitioned_frame){
2853             ff_mpeg4_merge_partitions(s);
         /* MPEG-4 bit stuffing to byte-align the slice */
2856         ff_mpeg4_stuffing(&s->pb);
2857     } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
2858                s->out_format == FMT_MJPEG) {
2859         ff_mjpeg_encode_stuffing(s);
2860     } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
2861         ff_speedhq_end_slice(s);
2864     flush_put_bits(&s->pb);
     /* partitioned frames account these bits elsewhere */
2866     if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2867         s->misc_bits+= get_bits_diff(s);
/**
 * Append one 12-byte macroblock-info record to the mb_info side-data
 * buffer (used for RTP-style packetization of H.263): bit offset of the
 * MB in the bitstream, quantizer, GOB number, MB address within the GOB
 * and the motion vector predictors.
 */
2870 static void write_mb_info(MpegEncContext *s)
     /* ptr points at the 12-byte slot reserved by update_mb_info() */
2872     uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2873     int offset = put_bits_count(&s->pb);
2874     int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2875     int gobn = s->mb_y / s->gob_index;
2877     if (CONFIG_H263_ENCODER)
2878         ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2879     bytestream_put_le32(&ptr, offset);
2880     bytestream_put_byte(&ptr, s->qscale);
2881     bytestream_put_byte(&ptr, gobn);
2882     bytestream_put_le16(&ptr, mba);
2883     bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2884     bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2885     /* 4MV not implemented */
2886     bytestream_put_byte(&ptr, 0); /* hmv2 */
2887     bytestream_put_byte(&ptr, 0); /* vmv2 */
/**
 * Decide whether a new mb_info record is due (every s->mb_info bytes of
 * output) and track the bitstream positions used for that decision.
 * Called both before encoding an MB (startcode=0) and after writing a
 * startcode (startcode!=0); see the inline comment about the slot being
 * reserved first and filled on the later call.
 */
2890 static void update_mb_info(MpegEncContext *s, int startcode)
     /* enough bytes emitted since the last record: reserve a new slot */
2894     if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
2895         s->mb_info_size += 12;
2896         s->prev_mb_info = s->last_mb_info;
2899         s->prev_mb_info = put_bytes_count(&s->pb, 0);
2900         /* This might have incremented mb_info_size above, and we return without
2901          * actually writing any info into that slot yet. But in that case,
2902          * this will be called again at the start of the after writing the
2903          * start code, actually writing the mb info. */
     /* remember where this MB starts; ensure at least one slot exists */
2907     s->last_mb_info = put_bytes_count(&s->pb, 0);
2908     if (!s->mb_info_size)
2909         s->mb_info_size += 12;
/**
 * Grow the shared output bit buffer when fewer than threshold bytes
 * remain, enlarging it by at least size_increase. Only possible with a
 * single slice context writing into avctx's internal byte_buffer.
 * After reallocation the PutBitContext is rebased and the derived
 * pointers (ptr_lastgob, vbv_delay_ptr) are fixed up.
 *
 * @return 0 on success, AVERROR(ENOMEM) if allocation fails or the size
 *         would overflow, AVERROR(EINVAL) if space is still insufficient.
 */
2913 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2915     if (put_bytes_left(&s->pb, 0) < threshold
2916         && s->slice_context_count == 1
2917         && s->pb.buf == s->avctx->internal->byte_buffer) {
         /* save offsets of pointers into the old buffer so they can be
          * re-derived after the buffer moves */
2918         int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2919         int vbv_pos     = s->vbv_delay_ptr - s->pb.buf;
2921         uint8_t *new_buffer = NULL;
2922         int new_buffer_size = 0;
         /* guard against integer overflow of the new size */
2924         if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2925             av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2926             return AVERROR(ENOMEM);
2931         av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2932                               s->avctx->internal->byte_buffer_size + size_increase);
2934             return AVERROR(ENOMEM);
2936         memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2937         av_free(s->avctx->internal->byte_buffer);
2938         s->avctx->internal->byte_buffer      = new_buffer;
2939         s->avctx->internal->byte_buffer_size = new_buffer_size;
2940         rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2941         s->ptr_lastgob   = s->pb.buf + lastgob_pos;
2942         s->vbv_delay_ptr = s->pb.buf + vbv_pos;
     /* still not enough room (e.g. multi-slice or external buffer) */
2944     if (put_bytes_left(&s->pb, 0) < threshold)
2945         return AVERROR(EINVAL);
2949 static int encode_thread(AVCodecContext *c, void *arg){
2950 MpegEncContext *s= *(void**)arg;
2951 int mb_x, mb_y, mb_y_order;
2952 int chr_h= 16>>s->chroma_y_shift;
2954 MpegEncContext best_s = { 0 }, backup_s;
2955 uint8_t bit_buf[2][MAX_MB_BYTES];
2956 uint8_t bit_buf2[2][MAX_MB_BYTES];
2957 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2958 PutBitContext pb[2], pb2[2], tex_pb[2];
2961 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2962 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2963 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2966 s->last_bits= put_bits_count(&s->pb);
2977 /* init last dc values */
2978 /* note: quant matrix value (8) is implied here */
2979 s->last_dc[i] = 128 << s->intra_dc_precision;
2981 s->current_picture.encoding_error[i] = 0;
2983 if(s->codec_id==AV_CODEC_ID_AMV){
2984 s->last_dc[0] = 128*8/13;
2985 s->last_dc[1] = 128*8/14;
2986 s->last_dc[2] = 128*8/14;
2989 memset(s->last_mv, 0, sizeof(s->last_mv));
2993 switch(s->codec_id){
2994 case AV_CODEC_ID_H263:
2995 case AV_CODEC_ID_H263P:
2996 case AV_CODEC_ID_FLV1:
2997 if (CONFIG_H263_ENCODER)
2998 s->gob_index = H263_GOB_HEIGHT(s->height);
3000 case AV_CODEC_ID_MPEG4:
3001 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
3002 ff_mpeg4_init_partitions(s);
3008 s->first_slice_line = 1;
3009 s->ptr_lastgob = s->pb.buf;
3010 for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
3011 if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
3013 mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
3014 if (first_in_slice && mb_y_order != s->start_mb_y)
3015 ff_speedhq_end_slice(s);
3016 s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
3023 ff_set_qscale(s, s->qscale);
3024 ff_init_block_index(s);
3026 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3027 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
3028 int mb_type= s->mb_type[xy];
3032 int size_increase = s->avctx->internal->byte_buffer_size/4
3033 + s->mb_width*MAX_MB_BYTES;
3035 ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
3036 if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
3037 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
3040 if(s->data_partitioning){
3041 if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
3042 put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
3043 av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3049 s->mb_y = mb_y; // moved into loop, can get changed by H.261
3050 ff_update_block_index(s);
3052 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3053 ff_h261_reorder_mb_index(s);
3054 xy= s->mb_y*s->mb_stride + s->mb_x;
3055 mb_type= s->mb_type[xy];
3058 /* write gob / video packet header */
3060 int current_packet_size, is_gob_start;
3062 current_packet_size = put_bytes_count(&s->pb, 1)
3063 - (s->ptr_lastgob - s->pb.buf);
3065 is_gob_start = s->rtp_payload_size &&
3066 current_packet_size >= s->rtp_payload_size &&
3069 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3071 switch(s->codec_id){
3072 case AV_CODEC_ID_H263:
3073 case AV_CODEC_ID_H263P:
3074 if(!s->h263_slice_structured)
3075 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3077 case AV_CODEC_ID_MPEG2VIDEO:
3078 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3079 case AV_CODEC_ID_MPEG1VIDEO:
3080 if(s->mb_skip_run) is_gob_start=0;
3082 case AV_CODEC_ID_MJPEG:
3083 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3088 if(s->start_mb_y != mb_y || mb_x!=0){
3091 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3092 ff_mpeg4_init_partitions(s);
3096 av_assert2((put_bits_count(&s->pb)&7) == 0);
3097 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3099 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3100 int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->mb_x + s->mb_y;
3101 int d = 100 / s->error_rate;
3103 current_packet_size=0;
3104 s->pb.buf_ptr= s->ptr_lastgob;
3105 av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3109 #if FF_API_RTP_CALLBACK
3110 FF_DISABLE_DEPRECATION_WARNINGS
3111 if (s->avctx->rtp_callback){
3112 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3113 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3115 FF_ENABLE_DEPRECATION_WARNINGS
3117 update_mb_info(s, 1);
3119 switch(s->codec_id){
3120 case AV_CODEC_ID_MPEG4:
3121 if (CONFIG_MPEG4_ENCODER) {
3122 ff_mpeg4_encode_video_packet_header(s);
3123 ff_mpeg4_clean_buffers(s);
3126 case AV_CODEC_ID_MPEG1VIDEO:
3127 case AV_CODEC_ID_MPEG2VIDEO:
3128 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3129 ff_mpeg1_encode_slice_header(s);
3130 ff_mpeg1_clean_buffers(s);
3133 case AV_CODEC_ID_H263:
3134 case AV_CODEC_ID_H263P:
3135 if (CONFIG_H263_ENCODER)
3136 ff_h263_encode_gob_header(s, mb_y);
3140 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3141 int bits= put_bits_count(&s->pb);
3142 s->misc_bits+= bits - s->last_bits;
3146 s->ptr_lastgob += current_packet_size;
3147 s->first_slice_line=1;
3148 s->resync_mb_x=mb_x;
3149 s->resync_mb_y=mb_y;
3153 if( (s->resync_mb_x == s->mb_x)
3154 && s->resync_mb_y+1 == s->mb_y){
3155 s->first_slice_line=0;
3159 s->dquant=0; //only for QP_RD
3161 update_mb_info(s, 0);
3163 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3165 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3167 copy_context_before_encode(&backup_s, s, -1);
3169 best_s.data_partitioning= s->data_partitioning;
3170 best_s.partitioned_frame= s->partitioned_frame;
3171 if(s->data_partitioning){
3172 backup_s.pb2= s->pb2;
3173 backup_s.tex_pb= s->tex_pb;
3176 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3177 s->mv_dir = MV_DIR_FORWARD;
3178 s->mv_type = MV_TYPE_16X16;
3180 s->mv[0][0][0] = s->p_mv_table[xy][0];
3181 s->mv[0][0][1] = s->p_mv_table[xy][1];
3182 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3183 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3185 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3186 s->mv_dir = MV_DIR_FORWARD;
3187 s->mv_type = MV_TYPE_FIELD;
3190 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3191 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3192 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3194 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3195 &dmin, &next_block, 0, 0);
3197 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3198 s->mv_dir = MV_DIR_FORWARD;
3199 s->mv_type = MV_TYPE_16X16;
3203 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3204 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3206 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3207 s->mv_dir = MV_DIR_FORWARD;
3208 s->mv_type = MV_TYPE_8X8;
3211 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3212 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3214 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3215 &dmin, &next_block, 0, 0);
3217 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3218 s->mv_dir = MV_DIR_FORWARD;
3219 s->mv_type = MV_TYPE_16X16;
3221 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3222 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3223 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3224 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3226 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3227 s->mv_dir = MV_DIR_BACKWARD;
3228 s->mv_type = MV_TYPE_16X16;
3230 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3231 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3232 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3233 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3235 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3236 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3237 s->mv_type = MV_TYPE_16X16;
3239 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3240 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3241 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3242 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3243 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3244 &dmin, &next_block, 0, 0);
3246 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3247 s->mv_dir = MV_DIR_FORWARD;
3248 s->mv_type = MV_TYPE_FIELD;
3251 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3252 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3253 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3255 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3256 &dmin, &next_block, 0, 0);
3258 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3259 s->mv_dir = MV_DIR_BACKWARD;
3260 s->mv_type = MV_TYPE_FIELD;
3263 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3264 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3265 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3267 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3268 &dmin, &next_block, 0, 0);
3270 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3271 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3272 s->mv_type = MV_TYPE_FIELD;
3274 for(dir=0; dir<2; dir++){
3276 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3277 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3278 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3281 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3282 &dmin, &next_block, 0, 0);
3284 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3286 s->mv_type = MV_TYPE_16X16;
3290 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3291 &dmin, &next_block, 0, 0);
3292 if(s->h263_pred || s->h263_aic){
3294 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3296 ff_clean_intra_table_entries(s); //old mode?
3300 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3301 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3302 const int last_qp= backup_s.qscale;
3305 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3306 static const int dquant_tab[4]={-1,1,-2,2};
3307 int storecoefs = s->mb_intra && s->dc_val[0];
3309 av_assert2(backup_s.dquant == 0);
3312 s->mv_dir= best_s.mv_dir;
3313 s->mv_type = MV_TYPE_16X16;
3314 s->mb_intra= best_s.mb_intra;
3315 s->mv[0][0][0] = best_s.mv[0][0][0];
3316 s->mv[0][0][1] = best_s.mv[0][0][1];
3317 s->mv[1][0][0] = best_s.mv[1][0][0];
3318 s->mv[1][0][1] = best_s.mv[1][0][1];
3320 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3321 for(; qpi<4; qpi++){
3322 int dquant= dquant_tab[qpi];
3323 qp= last_qp + dquant;
3324 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3326 backup_s.dquant= dquant;
3329 dc[i]= s->dc_val[0][ s->block_index[i] ];
3330 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3334 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3335 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3336 if(best_s.qscale != qp){
3339 s->dc_val[0][ s->block_index[i] ]= dc[i];
3340 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3347 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3348 int mx= s->b_direct_mv_table[xy][0];
3349 int my= s->b_direct_mv_table[xy][1];
3351 backup_s.dquant = 0;
3352 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3354 ff_mpeg4_set_direct_mv(s, mx, my);
3355 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3356 &dmin, &next_block, mx, my);
3358 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3359 backup_s.dquant = 0;
3360 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3362 ff_mpeg4_set_direct_mv(s, 0, 0);
3363 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3364 &dmin, &next_block, 0, 0);
3366 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3369 coded |= s->block_last_index[i];
3372 memcpy(s->mv, best_s.mv, sizeof(s->mv));
3373 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3374 mx=my=0; //FIXME find the one we actually used
3375 ff_mpeg4_set_direct_mv(s, mx, my);
3376 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3384 s->mv_dir= best_s.mv_dir;
3385 s->mv_type = best_s.mv_type;
3387 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3388 s->mv[0][0][1] = best_s.mv[0][0][1];
3389 s->mv[1][0][0] = best_s.mv[1][0][0];
3390 s->mv[1][0][1] = best_s.mv[1][0][1];*/
3393 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3394 &dmin, &next_block, mx, my);
3399 s->current_picture.qscale_table[xy] = best_s.qscale;
3401 copy_context_after_encode(s, &best_s, -1);
3403 pb_bits_count= put_bits_count(&s->pb);
3404 flush_put_bits(&s->pb);
3405 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3408 if(s->data_partitioning){
3409 pb2_bits_count= put_bits_count(&s->pb2);
3410 flush_put_bits(&s->pb2);
3411 ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3412 s->pb2= backup_s.pb2;
3414 tex_pb_bits_count= put_bits_count(&s->tex_pb);
3415 flush_put_bits(&s->tex_pb);
3416 ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3417 s->tex_pb= backup_s.tex_pb;
3419 s->last_bits= put_bits_count(&s->pb);
3421 if (CONFIG_H263_ENCODER &&
3422 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3423 ff_h263_update_motion_val(s);
3425 if(next_block==0){ //FIXME 16 vs linesize16
3426 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3427 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3428 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3431 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3432 ff_mpv_reconstruct_mb(s, s->block);
3434 int motion_x = 0, motion_y = 0;
3435 s->mv_type=MV_TYPE_16X16;
3436 // only one MB-Type possible
3439 case CANDIDATE_MB_TYPE_INTRA:
3442 motion_x= s->mv[0][0][0] = 0;
3443 motion_y= s->mv[0][0][1] = 0;
3445 case CANDIDATE_MB_TYPE_INTER:
3446 s->mv_dir = MV_DIR_FORWARD;
3448 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3449 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3451 case CANDIDATE_MB_TYPE_INTER_I:
3452 s->mv_dir = MV_DIR_FORWARD;
3453 s->mv_type = MV_TYPE_FIELD;
3456 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3457 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3458 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3461 case CANDIDATE_MB_TYPE_INTER4V:
3462 s->mv_dir = MV_DIR_FORWARD;
3463 s->mv_type = MV_TYPE_8X8;
3466 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3467 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3470 case CANDIDATE_MB_TYPE_DIRECT:
3471 if (CONFIG_MPEG4_ENCODER) {
3472 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3474 motion_x=s->b_direct_mv_table[xy][0];
3475 motion_y=s->b_direct_mv_table[xy][1];
3476 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3479 case CANDIDATE_MB_TYPE_DIRECT0:
3480 if (CONFIG_MPEG4_ENCODER) {
3481 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3483 ff_mpeg4_set_direct_mv(s, 0, 0);
3486 case CANDIDATE_MB_TYPE_BIDIR:
3487 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3489 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3490 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3491 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3492 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3494 case CANDIDATE_MB_TYPE_BACKWARD:
3495 s->mv_dir = MV_DIR_BACKWARD;
3497 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3498 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3500 case CANDIDATE_MB_TYPE_FORWARD:
3501 s->mv_dir = MV_DIR_FORWARD;
3503 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3504 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3506 case CANDIDATE_MB_TYPE_FORWARD_I:
3507 s->mv_dir = MV_DIR_FORWARD;
3508 s->mv_type = MV_TYPE_FIELD;
3511 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3512 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3513 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3516 case CANDIDATE_MB_TYPE_BACKWARD_I:
3517 s->mv_dir = MV_DIR_BACKWARD;
3518 s->mv_type = MV_TYPE_FIELD;
3521 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3522 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3523 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3526 case CANDIDATE_MB_TYPE_BIDIR_I:
3527 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3528 s->mv_type = MV_TYPE_FIELD;
3530 for(dir=0; dir<2; dir++){
3532 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3533 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3534 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3539 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3542 encode_mb(s, motion_x, motion_y);
3544 // RAL: Update last macroblock type
3545 s->last_mv_dir = s->mv_dir;
3547 if (CONFIG_H263_ENCODER &&
3548 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3549 ff_h263_update_motion_val(s);
3551 ff_mpv_reconstruct_mb(s, s->block);
3554 /* clean the MV table in IPS frames for direct mode in B-frames */
3555 if(s->mb_intra /* && I,P,S_TYPE */){
3556 s->p_mv_table[xy][0]=0;
3557 s->p_mv_table[xy][1]=0;
3560 if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3564 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3565 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3567 s->current_picture.encoding_error[0] += sse(
3568 s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3569 s->dest[0], w, h, s->linesize);
3570 s->current_picture.encoding_error[1] += sse(
3571 s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3572 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3573 s->current_picture.encoding_error[2] += sse(
3574 s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3575 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3578 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3579 ff_h263_loop_filter(s);
3581 ff_dlog(s->avctx, "MB %d %d bits\n",
3582 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3586 //not beautiful here but we must write it before flushing so it has to be here
3587 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3588 ff_msmpeg4_encode_ext_header(s);
3592 #if FF_API_RTP_CALLBACK
3593 FF_DISABLE_DEPRECATION_WARNINGS
3594 /* Send the last GOB if RTP */
3595 if (s->avctx->rtp_callback) {
3596 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3597 int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3598 /* Call the RTP callback to send the last GOB */
3600 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3602 FF_ENABLE_DEPRECATION_WARNINGS
/* MERGE(field): accumulate src->field into dst->field, then zero the source
 * so a later merge cannot double-count it.
 * NOTE(review): expands to TWO statements with no do{}while(0) wrapper, so it
 * must never be used as the body of an unbraced if/else. */
3608 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold per-slice motion-estimation statistics from a worker slice context
 * into the main context after the ME pass: scene-change score and the two
 * macroblock-variance accumulators. */
3609 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3610 MERGE(me.scene_change_score);
3611 MERGE(me.mc_mb_var_sum_temp);
3612 MERGE(me.mb_var_sum_temp);
/* Fold per-slice encoding statistics from a worker slice context into the
 * main context after slice encoding, and append the worker's bitstream to
 * the main bit buffer.
 * NOTE(review): this listing is elided (original lines 3616-3617, 3620-3627,
 * 3633, 3638-3640 are not visible); additional MERGE()s likely sit in the
 * gaps — confirm against the full file. */
3615 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3618 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3619 MERGE(dct_count[1]);
3628 MERGE(er.error_count);
3629 MERGE(padding_bug_score);
3630 MERGE(current_picture.encoding_error[0]);
3631 MERGE(current_picture.encoding_error[1]);
3632 MERGE(current_picture.encoding_error[2]);
/* Noise-reduction error sums exist only when noise reduction is enabled. */
3634 if (dst->noise_reduction){
3635 for(i=0; i<64; i++){
3636 MERGE(dct_error_sum[0][i]);
3637 MERGE(dct_error_sum[1][i]);
/* Both bit writers must be byte-aligned before copying raw bytes below. */
3641 av_assert1(put_bits_count(&src->pb) % 8 ==0);
3642 av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3643 ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3644 flush_put_bits(&dst->pb);
/* Choose the picture-level quality (lambda/qscale): use a pre-computed
 * next_lambda if one is pending, otherwise ask the rate controller unless
 * qscale is fixed.  With adaptive quantization, per-MB qscale tables are
 * cleaned up for codecs that restrict per-MB qscale deltas (MPEG-4, H.263
 * family) before ff_init_qscale_tab().
 * @param dry_run nonzero for an estimate-only pass (next_lambda is then kept)
 * Return value paths are partially elided in this listing — presumably <0 on
 * rate-control failure, 0 on success; TODO confirm against the full file. */
3647 static int estimate_qp(MpegEncContext *s, int dry_run){
3648 if (s->next_lambda){
3649 s->current_picture_ptr->f->quality =
3650 s->current_picture.f->quality = s->next_lambda;
/* Consume the pending lambda only on the real pass, not a dry run. */
3651 if(!dry_run) s->next_lambda= 0;
3652 } else if (!s->fixed_qscale) {
3653 int quality = ff_rate_estimate_qscale(s, dry_run);
3654 s->current_picture_ptr->f->quality =
3655 s->current_picture.f->quality = quality;
/* A negative quality from the rate controller signals failure. */
3656 if (s->current_picture.f->quality < 0)
3660 if(s->adaptive_quant){
3661 switch(s->codec_id){
3662 case AV_CODEC_ID_MPEG4:
3663 if (CONFIG_MPEG4_ENCODER)
3664 ff_clean_mpeg4_qscales(s);
3666 case AV_CODEC_ID_H263:
3667 case AV_CODEC_ID_H263P:
3668 case AV_CODEC_ID_FLV1:
3669 if (CONFIG_H263_ENCODER)
3670 ff_clean_h263_qscales(s);
3673 ff_init_qscale_tab(s);
3676 s->lambda= s->lambda_table[0];
3679 s->lambda = s->current_picture.f->quality;
3684 /* must be called before writing the header */
/* Update the temporal distances used for B-frame MV prediction/scaling:
 * pp_time = distance between the two surrounding non-B frames,
 * pb_time = distance from the previous non-B frame to this B frame.
 * Non-B frames also advance last_non_b_time. Time is derived from the
 * frame pts scaled by time_base.num. */
3685 static void set_frame_distances(MpegEncContext * s){
3686 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3687 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3689 if(s->pict_type==AV_PICTURE_TYPE_B){
3690 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3691 av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
3693 s->pp_time= s->time - s->last_non_b_time;
3694 s->last_non_b_time= s->time;
3695 av_assert1(s->picture_number==0 || s->pp_time > 0);
/* Encode one picture: set up timing and rounding mode, estimate qscale,
 * run (threaded) motion estimation, pick f_code/b_code, build quant
 * matrices for MJPEG/AMV/SpeedHQ, write the codec-specific picture header,
 * then run the slice encode threads and merge their contexts/bitstreams.
 * NOTE(review): this listing is heavily elided (many original lines between
 * the visible ones are missing: error-return paths, case labels, breaks and
 * closing braces) — comments below describe only what is visible. */
3699 static int encode_picture(MpegEncContext *s, int picture_number)
3703 int context_count = s->slice_context_count;
3705 s->picture_number = picture_number;
3707 /* Reset the average MB variance */
3708 s->me.mb_var_sum_temp =
3709 s->me.mc_mb_var_sum_temp = 0;
3711 /* we need to initialize some time vars before we can encode B-frames */
3712 // RAL: Condition added for MPEG1VIDEO
3713 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3714 set_frame_distances(s);
3715 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3716 ff_set_mpeg4_time(s);
3718 s->me.scene_change_score=0;
3720 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* Rounding control: I frames reset it; P/S frames toggle it per frame when
 * flipflop rounding is used (H.263+/MPEG-4). */
3722 if(s->pict_type==AV_PICTURE_TYPE_I){
3723 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3724 else s->no_rounding=0;
3725 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3726 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3727 s->no_rounding ^= 1;
/* Pass-2 rate control: dry-run QP estimate then read fcode from stats;
 * otherwise reuse the last lambda of the matching picture type. */
3730 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3731 if (estimate_qp(s,1) < 0)
3733 ff_get_2pass_fcode(s);
3734 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3735 if(s->pict_type==AV_PICTURE_TYPE_B)
3736 s->lambda= s->last_lambda_for[s->pict_type];
3738 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* For non-(A)MJPEG codecs the chroma intra matrices alias the luma ones;
 * free any previously distinct chroma copies before aliasing. */
3742 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3743 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3744 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3745 s->q_chroma_intra_matrix = s->q_intra_matrix;
3746 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3749 s->mb_intra=0; //for the rate distortion & bit compare functions
/* Replicate the main context into each slice-thread context. */
3750 for(i=1; i<context_count; i++){
3751 ret = ff_update_duplicate_context(s->thread_context[i], s);
3759 /* Estimate motion for every MB */
3760 if(s->pict_type != AV_PICTURE_TYPE_I){
/* Bias lambda by me_penalty_compensation (Q8 fixed point) for ME. */
3761 s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3762 s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3763 if (s->pict_type != AV_PICTURE_TYPE_B) {
3764 if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3766 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3770 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3771 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3773 for(i=0; i<s->mb_stride*s->mb_height; i++)
3774 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3776 if(!s->fixed_qscale){
3777 /* finding spatial complexity for I-frame rate control */
3778 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3781 for(i=1; i<context_count; i++){
3782 merge_context_after_me(s, s->thread_context[i]);
3784 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3785 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* Scene-change detection: promote a P frame to I when ME reports a large
 * scene-change score, marking every MB intra. */
3788 if (s->me.scene_change_score > s->scenechange_threshold &&
3789 s->pict_type == AV_PICTURE_TYPE_P) {
3790 s->pict_type= AV_PICTURE_TYPE_I;
3791 for(i=0; i<s->mb_stride*s->mb_height; i++)
3792 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3793 if(s->msmpeg4_version >= 3)
3795 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3796 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* Pick f_code (and clamp over-long MVs) for P/S frames. */
3800 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3801 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3803 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3805 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3806 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3807 s->f_code= FFMAX3(s->f_code, a, b);
3810 ff_fix_long_p_mvs(s, s->intra_penalty ? CANDIDATE_MB_TYPE_INTER : CANDIDATE_MB_TYPE_INTRA);
3811 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3812 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3816 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3817 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
/* Pick f_code/b_code and clamp MVs for B frames (forward + backward). */
3822 if(s->pict_type==AV_PICTURE_TYPE_B){
3825 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3826 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3827 s->f_code = FFMAX(a, b);
3829 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3830 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3831 s->b_code = FFMAX(a, b);
3833 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3834 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3835 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3836 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3837 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3839 for(dir=0; dir<2; dir++){
3842 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3843 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3844 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3845 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* Real (non-dry-run) QP estimation for this picture. */
3853 if (estimate_qp(s, 0) < 0)
3856 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3857 s->pict_type == AV_PICTURE_TYPE_I &&
3858 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3859 s->qscale= 3; //reduce clipping problems
/* MJPEG: bake qscale into the (possibly user-supplied) quant matrices. */
3861 if (s->out_format == FMT_MJPEG) {
3862 const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3863 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3865 if (s->avctx->intra_matrix) {
3867 luma_matrix = s->avctx->intra_matrix;
3869 if (s->avctx->chroma_intra_matrix)
3870 chroma_matrix = s->avctx->chroma_intra_matrix;
3872 /* for mjpeg, we do include qscale in the matrix */
3874 int j = s->idsp.idct_permutation[i];
3876 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3877 s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3879 s->y_dc_scale_table=
3880 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3881 s->chroma_intra_matrix[0] =
3882 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3883 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3884 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3885 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3886 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV: fixed SP5X quant tables and fixed DC scales (13 luma / 14 chroma). */
3889 if(s->codec_id == AV_CODEC_ID_AMV){
3890 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3891 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3893 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3895 s->intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3896 s->chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3898 s->y_dc_scale_table= y;
3899 s->c_dc_scale_table= c;
3900 s->intra_matrix[0] = 13;
3901 s->chroma_intra_matrix[0] = 14;
3902 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3903 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3904 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3905 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3909 if (s->out_format == FMT_SPEEDHQ) {
3910 s->y_dc_scale_table=
3911 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[3];
3914 //FIXME var duplication
3915 s->current_picture_ptr->f->key_frame =
3916 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3917 s->current_picture_ptr->f->pict_type =
3918 s->current_picture.f->pict_type = s->pict_type;
3920 if (s->current_picture.f->key_frame)
3921 s->picture_in_gop_number=0;
3923 s->mb_x = s->mb_y = 0;
3924 s->last_bits= put_bits_count(&s->pb);
/* Write the codec-family-specific picture header (case labels elided). */
3925 switch(s->out_format) {
3926 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
3928 /* s->huffman == HUFFMAN_TABLE_OPTIMAL can only be true for MJPEG. */
3929 if (!CONFIG_MJPEG_ENCODER || s->huffman != HUFFMAN_TABLE_OPTIMAL)
3930 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3931 s->pred, s->intra_matrix, s->chroma_intra_matrix);
3935 if (CONFIG_SPEEDHQ_ENCODER)
3936 ff_speedhq_encode_picture_header(s);
3939 if (CONFIG_H261_ENCODER)
3940 ff_h261_encode_picture_header(s, picture_number);
3943 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3944 ff_wmv2_encode_picture_header(s, picture_number);
3945 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3946 ff_msmpeg4_encode_picture_header(s, picture_number);
3947 else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3948 ret = ff_mpeg4_encode_picture_header(s, picture_number);
3951 } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3952 ret = ff_rv10_encode_picture_header(s, picture_number);
3956 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3957 ff_rv20_encode_picture_header(s, picture_number);
3958 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3959 ff_flv_encode_picture_header(s, picture_number);
3960 else if (CONFIG_H263_ENCODER)
3961 ff_h263_encode_picture_header(s, picture_number);
3964 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3965 ff_mpeg1_encode_picture_header(s, picture_number);
3970 bits= put_bits_count(&s->pb);
3971 s->header_bits= bits - s->last_bits;
/* Sync worker contexts post-ME, run the slice encode threads, then merge
 * each worker's stats and bitstream back into the main context. */
3973 for(i=1; i<context_count; i++){
3974 update_duplicate_context_after_me(s->thread_context[i], s);
3976 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3977 for(i=1; i<context_count; i++){
3978 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3979 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3980 merge_context_after_encode(s, s->thread_context[i]);
/* DCT-domain noise reduction (C reference): for each coefficient, accumulate
 * its magnitude into dct_error_sum[intra] and shrink it towards zero by the
 * learned dct_offset, clamping at zero so the sign never flips.
 * NOTE(review): the loop body is elided here (original lines 3994-3996,
 * 4000, 4004+ missing) — the positive/negative branches and the final store
 * back into block[] are not visible. */
3986 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3987 const int intra= s->mb_intra;
/* Separate statistics are kept for intra vs inter blocks. */
3990 s->dct_count[intra]++;
3992 for(i=0; i<64; i++){
3993 int level= block[i];
/* Positive path: record magnitude, subtract offset, clamp at 0. */
3997 s->dct_error_sum[intra][i] += level;
3998 level -= s->dct_offset[intra][i];
3999 if(level<0) level=0;
/* Negative path: mirror of the above with signs reversed. */
4001 s->dct_error_sum[intra][i] -= level;
4002 level += s->dct_offset[intra][i];
4003 if(level>0) level=0;
/* Trellis (rate-distortion optimal) quantization of one 8x8 block.
 * Runs the forward DCT, builds up to two candidate quantized levels per
 * coefficient, then dynamic-programming over "survivor" end positions picks
 * the level/run sequence minimizing distortion + lambda*bits.
 * @param n     block index (luma 0-3, chroma 4+; selects matrices/VLC tables)
 * @param overflow set if any |level| exceeds s->max_qcoeff
 * @return index of the last nonzero coefficient, or <start_i if all zero
 * NOTE(review): heavily elided listing — variable declarations, the H.263
 * escape handling and several closing braces are not visible. */
4010 static int dct_quantize_trellis_c(MpegEncContext *s,
4011 int16_t *block, int n,
4012 int qscale, int *overflow){
4014 const uint16_t *matrix;
4015 const uint8_t *scantable;
4016 const uint8_t *perm_scantable;
4018 unsigned int threshold1, threshold2;
4030 int coeff_count[64];
4031 int qmul, qadd, start_i, last_non_zero, i, dc;
4032 const int esc_length= s->ac_esc_length;
4034 uint8_t * last_length;
4035 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
4038 s->fdsp.fdct(block);
4040 if(s->dct_error_sum)
4041 s->denoise_dct(s, block);
4043 qadd= ((qscale-1)|1)*8;
/* MPEG-2 optionally uses the nonlinear qscale table. */
4045 if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
4046 else mpeg2_qscale = qscale << 1;
4050 scantable= s->intra_scantable.scantable;
4051 perm_scantable= s->intra_scantable.permutated;
4059 /* For AIC we skip quant/dequant of INTRADC */
4064 /* note: block[0] is assumed to be positive */
4065 block[0] = (block[0] + (q >> 1)) / q;
4068 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4069 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4070 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4071 bias= 1<<(QMAT_SHIFT-1);
4073 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4074 length = s->intra_chroma_ac_vlc_length;
4075 last_length= s->intra_chroma_ac_vlc_last_length;
4077 length = s->intra_ac_vlc_length;
4078 last_length= s->intra_ac_vlc_last_length;
4081 scantable= s->inter_scantable.scantable;
4082 perm_scantable= s->inter_scantable.permutated;
4085 qmat = s->q_inter_matrix[qscale];
4086 matrix = s->inter_matrix;
4087 length = s->inter_ac_vlc_length;
4088 last_length= s->inter_ac_vlc_last_length;
4092 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4093 threshold2= (threshold1<<1);
/* Scan backwards to find the last coefficient that will quantize nonzero. */
4095 for(i=63; i>=start_i; i--) {
4096 const int j = scantable[i];
4097 int level = block[j] * qmat[j];
4099 if(((unsigned)(level+threshold1))>threshold2){
/* Build candidate levels: coeff[0][i] is the rounded level, coeff[1][i] the
 * next smaller magnitude; coeff_count[i] is how many candidates exist. */
4105 for(i=start_i; i<=last_non_zero; i++) {
4106 const int j = scantable[i];
4107 int level = block[j] * qmat[j];
4109 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4110 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4111 if(((unsigned)(level+threshold1))>threshold2){
4113 level= (bias + level)>>QMAT_SHIFT;
4115 coeff[1][i]= level-1;
4116 // coeff[2][k]= level-2;
4118 level= (bias - level)>>QMAT_SHIFT;
4119 coeff[0][i]= -level;
4120 coeff[1][i]= -level+1;
4121 // coeff[2][k]= -level+2;
4123 coeff_count[i]= FFMIN(level, 2);
4124 av_assert2(coeff_count[i]);
4127 coeff[0][i]= (level>>31)|1;
4132 *overflow= s->max_qcoeff < max; //overflow might have happened
4134 if(last_non_zero < start_i){
4135 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4136 return last_non_zero;
4139 score_tab[start_i]= 0;
4140 survivor[0]= start_i;
/* Dynamic programming: for each position and candidate level, try all
 * surviving run starts and keep the cheapest (distortion + bits) path. */
4143 for(i=start_i; i<=last_non_zero; i++){
4144 int level_index, j, zero_distortion;
4145 int dct_coeff= FFABS(block[ scantable[i] ]);
4146 int best_score=256*256*256*120;
/* ifast FDCT outputs are scaled; undo via the inverse AAN scale table. */
4148 if (s->fdsp.fdct == ff_fdct_ifast)
4149 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4150 zero_distortion= dct_coeff*dct_coeff;
4152 for(level_index=0; level_index < coeff_count[i]; level_index++){
4154 int level= coeff[level_index][i];
4155 const int alevel= FFABS(level);
/* Reconstruct the dequantized value per output format to measure true
 * distortion (H.263/H.261, MJPEG, and MPEG-1/2 all dequantize differently). */
4160 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4161 unquant_coeff= alevel*qmul + qadd;
4162 } else if(s->out_format == FMT_MJPEG) {
4163 j = s->idsp.idct_permutation[scantable[i]];
4164 unquant_coeff = alevel * matrix[j] * 8;
4166 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4168 unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4169 unquant_coeff = (unquant_coeff - 1) | 1;
4171 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4172 unquant_coeff = (unquant_coeff - 1) | 1;
4177 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* Non-escape path: level fits the VLC table (|level| < 128 biased). */
4179 if((level&(~127)) == 0){
4180 for(j=survivor_count-1; j>=0; j--){
4181 int run= i - survivor[j];
4182 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4183 score += score_tab[i-run];
4185 if(score < best_score){
4188 level_tab[i+1]= level-64;
4192 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4193 for(j=survivor_count-1; j>=0; j--){
4194 int run= i - survivor[j];
4195 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4196 score += score_tab[i-run];
4197 if(score < last_score){
4200 last_level= level-64;
/* Escape path: fixed escape length instead of a VLC entry. */
4206 distortion += esc_length*lambda;
4207 for(j=survivor_count-1; j>=0; j--){
4208 int run= i - survivor[j];
4209 int score= distortion + score_tab[i-run];
4211 if(score < best_score){
4214 level_tab[i+1]= level-64;
4218 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4219 for(j=survivor_count-1; j>=0; j--){
4220 int run= i - survivor[j];
4221 int score= distortion + score_tab[i-run];
4222 if(score < last_score){
4225 last_level= level-64;
4233 score_tab[i+1]= best_score;
4235 // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
/* Prune survivors that can no longer lead to a better path. */
4236 if(last_non_zero <= 27){
4237 for(; survivor_count; survivor_count--){
4238 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4242 for(; survivor_count; survivor_count--){
4243 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4248 survivor[ survivor_count++ ]= i+1;
/* Choose the best end position (adds the end-of-block cost for non-H.263). */
4251 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4252 last_score= 256*256*256*120;
4253 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4254 int score= score_tab[i];
4256 score += lambda * 2; // FIXME more exact?
4258 if(score < last_score){
4261 last_level= level_tab[i];
4262 last_run= run_tab[i];
4267 s->coded_score[n] = last_score;
4269 dc= FFABS(block[0]);
4270 last_non_zero= last_i - 1;
4271 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4273 if(last_non_zero < start_i)
4274 return last_non_zero;
/* Special case: only the first coefficient survives — re-evaluate it
 * against the DC alone and possibly zero the whole block. */
4276 if(last_non_zero == 0 && start_i == 0){
4278 int best_score= dc * dc;
4280 for(i=0; i<coeff_count[0]; i++){
4281 int level= coeff[i][0];
4282 int alevel= FFABS(level);
4283 int unquant_coeff, score, distortion;
4285 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4286 unquant_coeff= (alevel*qmul + qadd)>>3;
4288 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4289 unquant_coeff = (unquant_coeff - 1) | 1;
4291 unquant_coeff = (unquant_coeff + 4) >> 3;
4292 unquant_coeff<<= 3 + 3;
4294 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4296 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4297 else score= distortion + esc_length*lambda;
4299 if(score < best_score){
4301 best_level= level - 64;
4304 block[0]= best_level;
4305 s->coded_score[n] = best_score - dc*dc;
4306 if(best_level == 0) return -1;
4307 else return last_non_zero;
/* Backtrack the chosen path, writing levels into the permuted block. */
4311 av_assert2(last_level);
4313 block[ perm_scantable[last_non_zero] ]= last_level;
4316 for(; i>start_i; i -= run_tab[i] + 1){
4317 block[ perm_scantable[i-1] ]= level_tab[i];
4320 return last_non_zero;
/* 2-D DCT basis functions in BASIS_SHIFT fixed point, indexed by (permuted)
 * coefficient and spatial position; lazily filled by build_basis(). */
4323 static int16_t basis[64][64];
/* Precompute the 8x8 DCT basis table used by dct_quantize_refine().
 * NOTE(review): the loop headers over i/j/x/y are elided from this listing. */
4325 static void build_basis(uint8_t *perm){
4332 double s= 0.25*(1<<BASIS_SHIFT);
4334 int perm_index= perm[index];
/* DC rows/columns carry the 1/sqrt(2) orthonormalization factor. */
4335 if(i==0) s*= sqrt(0.5);
4336 if(j==0) s*= sqrt(0.5);
4337 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Iterative refinement of an already-quantized block (quantizer noise
 * shaping): greedily tries +/-1 changes to individual coefficients,
 * accepting a change when it lowers distortion (measured via the DCT basis
 * against the residual 'rem') plus lambda-weighted bit-cost delta.
 * @return index of the last nonzero coefficient after refinement
 * NOTE(review): heavily elided listing — the outer retry loop, several
 * declarations and the accept/commit control flow are not fully visible. */
4344 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4345 int16_t *block, int16_t *weight, int16_t *orig,
4348 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4349 const uint8_t *scantable;
4350 const uint8_t *perm_scantable;
4351 // unsigned int threshold1, threshold2;
4356 int qmul, qadd, start_i, last_non_zero, i, dc;
4358 uint8_t * last_length;
4360 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* Lazily build the shared DCT basis table on first use. */
4362 if(basis[0][0] == 0)
4363 build_basis(s->idsp.idct_permutation);
4368 scantable= s->intra_scantable.scantable;
4369 perm_scantable= s->intra_scantable.permutated;
4376 /* For AIC we skip quant/dequant of INTRADC */
4380 q <<= RECON_SHIFT-3;
4381 /* note: block[0] is assumed to be positive */
4383 // block[0] = (block[0] + (q >> 1)) / q;
4385 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4386 // bias= 1<<(QMAT_SHIFT-1);
4387 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4388 length = s->intra_chroma_ac_vlc_length;
4389 last_length= s->intra_chroma_ac_vlc_last_length;
4391 length = s->intra_ac_vlc_length;
4392 last_length= s->intra_ac_vlc_last_length;
4395 scantable= s->inter_scantable.scantable;
4396 perm_scantable= s->inter_scantable.permutated;
4399 length = s->inter_ac_vlc_length;
4400 last_length= s->inter_ac_vlc_last_length;
4402 last_non_zero = s->block_last_index[n];
/* Initialize the residual: reconstruction minus original, in RECON_SHIFT
 * fixed point, with rounding bias folded into dc. */
4404 dc += (1<<(RECON_SHIFT-1));
4405 for(i=0; i<64; i++){
4406 rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
/* Map the caller's weights to the 16..63 range used below. */
4410 for(i=0; i<64; i++){
4415 w= FFABS(weight[i]) + qns*one;
4416 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4419 // w=weight[i] = (63*qns + (w/2)) / w;
4422 av_assert2(w<(1<<6));
4425 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* Accumulate current coefficients into the residual and record runs. */
4429 for(i=start_i; i<=last_non_zero; i++){
4430 int j= perm_scantable[i];
4431 const int level= block[j];
4435 if(level<0) coeff= qmul*level - qadd;
4436 else coeff= qmul*level + qadd;
4437 run_tab[rle_index++]=run;
4440 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
4447 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4450 int run2, best_unquant_change=0, analyze_gradient;
4451 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
/* Optional gradient analysis steers which sign of change is plausible. */
4453 if(analyze_gradient){
4454 for(i=0; i<64; i++){
4457 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
/* Try adjusting the intra DC coefficient by +/-1. */
4463 const int level= block[0];
4464 int change, old_coeff;
4466 av_assert2(s->mb_intra);
4470 for(change=-1; change<=1; change+=2){
4471 int new_level= level + change;
4472 int score, new_coeff;
4474 new_coeff= q*new_level;
4475 if(new_coeff >= 2048 || new_coeff < 0)
4478 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4479 new_coeff - old_coeff);
4480 if(score<best_score){
4483 best_change= change;
4484 best_unquant_change= new_coeff - old_coeff;
4491 run2= run_tab[rle_index++];
/* Try adjusting each AC coefficient by +/-1, accounting for the VLC bit
 * cost deltas of the affected run/level pairs. */
4495 for(i=start_i; i<64; i++){
4496 int j= perm_scantable[i];
4497 const int level= block[j];
4498 int change, old_coeff;
4500 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4504 if(level<0) old_coeff= qmul*level - qadd;
4505 else old_coeff= qmul*level + qadd;
4506 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4510 av_assert2(run2>=0 || i >= last_non_zero );
4513 for(change=-1; change<=1; change+=2){
4514 int new_level= level + change;
4515 int score, new_coeff, unquant_change;
/* Low noise-shaping levels only allow shrinking magnitudes. */
4518 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4522 if(new_level<0) new_coeff= qmul*new_level - qadd;
4523 else new_coeff= qmul*new_level + qadd;
4524 if(new_coeff >= 2048 || new_coeff <= -2048)
4526 //FIXME check for overflow
/* Case: coefficient stays nonzero — plain VLC length delta. */
4529 if(level < 63 && level > -63){
4530 if(i < last_non_zero)
4531 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4532 - length[UNI_AC_ENC_INDEX(run, level+64)];
4534 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4535 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* Case: a zero coefficient becomes +/-1 — splits the following run. */
4538 av_assert2(FFABS(new_level)==1);
4540 if(analyze_gradient){
4541 int g= d1[ scantable[i] ];
4542 if(g && (g^new_level) >= 0)
4546 if(i < last_non_zero){
4547 int next_i= i + run2 + 1;
4548 int next_level= block[ perm_scantable[next_i] ] + 64;
4550 if(next_level&(~127))
4553 if(next_i < last_non_zero)
4554 score += length[UNI_AC_ENC_INDEX(run, 65)]
4555 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4556 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4558 score += length[UNI_AC_ENC_INDEX(run, 65)]
4559 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4560 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4562 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4564 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4565 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* Case: a +/-1 coefficient becomes zero — merges two runs. */
4571 av_assert2(FFABS(level)==1);
4573 if(i < last_non_zero){
4574 int next_i= i + run2 + 1;
4575 int next_level= block[ perm_scantable[next_i] ] + 64;
4577 if(next_level&(~127))
4580 if(next_i < last_non_zero)
4581 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4582 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4583 - length[UNI_AC_ENC_INDEX(run, 65)];
4585 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4586 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4587 - length[UNI_AC_ENC_INDEX(run, 65)];
4589 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4591 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4592 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4599 unquant_change= new_coeff - old_coeff;
4600 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4602 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4604 if(score<best_score){
4607 best_change= change;
4608 best_unquant_change= unquant_change;
4612 prev_level= level + 64;
4613 if(prev_level&(~127))
/* Commit the best change found this pass and update last_non_zero. */
4623 int j= perm_scantable[ best_coeff ];
4625 block[j] += best_change;
4627 if(best_coeff > last_non_zero){
4628 last_non_zero= best_coeff;
4629 av_assert2(block[j]);
4631 for(; last_non_zero>=start_i; last_non_zero--){
4632 if(block[perm_scantable[last_non_zero]])
/* Rebuild the run table after the change. */
4639 for(i=start_i; i<=last_non_zero; i++){
4640 int j= perm_scantable[i];
4641 const int level= block[j];
4644 run_tab[rle_index++]=run;
/* Fold the accepted change into the residual for the next iteration. */
4651 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4657 return last_non_zero;
4661 * Permute an 8x8 block according to permutation.
4662 * @param block the block which will be permuted according to
4663 * the given permutation vector
4664 * @param permutation the permutation vector
4665 * @param last the last non zero coefficient in scantable order, used to
4666 * speed the permutation up
4667 * @param scantable the used scantable, this is only used to speed the
4668 * permutation up, the block is not (inverse) permutated
4669 * to scantable order!
/* NOTE(review): elided listing — the identity-permutation early-out and the
 * temp-buffer declaration/zeroing (original lines 4673-4688) are missing. */
4671 void ff_block_permute(int16_t *block, uint8_t *permutation,
4672 const uint8_t *scantable, int last)
4679 //FIXME it is ok but not clean and might fail for some permutations
4680 // if (permutation[1] == 1)
/* First pass: copy the nonzero coefficients out of block[] (into temp,
 * presumably — the store line is not visible here). */
4683 for (i = 0; i <= last; i++) {
4684 const int j = scantable[i];
/* Second pass: write each value back at its permuted position. */
4689 for (i = 0; i <= last; i++) {
4690 const int j = scantable[i];
4691 const int perm_j = permutation[j];
4692 block[perm_j] = temp[j];
/* Plain (non-trellis) C quantization of one 8x8 block: forward DCT,
 * optional denoising, biased multiply-shift quantization over the scan
 * order, then a permutation fixup for non-identity IDCT layouts.
 * @param n     block index (luma 0-3, chroma 4+)
 * @param overflow set if any |level| exceeds s->max_qcoeff
 * @return index of the last nonzero coefficient
 * NOTE(review): elided listing — intra/inter branch heads, the sign
 * handling and the zeroing of skipped coefficients are not visible. */
4696 int ff_dct_quantize_c(MpegEncContext *s,
4697 int16_t *block, int n,
4698 int qscale, int *overflow)
4700 int i, j, level, last_non_zero, q, start_i;
4702 const uint8_t *scantable;
4705 unsigned int threshold1, threshold2;
4707 s->fdsp.fdct(block);
4709 if(s->dct_error_sum)
4710 s->denoise_dct(s, block);
4713 scantable= s->intra_scantable.scantable;
4721 /* For AIC we skip quant/dequant of INTRADC */
4724 /* note: block[0] is assumed to be positive */
4725 block[0] = (block[0] + (q >> 1)) / q;
4728 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4729 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4731 scantable= s->inter_scantable.scantable;
4734 qmat = s->q_inter_matrix[qscale];
4735 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4737 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4738 threshold2= (threshold1<<1);
/* Backward scan finds the last coefficient that quantizes nonzero; the
 * unsigned-range trick tests |level| > threshold1 in one compare. */
4739 for(i=63;i>=start_i;i--) {
4741 level = block[j] * qmat[j];
4743 if(((unsigned)(level+threshold1))>threshold2){
/* Forward pass quantizes everything up to last_non_zero. */
4750 for(i=start_i; i<=last_non_zero; i++) {
4752 level = block[j] * qmat[j];
4754 // if( bias+level >= (1<<QMAT_SHIFT)
4755 // || bias-level >= (1<<QMAT_SHIFT)){
4756 if(((unsigned)(level+threshold1))>threshold2){
4758 level= (bias + level)>>QMAT_SHIFT;
4761 level= (bias - level)>>QMAT_SHIFT;
4769 *overflow= s->max_qcoeff < max; //overflow might have happened
4771 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4772 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4773 ff_block_permute(block, s->idsp.idct_permutation,
4774 scantable, last_non_zero);
4776 return last_non_zero;
/* Shorthand for the AVOption tables below: field offset inside the
 * encoder's private context, and the common video/encoding flags. */
4779 #define OFFSET(x) offsetof(MpegEncContext, x)
4780 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options of the H.263 encoder.  NOTE(review): the array's
 * terminating entry and closing brace are not visible in this extract. */
4781 static const AVOption h263_options[] = {
4782 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4783 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass exposing h263_options on the H.263 encoder's private context.
 * NOTE(review): the initializer's closing brace is not visible here. */
4788 static const AVClass h263_class = {
4789 .class_name = "H.263 encoder",
4790 .item_name = av_default_item_name,
4791 .option = h263_options,
4792 .version = LIBAVUTIL_VERSION_INT,
/* H.263 encoder registration: routes init/encode/close through the
 * shared mpegvideo encoder entry points.  NOTE(review): the .name
 * field and closing brace are not visible in this extract. */
4795 AVCodec ff_h263_encoder = {
4797 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4798 .type = AVMEDIA_TYPE_VIDEO,
4799 .id = AV_CODEC_ID_H263,
4800 .priv_data_size = sizeof(MpegEncContext),
4801 .init = ff_mpv_encode_init,
4802 .encode2 = ff_mpv_encode_picture,
4803 .close = ff_mpv_encode_end,
4804 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4805 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4806 .priv_class = &h263_class,
/* Private options of the H.263+ (H.263v2) encoder.  NOTE(review): the
 * array's terminating entry and closing brace are not visible here. */
4809 static const AVOption h263p_options[] = {
4810 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4811 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4812 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4813 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
/* AVClass exposing h263p_options on the H.263+ encoder's private
 * context.  NOTE(review): closing brace not visible in this extract. */
4817 static const AVClass h263p_class = {
4818 .class_name = "H.263p encoder",
4819 .item_name = av_default_item_name,
4820 .option = h263p_options,
4821 .version = LIBAVUTIL_VERSION_INT,
/* H.263+ encoder registration; same shared mpegvideo entry points as
 * the plain H.263 encoder, plus slice-threading capability.
 * NOTE(review): .name field and closing brace not visible here. */
4824 AVCodec ff_h263p_encoder = {
4826 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4827 .type = AVMEDIA_TYPE_VIDEO,
4828 .id = AV_CODEC_ID_H263P,
4829 .priv_data_size = sizeof(MpegEncContext),
4830 .init = ff_mpv_encode_init,
4831 .encode2 = ff_mpv_encode_picture,
4832 .close = ff_mpv_encode_end,
4833 .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4834 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4835 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4836 .priv_class = &h263p_class,
/* AVClass for the MS-MPEG4 v2 encoder; uses the generic mpegvideo
 * option table rather than codec-specific options.
 * NOTE(review): closing brace not visible in this extract. */
4839 static const AVClass msmpeg4v2_class = {
4840 .class_name = "msmpeg4v2 encoder",
4841 .item_name = av_default_item_name,
4842 .option = ff_mpv_generic_options,
4843 .version = LIBAVUTIL_VERSION_INT,
/* MS-MPEG4 v2 encoder registration, backed by the shared mpegvideo
 * encoder.  NOTE(review): closing brace not visible in this extract. */
4846 AVCodec ff_msmpeg4v2_encoder = {
4847 .name = "msmpeg4v2",
4848 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4849 .type = AVMEDIA_TYPE_VIDEO,
4850 .id = AV_CODEC_ID_MSMPEG4V2,
4851 .priv_data_size = sizeof(MpegEncContext),
4852 .init = ff_mpv_encode_init,
4853 .encode2 = ff_mpv_encode_picture,
4854 .close = ff_mpv_encode_end,
4855 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4856 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4857 .priv_class = &msmpeg4v2_class,
/* AVClass for the MS-MPEG4 v3 encoder (generic mpegvideo options).
 * NOTE(review): closing brace not visible in this extract. */
4860 static const AVClass msmpeg4v3_class = {
4861 .class_name = "msmpeg4v3 encoder",
4862 .item_name = av_default_item_name,
4863 .option = ff_mpv_generic_options,
4864 .version = LIBAVUTIL_VERSION_INT,
/* MS-MPEG4 v3 encoder registration, backed by the shared mpegvideo
 * encoder.  NOTE(review): the .name field and closing brace are not
 * visible in this extract. */
4867 AVCodec ff_msmpeg4v3_encoder = {
4869 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4870 .type = AVMEDIA_TYPE_VIDEO,
4871 .id = AV_CODEC_ID_MSMPEG4V3,
4872 .priv_data_size = sizeof(MpegEncContext),
4873 .init = ff_mpv_encode_init,
4874 .encode2 = ff_mpv_encode_picture,
4875 .close = ff_mpv_encode_end,
4876 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4877 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4878 .priv_class = &msmpeg4v3_class,
/* AVClass for the WMV1 encoder (generic mpegvideo options).
 * NOTE(review): closing brace not visible in this extract. */
4881 static const AVClass wmv1_class = {
4882 .class_name = "wmv1 encoder",
4883 .item_name = av_default_item_name,
4884 .option = ff_mpv_generic_options,
4885 .version = LIBAVUTIL_VERSION_INT,
4888 AVCodec ff_wmv1_encoder = {
4890 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4891 .type = AVMEDIA_TYPE_VIDEO,
4892 .id = AV_CODEC_ID_WMV1,
4893 .priv_data_size = sizeof(MpegEncContext),
4894 .init = ff_mpv_encode_init,
4895 .encode2 = ff_mpv_encode_picture,
4896 .close = ff_mpv_encode_end,
4897 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4898 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4899 .priv_class = &wmv1_class,