2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/mem_internal.h"
40 #include "libavutil/pixdesc.h"
41 #include "libavutil/opt.h"
42 #include "libavutil/thread.h"
47 #include "mpegvideo.h"
48 #include "mpegvideodata.h"
52 #include "mjpegenc_common.h"
54 #include "mpegutils.h"
56 #include "speedhqenc.h"
58 #include "pixblockdsp.h"
62 #include "aandcttab.h"
64 #include "mpeg4video.h"
66 #include "bytestream.h"
69 #include "packet_internal.h"
/* Fixed-point precision of the intra/inter quantizer bias values
 * (see the bias computations in ff_mpv_encode_init below). */
73 #define QUANT_BIAS_SHIFT 8
/* Shift used for the 16-bit (MMX-era) quantization multiplier tables. */
75 #define QMAT_SHIFT_MMX 16
/* Forward declarations for encoder helpers defined later in this file. */
78 static int encode_picture(MpegEncContext *s, int picture_number);
79 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
80 static int sse_mb(MpegEncContext *s);
81 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
82 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
/* Shared default tables installed by mpv_encode_defaults();
 * default_fcode_tab is populated once in mpv_encode_init_static().
 * NOTE(review): initialization of default_mv_penalty is not visible in
 * this excerpt — presumably done in the same one-time init; confirm. */
84 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
85 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
87 const AVOption ff_mpv_generic_options[] = {
/**
 * Precompute per-qscale quantization multiplier tables from a quant matrix.
 *
 * For each qscale in [qmin, qmax] this fills qmat[qscale][] (32-bit
 * multipliers) and qmat16[qscale][0..1][] (16-bit multiplier + rounding
 * bias pair), choosing the fixed-point scaling according to which forward
 * DCT implementation is active in s->fdsp (the AAN ifast DCT folds
 * ff_aanscales[] into the divisor; the islow/faan DCTs do not).
 *
 * @param quant_matrix  64-entry quant matrix, indexed through the IDCT
 *                      permutation so qmat ends up in permuted order.
 * @param bias          quantizer rounding bias in QUANT_BIAS_SHIFT units,
 *                      folded into qmat16[qscale][1][].
 * @param intra         starting coefficient index of the overflow check
 *                      loop (skips the DC term for intra matrices).
 *
 * NOTE(review): several interior lines of this function are elided in
 * this excerpt (variable declarations, loop bodies, closing braces).
 */
92 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
93 uint16_t (*qmat16)[2][64],
94 const uint16_t *quant_matrix,
95 int bias, int qmin, int qmax, int intra)
97 FDCTDSPContext *fdsp = &s->fdsp;
101 for (qscale = qmin; qscale <= qmax; qscale++) {
/* MPEG-2 non-linear qscale maps the code through a lookup table;
 * otherwise the effective scale is simply 2*qscale. */
105 if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
106 else qscale2 = qscale << 1;
108 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
110 fdsp->fdct == ff_faandct ||
111 #endif /* CONFIG_FAANDCT */
112 fdsp->fdct == ff_jpeg_fdct_islow_10) {
113 for (i = 0; i < 64; i++) {
114 const int j = s->idsp.idct_permutation[i];
115 int64_t den = (int64_t) qscale2 * quant_matrix[j];
116 /* 16 <= qscale * quant_matrix[i] <= 7905
117 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
118 * 19952 <= x <= 249205026
119 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
120 * 3444240 >= (1 << 36) / (x) >= 275 */
122 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
/* ifast DCT leaves the AAN post-scale in the coefficients, so it is
 * divided out here via ff_aanscales[] (hence the extra +14 shift). */
124 } else if (fdsp->fdct == ff_fdct_ifast) {
125 for (i = 0; i < 64; i++) {
126 const int j = s->idsp.idct_permutation[i];
127 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
128 /* 16 <= qscale * quant_matrix[i] <= 7905
129 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
130 * 19952 <= x <= 249205026
131 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
132 * 3444240 >= (1 << 36) / (x) >= 275 */
134 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
137 for (i = 0; i < 64; i++) {
138 const int j = s->idsp.idct_permutation[i];
139 int64_t den = (int64_t) qscale2 * quant_matrix[j];
140 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
141 * Assume x = qscale * quant_matrix[i]
143 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
144 * so 32768 >= (1 << 19) / (x) >= 67 */
145 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
146 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
147 // (qscale * quant_matrix[i]);
148 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
/* Keep the 16-bit multiplier out of the degenerate values 0 and
 * 0x8000 (128*256), which the 16-bit quantizer cannot handle. */
150 if (qmat16[qscale][0][i] == 0 ||
151 qmat16[qscale][0][i] == 128 * 256)
152 qmat16[qscale][0][i] = 128 * 256 - 1;
153 qmat16[qscale][1][i] =
154 ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
155 qmat16[qscale][0][i]);
/* Overflow check: warn if max_coeff * multiplier would not fit in an
 * int at the current shift (starts at `intra` to skip intra DC). */
159 for (i = intra; i < 64; i++) {
161 if (fdsp->fdct == ff_fdct_ifast) {
162 max = (8191LL * ff_aanscales[i]) >> 14;
164 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
170 av_log(s->avctx, AV_LOG_INFO,
171 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/**
 * Derive s->qscale (and s->lambda2) from the current lambda.
 *
 * qscale is approximately lambda * 139 / 2^(FF_LAMBDA_SHIFT+7), rounded,
 * then clipped to [qmin, qmax] (qmax is relaxed to 31 while
 * s->vbv_ignore_qmax is set).
 */
176 static inline void update_qscale(MpegEncContext *s)
/* NOTE(review): the `&& 0` makes this whole non-linear-qscale search
 * dead code — it is deliberately disabled, not a live path. */
178 if (s->q_scale_type == 1 && 0) {
180 int bestdiff=INT_MAX;
/* Disabled search: find the non-linear qscale entry whose scaled value
 * is closest to lambda*139, honoring qmin/qmax. */
183 for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
184 int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
185 if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
186 (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
188 if (diff < bestdiff) {
/* Live path: linear lambda -> qscale conversion with rounding. */
195 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
196 (FF_LAMBDA_SHIFT + 7);
197 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
/* Keep lambda2 (used by RD decisions) in sync with lambda. */
200 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/**
 * Write a 64-entry quant matrix to the bitstream, 8 bits per entry,
 * in zigzag scan order.
 */
204 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
210 for (i = 0; i < 64; i++) {
211 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
/**
 * Initialize s->current_picture.qscale_table from s->lambda_table:
 * each macroblock's lambda is converted to a QP (lambda * 139 scaled
 * down with rounding) and clipped to the configured qmin/qmax range.
 */
220 void ff_init_qscale_tab(MpegEncContext *s)
222 int8_t * const qscale_table = s->current_picture.qscale_table;
/* mb_index2xy maps the linear macroblock index to the table position. */
225 for (i = 0; i < s->mb_num; i++) {
226 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
227 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
228 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/**
 * Copy the fields that motion estimation may have changed from one slice
 * context (src) into another (dst), so duplicated contexts stay in sync.
 */
233 static void update_duplicate_context_after_me(MpegEncContext *dst,
236 #define COPY(a) dst->a= src->a
238 COPY(current_picture);
244 COPY(picture_in_gop_number);
245 COPY(gop_picture_number);
246 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
247 COPY(progressive_frame); // FIXME don't set in encode_header
248 COPY(partitioned_frame); // FIXME don't set in encode_header
/* One-time initialization of shared static tables; run exactly once via
 * ff_thread_once() from mpv_encode_defaults(). Marks the small motion
 * vector range [-16, 16) in default_fcode_tab with fcode 1. */
252 static void mpv_encode_init_static(void)
254 for (int i = -16; i < 16; i++)
255 default_fcode_tab[i + MAX_MV] = 1;
/**
 * Set the given MpegEncContext to defaults for encoding.
 * The changed fields do not depend on the prior state of the context:
 * common defaults are applied, the shared static tables are installed
 * (initialized exactly once, thread-safely), and the picture counters
 * are reset.
 */
262 static void mpv_encode_defaults(MpegEncContext *s)
264 static AVOnce init_static_once = AV_ONCE_INIT;
266 ff_mpv_common_defaults(s);
/* Thread-safe one-time init of default_fcode_tab (and related tables). */
268 ff_thread_once(&init_static_once, mpv_encode_init_static);
270 s->me.mv_penalty = default_mv_penalty;
271 s->fcode_tab = default_fcode_tab;
273 s->input_picture_number = 0;
274 s->picture_in_gop_number = 0;
/**
 * Install the DCT quantization function pointers for this context:
 * x86-optimized versions when available, the C fallback otherwise,
 * and the trellis quantizer as s->dct_quantize when requested
 * (keeping the plain version reachable as s->fast_dct_quantize).
 */
277 av_cold int ff_dct_encode_init(MpegEncContext *s)
280 ff_dct_encode_init_x86(s);
282 if (CONFIG_H263_ENCODER)
283 ff_h263dsp_init(&s->h263dsp);
/* Only set the C fallback if no arch-specific version was installed. */
284 if (!s->dct_quantize)
285 s->dct_quantize = ff_dct_quantize_c;
287 s->denoise_dct = denoise_dct_c;
288 s->fast_dct_quantize = s->dct_quantize;
289 if (s->avctx->trellis)
290 s->dct_quantize = dct_quantize_trellis_c;
/* Init the video encoder: validate AVCodecContext options against the
 * selected codec, configure the MpegEncContext, allocate the per-context
 * tables and precomputed quantization matrices, and initialize the
 * DSP/rate-control/codec-specific sub-modules.
 * Returns 0 on success or a negative AVERROR code.
 * NOTE(review): many interior lines (closing braces, `break`s, some
 * statements) are elided in this excerpt. */
296 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
298 MpegEncContext *s = avctx->priv_data;
299 AVCPBProperties *cpb_props;
300 int i, ret, format_supported;
302 mpv_encode_defaults(s);
/* --- Pixel format validation, per codec --- */
304 switch (avctx->codec_id) {
305 case AV_CODEC_ID_MPEG2VIDEO:
306 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
307 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
308 av_log(avctx, AV_LOG_ERROR,
309 "only YUV420 and YUV422 are supported\n");
310 return AVERROR(EINVAL);
313 case AV_CODEC_ID_MJPEG:
314 case AV_CODEC_ID_AMV:
315 format_supported = 0;
316 /* JPEG color space */
317 if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
318 avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
319 avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
320 (avctx->color_range == AVCOL_RANGE_JPEG &&
321 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
322 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
323 avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
324 format_supported = 1;
325 /* MPEG color space */
326 else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
327 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
328 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
329 avctx->pix_fmt == AV_PIX_FMT_YUV444P))
330 format_supported = 1;
332 if (!format_supported) {
333 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
334 return AVERROR(EINVAL);
337 case AV_CODEC_ID_SPEEDHQ:
338 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
339 avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
340 avctx->pix_fmt != AV_PIX_FMT_YUV444P) {
341 av_log(avctx, AV_LOG_ERROR,
342 "only YUV420/YUV422/YUV444 are supported (no alpha support yet)\n");
343 return AVERROR(EINVAL);
/* Default case: all remaining mpegvideo-based codecs are 4:2:0 only. */
347 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
348 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
349 return AVERROR(EINVAL);
/* Map the pixel format to the internal chroma subsampling enum. */
353 switch (avctx->pix_fmt) {
354 case AV_PIX_FMT_YUVJ444P:
355 case AV_PIX_FMT_YUV444P:
356 s->chroma_format = CHROMA_444;
358 case AV_PIX_FMT_YUVJ422P:
359 case AV_PIX_FMT_YUV422P:
360 s->chroma_format = CHROMA_422;
362 case AV_PIX_FMT_YUVJ420P:
363 case AV_PIX_FMT_YUV420P:
365 s->chroma_format = CHROMA_420;
369 avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
/* Legacy AVCodecContext options folded into the private context. */
371 #if FF_API_PRIVATE_OPT
372 FF_DISABLE_DEPRECATION_WARNINGS
373 if (avctx->rtp_payload_size)
374 s->rtp_payload_size = avctx->rtp_payload_size;
375 if (avctx->me_penalty_compensation)
376 s->me_penalty_compensation = avctx->me_penalty_compensation;
378 s->me_pre = avctx->pre_me;
379 FF_ENABLE_DEPRECATION_WARNINGS
/* --- Basic stream parameters --- */
382 s->bit_rate = avctx->bit_rate;
383 s->width = avctx->width;
384 s->height = avctx->height;
385 if (avctx->gop_size > 600 &&
386 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
387 av_log(avctx, AV_LOG_WARNING,
388 "keyframe interval too large!, reducing it from %d to %d\n",
389 avctx->gop_size, 600);
390 avctx->gop_size = 600;
392 s->gop_size = avctx->gop_size;
394 if (avctx->max_b_frames > MAX_B_FRAMES) {
395 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
396 "is %d.\n", MAX_B_FRAMES);
397 avctx->max_b_frames = MAX_B_FRAMES;
399 s->max_b_frames = avctx->max_b_frames;
400 s->codec_id = avctx->codec->id;
401 s->strict_std_compliance = avctx->strict_std_compliance;
402 s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
403 s->rtp_mode = !!s->rtp_payload_size;
404 s->intra_dc_precision = avctx->intra_dc_precision;
406 // workaround some differences between how applications specify dc precision
407 if (s->intra_dc_precision < 0) {
408 s->intra_dc_precision += 8;
409 } else if (s->intra_dc_precision >= 8)
410 s->intra_dc_precision -= 8;
412 if (s->intra_dc_precision < 0) {
413 av_log(avctx, AV_LOG_ERROR,
414 "intra dc precision must be positive, note some applications use"
415 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
416 return AVERROR(EINVAL);
419 if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))
/* Only MPEG-2 supports non-zero intra DC precision (up to 3). */
422 if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
423 av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
424 return AVERROR(EINVAL);
426 s->user_specified_pts = AV_NOPTS_VALUE;
428 if (s->gop_size <= 1) {
436 s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
/* Adaptive quantization is enabled whenever any masking option or
 * QP-RD requests per-MB qscale decisions. */
438 s->adaptive_quant = (avctx->lumi_masking ||
439 avctx->dark_masking ||
440 avctx->temporal_cplx_masking ||
441 avctx->spatial_cplx_masking ||
444 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
447 s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
/* --- Rate control / VBV sanity checks --- */
449 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
450 switch(avctx->codec_id) {
451 case AV_CODEC_ID_MPEG1VIDEO:
452 case AV_CODEC_ID_MPEG2VIDEO:
453 avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
455 case AV_CODEC_ID_MPEG4:
456 case AV_CODEC_ID_MSMPEG4V1:
457 case AV_CODEC_ID_MSMPEG4V2:
458 case AV_CODEC_ID_MSMPEG4V3:
459 if (avctx->rc_max_rate >= 15000000) {
460 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
461 } else if(avctx->rc_max_rate >= 2000000) {
462 avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
463 } else if(avctx->rc_max_rate >= 384000) {
464 avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
466 avctx->rc_buffer_size = 40;
467 avctx->rc_buffer_size *= 16384;
470 if (avctx->rc_buffer_size) {
471 av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
475 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
476 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
477 return AVERROR(EINVAL);
480 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
481 av_log(avctx, AV_LOG_INFO,
482 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
485 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
486 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
487 return AVERROR(EINVAL);
490 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
491 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
492 return AVERROR(EINVAL);
495 if (avctx->rc_max_rate &&
496 avctx->rc_max_rate == avctx->bit_rate &&
497 avctx->rc_max_rate != avctx->rc_min_rate) {
498 av_log(avctx, AV_LOG_INFO,
499 "impossible bitrate constraints, this will fail\n");
502 if (avctx->rc_buffer_size &&
503 avctx->bit_rate * (int64_t)avctx->time_base.num >
504 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
505 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
506 return AVERROR(EINVAL);
509 if (!s->fixed_qscale &&
510 avctx->bit_rate * av_q2d(avctx->time_base) >
511 avctx->bit_rate_tolerance) {
512 av_log(avctx, AV_LOG_WARNING,
513 "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
514 avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
517 if (avctx->rc_max_rate &&
518 avctx->rc_min_rate == avctx->rc_max_rate &&
519 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
520 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
521 90000LL * (avctx->rc_buffer_size - 1) >
522 avctx->rc_max_rate * 0xFFFFLL) {
523 av_log(avctx, AV_LOG_INFO,
524 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
525 "specified vbv buffer is too large for the given bitrate!\n");
/* --- Feature/codec compatibility checks --- */
528 if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
529 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
530 s->codec_id != AV_CODEC_ID_FLV1) {
531 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
532 return AVERROR(EINVAL);
535 if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
536 av_log(avctx, AV_LOG_ERROR,
537 "OBMC is only supported with simple mb decision\n");
538 return AVERROR(EINVAL);
541 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
542 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
543 return AVERROR(EINVAL);
546 if (s->max_b_frames &&
547 s->codec_id != AV_CODEC_ID_MPEG4 &&
548 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
549 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
550 av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
551 return AVERROR(EINVAL);
553 if (s->max_b_frames < 0) {
554 av_log(avctx, AV_LOG_ERROR,
555 "max b frames must be 0 or positive for mpegvideo based encoders\n");
556 return AVERROR(EINVAL);
559 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
560 s->codec_id == AV_CODEC_ID_H263 ||
561 s->codec_id == AV_CODEC_ID_H263P) &&
562 (avctx->sample_aspect_ratio.num > 255 ||
563 avctx->sample_aspect_ratio.den > 255)) {
564 av_log(avctx, AV_LOG_WARNING,
565 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
566 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
567 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
568 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
/* --- Resolution limits per codec --- */
571 if ((s->codec_id == AV_CODEC_ID_H263 ||
572 s->codec_id == AV_CODEC_ID_H263P) &&
573 (avctx->width > 2048 ||
574 avctx->height > 1152 )) {
575 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
576 return AVERROR(EINVAL);
578 if ((s->codec_id == AV_CODEC_ID_H263 ||
579 s->codec_id == AV_CODEC_ID_H263P) &&
580 ((avctx->width &3) ||
581 (avctx->height&3) )) {
582 av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
583 return AVERROR(EINVAL);
586 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
587 (avctx->width > 4095 ||
588 avctx->height > 4095 )) {
589 av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
590 return AVERROR(EINVAL);
593 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
594 (avctx->width > 16383 ||
595 avctx->height > 16383 )) {
596 av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
597 return AVERROR(EINVAL);
600 if (s->codec_id == AV_CODEC_ID_RV10 &&
602 avctx->height&15 )) {
603 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
604 return AVERROR(EINVAL);
607 if (s->codec_id == AV_CODEC_ID_RV20 &&
610 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
611 return AVERROR(EINVAL);
614 if ((s->codec_id == AV_CODEC_ID_WMV1 ||
615 s->codec_id == AV_CODEC_ID_WMV2) &&
617 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
618 return AVERROR(EINVAL);
621 if ((avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
622 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
623 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
624 return AVERROR(EINVAL);
627 #if FF_API_PRIVATE_OPT
628 FF_DISABLE_DEPRECATION_WARNINGS
629 if (avctx->mpeg_quant)
630 s->mpeg_quant = avctx->mpeg_quant;
631 FF_ENABLE_DEPRECATION_WARNINGS
634 // FIXME mpeg2 uses that too
635 if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
636 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
637 av_log(avctx, AV_LOG_ERROR,
638 "mpeg2 style quantization not supported by codec\n");
639 return AVERROR(EINVAL);
642 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
643 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
644 return AVERROR(EINVAL);
647 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
648 avctx->mb_decision != FF_MB_DECISION_RD) {
649 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
650 return AVERROR(EINVAL);
653 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
654 (s->codec_id == AV_CODEC_ID_AMV ||
655 s->codec_id == AV_CODEC_ID_MJPEG)) {
656 // Used to produce garbage with MJPEG.
657 av_log(avctx, AV_LOG_ERROR,
658 "QP RD is no longer compatible with MJPEG or AMV\n");
659 return AVERROR(EINVAL);
662 #if FF_API_PRIVATE_OPT
663 FF_DISABLE_DEPRECATION_WARNINGS
664 if (avctx->scenechange_threshold)
665 s->scenechange_threshold = avctx->scenechange_threshold;
666 FF_ENABLE_DEPRECATION_WARNINGS
669 if (s->scenechange_threshold < 1000000000 &&
670 (avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
671 av_log(avctx, AV_LOG_ERROR,
672 "closed gop with scene change detection are not supported yet, "
673 "set threshold to 1000000000\n");
674 return AVERROR_PATCHWELCOME;
677 if (avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
678 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
679 s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
680 av_log(avctx, AV_LOG_ERROR,
681 "low delay forcing is only available for mpeg2, "
682 "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
683 return AVERROR(EINVAL);
685 if (s->max_b_frames != 0) {
686 av_log(avctx, AV_LOG_ERROR,
687 "B-frames cannot be used with low delay\n");
688 return AVERROR(EINVAL);
692 if (s->q_scale_type == 1) {
693 if (avctx->qmax > 28) {
694 av_log(avctx, AV_LOG_ERROR,
695 "non linear quant only supports qmax <= 28 currently\n");
696 return AVERROR_PATCHWELCOME;
700 if (avctx->slices > 1 &&
701 (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
702 av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
703 return AVERROR(EINVAL);
706 if (avctx->thread_count > 1 &&
707 s->codec_id != AV_CODEC_ID_MPEG4 &&
708 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
709 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
710 s->codec_id != AV_CODEC_ID_MJPEG &&
711 (s->codec_id != AV_CODEC_ID_H263P)) {
712 av_log(avctx, AV_LOG_ERROR,
713 "multi threaded encoding not supported by codec\n");
714 return AVERROR_PATCHWELCOME;
717 if (avctx->thread_count < 1) {
718 av_log(avctx, AV_LOG_ERROR,
719 "automatic thread number detection not supported by codec, "
721 return AVERROR_PATCHWELCOME;
724 if (!avctx->time_base.den || !avctx->time_base.num) {
725 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
726 return AVERROR(EINVAL);
729 #if FF_API_PRIVATE_OPT
730 FF_DISABLE_DEPRECATION_WARNINGS
731 if (avctx->b_frame_strategy)
732 s->b_frame_strategy = avctx->b_frame_strategy;
733 if (avctx->b_sensitivity != 40)
734 s->b_sensitivity = avctx->b_sensitivity;
735 FF_ENABLE_DEPRECATION_WARNINGS
738 if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
739 av_log(avctx, AV_LOG_INFO,
740 "notice: b_frame_strategy only affects the first pass\n");
741 s->b_frame_strategy = 0;
/* Reduce the time base to lowest terms. */
744 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
746 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
747 avctx->time_base.den /= i;
748 avctx->time_base.num /= i;
/* --- Quantizer bias defaults per codec family --- */
752 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
753 // (a + x * 3 / 8) / x
754 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
755 s->inter_quant_bias = 0;
757 s->intra_quant_bias = 0;
759 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
762 if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
763 av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
764 return AVERROR(EINVAL);
767 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
769 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
770 avctx->time_base.den > (1 << 16) - 1) {
771 av_log(avctx, AV_LOG_ERROR,
772 "timebase %d/%d not supported by MPEG 4 standard, "
773 "the maximum admitted value for the timebase denominator "
774 "is %d\n", avctx->time_base.num, avctx->time_base.den,
776 return AVERROR(EINVAL);
778 s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
/* --- Codec-specific output format / feature configuration --- */
780 switch (avctx->codec->id) {
781 case AV_CODEC_ID_MPEG1VIDEO:
782 s->out_format = FMT_MPEG1;
783 s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
784 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
786 case AV_CODEC_ID_MPEG2VIDEO:
787 s->out_format = FMT_MPEG1;
788 s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
789 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
792 case AV_CODEC_ID_MJPEG:
793 case AV_CODEC_ID_AMV:
794 s->out_format = FMT_MJPEG;
795 s->intra_only = 1; /* force intra only for jpeg */
796 if (!CONFIG_MJPEG_ENCODER)
797 return AVERROR_ENCODER_NOT_FOUND;
798 if ((ret = ff_mjpeg_encode_init(s)) < 0)
803 case AV_CODEC_ID_SPEEDHQ:
804 s->out_format = FMT_SPEEDHQ;
805 s->intra_only = 1; /* force intra only for SHQ */
806 if (!CONFIG_SPEEDHQ_ENCODER)
807 return AVERROR_ENCODER_NOT_FOUND;
808 if ((ret = ff_speedhq_encode_init(s)) < 0)
813 case AV_CODEC_ID_H261:
814 if (!CONFIG_H261_ENCODER)
815 return AVERROR_ENCODER_NOT_FOUND;
816 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
817 av_log(avctx, AV_LOG_ERROR,
818 "The specified picture size of %dx%d is not valid for the "
819 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
820 s->width, s->height);
821 return AVERROR(EINVAL);
823 s->out_format = FMT_H261;
826 s->rtp_mode = 0; /* Sliced encoding not supported */
828 case AV_CODEC_ID_H263:
829 if (!CONFIG_H263_ENCODER)
830 return AVERROR_ENCODER_NOT_FOUND;
831 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
832 s->width, s->height) == 8) {
833 av_log(avctx, AV_LOG_ERROR,
834 "The specified picture size of %dx%d is not valid for "
835 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
836 "352x288, 704x576, and 1408x1152. "
837 "Try H.263+.\n", s->width, s->height);
838 return AVERROR(EINVAL);
840 s->out_format = FMT_H263;
844 case AV_CODEC_ID_H263P:
845 s->out_format = FMT_H263;
848 s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
849 s->modified_quant = s->h263_aic;
850 s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
851 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
854 /* These are just to be sure */
858 case AV_CODEC_ID_FLV1:
859 s->out_format = FMT_H263;
860 s->h263_flv = 2; /* format = 1; 11-bit codes */
861 s->unrestricted_mv = 1;
862 s->rtp_mode = 0; /* don't allow GOB */
866 case AV_CODEC_ID_RV10:
867 s->out_format = FMT_H263;
871 case AV_CODEC_ID_RV20:
872 s->out_format = FMT_H263;
875 s->modified_quant = 1;
879 s->unrestricted_mv = 0;
881 case AV_CODEC_ID_MPEG4:
882 s->out_format = FMT_H263;
884 s->unrestricted_mv = 1;
885 s->low_delay = s->max_b_frames ? 0 : 1;
886 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
888 case AV_CODEC_ID_MSMPEG4V2:
889 s->out_format = FMT_H263;
891 s->unrestricted_mv = 1;
892 s->msmpeg4_version = 2;
896 case AV_CODEC_ID_MSMPEG4V3:
897 s->out_format = FMT_H263;
899 s->unrestricted_mv = 1;
900 s->msmpeg4_version = 3;
901 s->flipflop_rounding = 1;
905 case AV_CODEC_ID_WMV1:
906 s->out_format = FMT_H263;
908 s->unrestricted_mv = 1;
909 s->msmpeg4_version = 4;
910 s->flipflop_rounding = 1;
914 case AV_CODEC_ID_WMV2:
915 s->out_format = FMT_H263;
917 s->unrestricted_mv = 1;
918 s->msmpeg4_version = 5;
919 s->flipflop_rounding = 1;
924 return AVERROR(EINVAL);
927 #if FF_API_PRIVATE_OPT
928 FF_DISABLE_DEPRECATION_WARNINGS
929 if (avctx->noise_reduction)
930 s->noise_reduction = avctx->noise_reduction;
931 FF_ENABLE_DEPRECATION_WARNINGS
934 avctx->has_b_frames = !s->low_delay;
938 s->progressive_frame =
939 s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
940 AV_CODEC_FLAG_INTERLACED_ME) ||
/* --- Common context / DSP initialization --- */
945 if ((ret = ff_mpv_common_init(s)) < 0)
948 ff_fdctdsp_init(&s->fdsp, avctx);
949 ff_me_cmp_init(&s->mecc, avctx);
950 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
951 ff_pixblockdsp_init(&s->pdsp, avctx);
952 ff_qpeldsp_init(&s->qdsp);
954 if (s->msmpeg4_version) {
955 int ac_stats_size = 2 * 2 * (MAX_LEVEL + 1) * (MAX_RUN + 1) * 2 * sizeof(int);
956 if (!(s->ac_stats = av_mallocz(ac_stats_size)))
957 return AVERROR(ENOMEM);
/* Per-context table allocations; freed in ff_mpv_encode_end(). */
960 if (!(avctx->stats_out = av_mallocz(256)) ||
961 !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix, 32) ||
962 !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix, 32) ||
963 !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix, 32) ||
964 !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16, 32) ||
965 !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
966 !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16, 32) ||
967 !FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_PICTURE_COUNT) ||
968 !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_PICTURE_COUNT))
969 return AVERROR(ENOMEM);
971 if (s->noise_reduction) {
972 if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
973 return AVERROR(ENOMEM);
976 ff_dct_encode_init(s);
978 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
979 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
981 if (s->slice_context_count > 1) {
984 if (avctx->codec_id == AV_CODEC_ID_H263P)
985 s->h263_slice_structured = 1;
988 s->quant_precision = 5;
990 #if FF_API_PRIVATE_OPT
991 FF_DISABLE_DEPRECATION_WARNINGS
992 if (avctx->frame_skip_threshold)
993 s->frame_skip_threshold = avctx->frame_skip_threshold;
994 if (avctx->frame_skip_factor)
995 s->frame_skip_factor = avctx->frame_skip_factor;
996 if (avctx->frame_skip_exp)
997 s->frame_skip_exp = avctx->frame_skip_exp;
998 if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
999 s->frame_skip_cmp = avctx->frame_skip_cmp;
1000 FF_ENABLE_DEPRECATION_WARNINGS
1003 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, avctx->ildct_cmp);
1004 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
/* --- Codec-specific encoder init, per output format --- */
1006 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
1007 ff_h261_encode_init(s);
1008 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
1009 ff_h263_encode_init(s);
1010 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
1011 if ((ret = ff_msmpeg4_encode_init(s)) < 0)
1013 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
1014 && s->out_format == FMT_MPEG1)
1015 ff_mpeg1_encode_init(s);
/* --- Select default quant matrices (IDCT-permuted), then allow the
 *     user-supplied avctx matrices to override them. --- */
1018 for (i = 0; i < 64; i++) {
1019 int j = s->idsp.idct_permutation[i];
1020 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
1022 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
1023 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
1024 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1025 s->intra_matrix[j] =
1026 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1027 } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
1028 s->intra_matrix[j] =
1029 s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1032 s->chroma_intra_matrix[j] =
1033 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1034 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1036 if (avctx->intra_matrix)
1037 s->intra_matrix[j] = avctx->intra_matrix[i];
1038 if (avctx->inter_matrix)
1039 s->inter_matrix[j] = avctx->inter_matrix[i];
1042 /* precompute matrix */
1043 /* for mjpeg, we do include qscale in the matrix */
1044 if (s->out_format != FMT_MJPEG) {
1045 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
1046 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1048 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
1049 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1053 if ((ret = ff_rate_control_init(s)) < 0)
1056 #if FF_API_PRIVATE_OPT
1057 FF_DISABLE_DEPRECATION_WARNINGS
1058 if (avctx->brd_scale)
1059 s->brd_scale = avctx->brd_scale;
1061 if (avctx->prediction_method)
1062 s->pred = avctx->prediction_method + 1;
1063 FF_ENABLE_DEPRECATION_WARNINGS
/* b_frame_strategy == 2 needs downscaled temp frames for lookahead. */
1066 if (s->b_frame_strategy == 2) {
1067 for (i = 0; i < s->max_b_frames + 2; i++) {
1068 s->tmp_frames[i] = av_frame_alloc();
1069 if (!s->tmp_frames[i])
1070 return AVERROR(ENOMEM);
1072 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1073 s->tmp_frames[i]->width = s->width >> s->brd_scale;
1074 s->tmp_frames[i]->height = s->height >> s->brd_scale;
1076 ret = av_frame_get_buffer(s->tmp_frames[i], 0);
/* Export the coded-picture-buffer properties as side data. */
1082 cpb_props = ff_add_cpb_side_data(avctx);
1084 return AVERROR(ENOMEM);
1085 cpb_props->max_bitrate = avctx->rc_max_rate;
1086 cpb_props->min_bitrate = avctx->rc_min_rate;
1087 cpb_props->avg_bitrate = avctx->bit_rate;
1088 cpb_props->buffer_size = avctx->rc_buffer_size;
/**
 * Free all resources owned by the encoder context: rate control state,
 * common mpegvideo state, codec-specific state, per-context tables and
 * the precomputed quantization matrices allocated in ff_mpv_encode_init().
 */
1093 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1095 MpegEncContext *s = avctx->priv_data;
1098 ff_rate_control_uninit(s);
1100 ff_mpv_common_end(s);
1101 if (CONFIG_MJPEG_ENCODER &&
1102 s->out_format == FMT_MJPEG)
1103 ff_mjpeg_encode_close(s);
1105 av_freep(&avctx->extradata);
1107 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1108 av_frame_free(&s->tmp_frames[i]);
1110 ff_free_picture_tables(&s->new_picture);
1111 ff_mpeg_unref_picture(avctx, &s->new_picture);
1113 av_freep(&avctx->stats_out);
1114 av_freep(&s->ac_stats);
/* The chroma matrices may alias the luma ones; only free them when they
 * point at separately-allocated storage, then clear the pointers so the
 * unconditional frees below cannot double-free. */
1116 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1117 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1118 s->q_chroma_intra_matrix= NULL;
1119 s->q_chroma_intra_matrix16= NULL;
1120 av_freep(&s->q_intra_matrix);
1121 av_freep(&s->q_inter_matrix);
1122 av_freep(&s->q_intra_matrix16);
1123 av_freep(&s->q_inter_matrix16);
1124 av_freep(&s->input_picture);
1125 av_freep(&s->reordered_input_picture);
1126 av_freep(&s->dct_offset);
/* Sum of absolute errors of a 16x16 block against the constant value 'ref'
 * (typically the block mean) — a flatness/activity measure.
 * NOTE(review): accumulator declaration and return are elided in this dump. */
1131 static int get_sae(uint8_t *src, int ref, int stride)
1136 for (y = 0; y < 16; y++) {
1137 for (x = 0; x < 16; x++) {
1138 acc += FFABS(src[x + y * stride] - ref);
/* Count 16x16 macroblocks for which intra coding looks cheaper than inter:
 * a block is counted when its SAE around its own mean (plus a 500 bias)
 * is below the SAD against the reference frame.
 * NOTE(review): 'w' initialization and the return are elided in this dump. */
1145 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1146 uint8_t *ref, int stride)
/* Clip height to a multiple of 16 so only whole macroblocks are scanned. */
1152 h = s->height & ~15;
1154 for (y = 0; y < h; y += 16) {
1155 for (x = 0; x < w; x += 16) {
1156 int offset = x + y * stride;
1157 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
/* Rounded block mean: pix_sum over 256 pixels, +128 for rounding. */
1159 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1160 int sae = get_sae(src + offset, mean, stride);
1162 acc += sae + 500 < sad;
/* Thin wrapper: allocate (or wrap, when 'shared') a Picture using the
 * encoder's geometry and format parameters. */
1168 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1170 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1171 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1172 s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1173 &s->linesize, &s->uvlinesize);
/* Accept one input frame into the encoder's input queue: validate/derive its
 * pts, either reference the caller's buffer directly or copy it into an
 * internally allocated Picture, then insert it at position 'encoding_delay'
 * (shifting earlier entries when flushing).
 * NOTE(review): many original lines (declarations, braces, 'direct' setup,
 * inner copy loop details) are elided in this dump; code lines below are
 * byte-identical to the fragment. */
1176 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1178 Picture *pic = NULL;
1180 int i, display_picture_number = 0, ret;
/* With B-frames the encoder must buffer max_b_frames inputs; otherwise one
 * frame of delay unless low_delay is set. */
1181 int encoding_delay = s->max_b_frames ? s->max_b_frames
1182 : (s->low_delay ? 0 : 1);
1183 int flush_offset = 1;
1188 display_picture_number = s->input_picture_number++;
/* pts sanity checking: must be strictly increasing when user-specified. */
1190 if (pts != AV_NOPTS_VALUE) {
1191 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1192 int64_t last = s->user_specified_pts;
1195 av_log(s->avctx, AV_LOG_ERROR,
1196 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1198 return AVERROR(EINVAL);
1201 if (!s->low_delay && display_picture_number == 1)
1202 s->dts_delta = pts - last;
1204 s->user_specified_pts = pts;
/* No pts on this frame: extrapolate from the previous one, or fall back to
 * the display picture number. */
1206 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1207 s->user_specified_pts =
1208 pts = s->user_specified_pts + 1;
1209 av_log(s->avctx, AV_LOG_INFO,
1210 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1213 pts = display_picture_number;
/* Direct (zero-copy) use of the caller's frame requires matching linesizes,
 * macroblock-aligned dimensions and sufficiently aligned data/stride. */
1217 if (!pic_arg->buf[0] ||
1218 pic_arg->linesize[0] != s->linesize ||
1219 pic_arg->linesize[1] != s->uvlinesize ||
1220 pic_arg->linesize[2] != s->uvlinesize)
1222 if ((s->width & 15) || (s->height & 15))
1224 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1226 if (s->linesize & (STRIDE_ALIGN-1))
1229 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1230 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1232 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1236 pic = &s->picture[i];
1240 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1243 ret = alloc_picture(s, pic, direct);
/* Detect when the input already lives at the INPLACE_OFFSET position inside
 * our buffer (no copy needed); otherwise copy plane by plane. */
1248 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1249 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1250 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1253 int h_chroma_shift, v_chroma_shift;
1254 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1258 for (i = 0; i < 3; i++) {
1259 int src_stride = pic_arg->linesize[i];
1260 int dst_stride = i ? s->uvlinesize : s->linesize;
1261 int h_shift = i ? h_chroma_shift : 0;
1262 int v_shift = i ? v_chroma_shift : 0;
1263 int w = s->width >> h_shift;
1264 int h = s->height >> v_shift;
1265 uint8_t *src = pic_arg->data[i];
1266 uint8_t *dst = pic->f->data[i];
/* Interlaced MPEG-2 with tall alignment padding is handled specially
 * (condition body elided in this dump). */
1269 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1270 && !s->progressive_sequence
1271 && FFALIGN(s->height, 32) - s->height > 16)
1274 if (!s->avctx->rc_buffer_size)
1275 dst += INPLACE_OFFSET;
1277 if (src_stride == dst_stride)
1278 memcpy(dst, src, src_stride * h);
1281 uint8_t *dst2 = dst;
1283 memcpy(dst2, src, w);
/* Pad edges when dimensions are not macroblock aligned. */
1288 if ((s->width & 15) || (s->height & (vpad-1))) {
1289 s->mpvencdsp.draw_edges(dst, dst_stride,
1299 ret = av_frame_copy_props(pic->f, pic_arg);
1303 pic->f->display_picture_number = display_picture_number;
1304 pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1306 /* Flushing: When we have not received enough input frames,
1307 * ensure s->input_picture[0] contains the first picture */
1308 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1309 if (s->input_picture[flush_offset])
1312 if (flush_offset <= 1)
1315 encoding_delay = encoding_delay - flush_offset + 1;
1318 /* shift buffer entries */
1319 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1320 s->input_picture[i - flush_offset] = s->input_picture[i];
1322 s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether picture 'p' is similar enough to reference 'ref' to be
 * skipped entirely: accumulate a per-8x8-block comparison score over all
 * three planes using the configured frame_skip_exp norm, then test it
 * against frame_skip_threshold and a lambda-scaled frame_skip_factor.
 * NOTE(review): declarations, braces and return statements are elided. */
1327 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1331 int64_t score64 = 0;
1333 for (plane = 0; plane < 3; plane++) {
1334 const int stride = p->f->linesize[plane];
/* Luma plane covers 2x the macroblock grid of the chroma planes. */
1335 const int bw = plane ? 1 : 2;
1336 for (y = 0; y < s->mb_height * bw; y++) {
1337 for (x = 0; x < s->mb_width * bw; x++) {
/* Non-shared pictures carry a 16-byte data offset. */
1338 int off = p->shared ? 0 : 16;
1339 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1340 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1341 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
/* |frame_skip_exp| selects the norm: max, L1, L2, L3, L4. */
1343 switch (FFABS(s->frame_skip_exp)) {
1344 case 0: score = FFMAX(score, v); break;
1345 case 1: score += FFABS(v); break;
1346 case 2: score64 += v * (int64_t)v; break;
1347 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1348 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
/* Negative exponent: normalize back via the inverse power mean. */
1357 if (s->frame_skip_exp < 0)
1358 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1359 -1.0/s->frame_skip_exp);
1361 if (score64 < s->frame_skip_threshold)
1363 if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
/* Encode one frame with an auxiliary AVCodecContext (used by the B-frame
 * strategy probe): send the frame, receive the packet, and treat
 * EAGAIN/EOF from receive as non-fatal.
 * NOTE(review): declarations and the returned size are elided in this dump. */
1368 static int encode_frame(AVCodecContext *c, AVFrame *frame, AVPacket *pkt)
1373 ret = avcodec_send_frame(c, frame);
1378 ret = avcodec_receive_packet(c, pkt);
1381 av_packet_unref(pkt);
1382 } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
/* B-frame strategy 2: trial-encode the buffered input pictures at reduced
 * resolution (downscaled by 'brd_scale') with every candidate B-frame count
 * and pick the count with the lowest rate-distortion cost.
 * Returns the best B-frame count; negative on error.
 * NOTE(review): several original lines (loop bodies, error paths, the rd
 * comparison against best_rd) are elided in this dump. */
1389 static int estimate_best_b_count(MpegEncContext *s)
1391 const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1393 const int scale = s->brd_scale;
1394 int width = s->width >> scale;
1395 int height = s->height >> scale;
1396 int i, j, out_size, p_lambda, b_lambda, lambda2;
1397 int64_t best_rd = INT64_MAX;
1398 int best_b_count = -1;
1401 av_assert0(scale >= 0 && scale <= 3);
1403 pkt = av_packet_alloc();
1405 return AVERROR(ENOMEM);
/* Reuse the last lambdas as quality estimates for the trial encodes. */
1408 //s->next_picture_ptr->quality;
1409 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1410 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1411 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1412 if (!b_lambda) // FIXME we should do this somewhere else
1413 b_lambda = p_lambda;
1414 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* Downscale the reference plus all buffered inputs into tmp_frames. */
1417 for (i = 0; i < s->max_b_frames + 2; i++) {
1418 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1419 s->next_picture_ptr;
1422 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1423 pre_input = *pre_input_ptr;
1424 memcpy(data, pre_input_ptr->f->data, sizeof(data));
1426 if (!pre_input.shared && i) {
1427 data[0] += INPLACE_OFFSET;
1428 data[1] += INPLACE_OFFSET;
1429 data[2] += INPLACE_OFFSET;
1432 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1433 s->tmp_frames[i]->linesize[0],
1435 pre_input.f->linesize[0],
1437 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1438 s->tmp_frames[i]->linesize[1],
1440 pre_input.f->linesize[1],
1441 width >> 1, height >> 1);
1442 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1443 s->tmp_frames[i]->linesize[2],
1445 pre_input.f->linesize[2],
1446 width >> 1, height >> 1);
/* Try each candidate number of consecutive B-frames (j). */
1450 for (j = 0; j < s->max_b_frames + 1; j++) {
1454 if (!s->input_picture[j])
/* Fresh context per trial, mirroring relevant encoder settings. */
1457 c = avcodec_alloc_context3(NULL);
1459 ret = AVERROR(ENOMEM);
1465 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1466 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1467 c->mb_decision = s->avctx->mb_decision;
1468 c->me_cmp = s->avctx->me_cmp;
1469 c->mb_cmp = s->avctx->mb_cmp;
1470 c->me_sub_cmp = s->avctx->me_sub_cmp;
1471 c->pix_fmt = AV_PIX_FMT_YUV420P;
1472 c->time_base = s->avctx->time_base;
1473 c->max_b_frames = s->max_b_frames;
1475 ret = avcodec_open2(c, codec, NULL);
/* First frame is always intra; its cost is common to all trials. */
1480 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1481 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1483 out_size = encode_frame(c, s->tmp_frames[0], pkt);
1489 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1491 for (i = 0; i < s->max_b_frames + 1; i++) {
/* Every (j+1)-th frame (and the last) is a P-frame in this trial. */
1492 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1494 s->tmp_frames[i + 1]->pict_type = is_p ?
1495 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1496 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1498 out_size = encode_frame(c, s->tmp_frames[i + 1], pkt);
1504 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1507 /* get the delayed frames */
1508 out_size = encode_frame(c, NULL, pkt);
1513 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* Add the distortion (SSE) reported by the trial encoder. */
1515 rd += c->error[0] + c->error[1] + c->error[2];
1523 avcodec_free_context(&c);
1524 av_packet_unref(pkt);
1531 av_packet_free(&pkt);
1533 return best_b_count;
/* Pick the next picture to encode and establish coding order: handle frame
 * skipping, decide I/P placement, choose the number of B-frames per the
 * configured b_frame_strategy, reorder input pictures accordingly, and set
 * up new_picture / current_picture for the encode.
 * NOTE(review): numerous original lines (braces, 'b_frames' decrements,
 * error paths) are elided in this dump; code lines are byte-identical. */
1536 static int select_input_picture(MpegEncContext *s)
/* Shift the reorder queue down by one. */
1540 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1541 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1542 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1544 /* set next picture type & ordering */
1545 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
/* Frame skipping: drop the input if it is close enough to the reference. */
1546 if (s->frame_skip_threshold || s->frame_skip_factor) {
1547 if (s->picture_in_gop_number < s->gop_size &&
1548 s->next_picture_ptr &&
1549 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1550 // FIXME check that the gop check above is +-1 correct
1551 av_frame_unref(s->input_picture[0]->f);
1553 ff_vbv_update(s, 0);
/* No reference yet or intra-only: emit an I-frame immediately. */
1559 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1560 !s->next_picture_ptr || s->intra_only) {
1561 s->reordered_input_picture[0] = s->input_picture[0];
1562 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1563 s->reordered_input_picture[0]->f->coded_picture_number =
1564 s->coded_picture_number++;
/* Two-pass mode: picture types come from the first-pass statistics. */
1568 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1569 for (i = 0; i < s->max_b_frames + 1; i++) {
1570 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1572 if (pict_num >= s->rc_context.num_entries)
1574 if (!s->input_picture[i]) {
1575 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1579 s->input_picture[i]->f->pict_type =
1580 s->rc_context.entry[pict_num].new_pict_type;
/* Strategy 0: fixed B count; 1: intra-count heuristic; 2: trial encodes. */
1584 if (s->b_frame_strategy == 0) {
1585 b_frames = s->max_b_frames;
1586 while (b_frames && !s->input_picture[b_frames])
1588 } else if (s->b_frame_strategy == 1) {
1589 for (i = 1; i < s->max_b_frames + 1; i++) {
1590 if (s->input_picture[i] &&
1591 s->input_picture[i]->b_frame_score == 0) {
1592 s->input_picture[i]->b_frame_score =
1594 s->input_picture[i ]->f->data[0],
1595 s->input_picture[i - 1]->f->data[0],
1599 for (i = 0; i < s->max_b_frames + 1; i++) {
1600 if (!s->input_picture[i] ||
1601 s->input_picture[i]->b_frame_score - 1 >
1602 s->mb_num / s->b_sensitivity)
1606 b_frames = FFMAX(0, i - 1);
/* Reset cached scores for the pictures we are about to consume. */
1609 for (i = 0; i < b_frames + 1; i++) {
1610 s->input_picture[i]->b_frame_score = 0;
1612 } else if (s->b_frame_strategy == 2) {
1613 b_frames = estimate_best_b_count(s);
/* A user-forced non-B type inside the run truncates the B chain. */
1620 for (i = b_frames - 1; i >= 0; i--) {
1621 int type = s->input_picture[i]->f->pict_type;
1622 if (type && type != AV_PICTURE_TYPE_B)
1625 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1626 b_frames == s->max_b_frames) {
1627 av_log(s->avctx, AV_LOG_ERROR,
1628 "warning, too many B-frames in a row\n");
/* GOP boundary handling: shorten the B run or force an I-frame. */
1631 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1632 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1633 s->gop_size > s->picture_in_gop_number) {
1634 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1636 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1638 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1642 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1643 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
/* Reorder: the anchor (I/P) goes first, then its preceding B-frames. */
1646 s->reordered_input_picture[0] = s->input_picture[b_frames];
1647 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1648 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1649 s->reordered_input_picture[0]->f->coded_picture_number =
1650 s->coded_picture_number++;
1651 for (i = 0; i < b_frames; i++) {
1652 s->reordered_input_picture[i + 1] = s->input_picture[i];
1653 s->reordered_input_picture[i + 1]->f->pict_type =
1655 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1656 s->coded_picture_number++;
1661 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1663 if (s->reordered_input_picture[0]) {
/* Reference flag: 3 for anchors (I/P), 0 for B-frames. */
1664 s->reordered_input_picture[0]->reference =
1665 s->reordered_input_picture[0]->f->pict_type !=
1666 AV_PICTURE_TYPE_B ? 3 : 0;
1668 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1671 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1672 // input is a shared pix, so we can't modify it -> allocate a new
1673 // one & ensure that the shared one is reuseable
1676 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1679 pic = &s->picture[i];
1681 pic->reference = s->reordered_input_picture[0]->reference;
1682 if (alloc_picture(s, pic, 0) < 0) {
1686 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1690 /* mark us unused / free shared pic */
1691 av_frame_unref(s->reordered_input_picture[0]->f);
1692 s->reordered_input_picture[0]->shared = 0;
1694 s->current_picture_ptr = pic;
1696 // input is not a shared pix -> reuse buffer for current_pix
1697 s->current_picture_ptr = s->reordered_input_picture[0];
1698 for (i = 0; i < 4; i++) {
1699 if (s->new_picture.f->data[i])
1700 s->new_picture.f->data[i] += INPLACE_OFFSET;
1703 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1704 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1705 s->current_picture_ptr)) < 0)
1708 s->picture_number = s->new_picture.f->display_picture_number;
/* Post-encode bookkeeping: draw edge padding on reference pictures (for
 * unrestricted motion vectors), remember last picture type and lambda, and
 * maintain the deprecated coded_frame / error-field compatibility APIs.
 * NOTE(review): some condition lines and #endif markers are elided. */
1713 static void frame_end(MpegEncContext *s)
1715 if (s->unrestricted_mv &&
1716 s->current_picture.reference &&
1718 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1719 int hshift = desc->log2_chroma_w;
1720 int vshift = desc->log2_chroma_h;
/* Pad luma, then both chroma planes scaled by the subsampling shifts. */
1721 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1722 s->current_picture.f->linesize[0],
1723 s->h_edge_pos, s->v_edge_pos,
1724 EDGE_WIDTH, EDGE_WIDTH,
1725 EDGE_TOP | EDGE_BOTTOM);
1726 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1727 s->current_picture.f->linesize[1],
1728 s->h_edge_pos >> hshift,
1729 s->v_edge_pos >> vshift,
1730 EDGE_WIDTH >> hshift,
1731 EDGE_WIDTH >> vshift,
1732 EDGE_TOP | EDGE_BOTTOM);
1733 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1734 s->current_picture.f->linesize[2],
1735 s->h_edge_pos >> hshift,
1736 s->v_edge_pos >> vshift,
1737 EDGE_WIDTH >> hshift,
1738 EDGE_WIDTH >> vshift,
1739 EDGE_TOP | EDGE_BOTTOM);
/* Cache per-type state used when choosing lambdas for the next frames. */
1744 s->last_pict_type = s->pict_type;
1745 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1746 if (s->pict_type!= AV_PICTURE_TYPE_B)
1747 s->last_non_b_pict_type = s->pict_type;
/* Deprecated public-API mirrors, kept behind FF_API_* guards. */
1749 #if FF_API_CODED_FRAME
1750 FF_DISABLE_DEPRECATION_WARNINGS
1751 av_frame_unref(s->avctx->coded_frame);
1752 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1753 FF_ENABLE_DEPRECATION_WARNINGS
1755 #if FF_API_ERROR_FRAME
1756 FF_DISABLE_DEPRECATION_WARNINGS
1757 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1758 sizeof(s->current_picture.encoding_error));
1759 FF_ENABLE_DEPRECATION_WARNINGS
/* Refresh the adaptive DCT noise-reduction offsets from the accumulated
 * per-coefficient error statistics, halving the accumulators once the
 * sample count exceeds 2^16 to keep a moving average. */
1763 static void update_noise_reduction(MpegEncContext *s)
1767 for (intra = 0; intra < 2; intra++) {
1768 if (s->dct_count[intra] > (1 << 16)) {
1769 for (i = 0; i < 64; i++) {
1770 s->dct_error_sum[intra][i] >>= 1;
1772 s->dct_count[intra] >>= 1;
/* offset = noise_reduction * count / error_sum, rounded; +1 avoids /0. */
1775 for (i = 0; i < 64; i++) {
1776 s->dct_offset[intra][i] = (s->noise_reduction *
1777 s->dct_count[intra] +
1778 s->dct_error_sum[intra][i] / 2) /
1779 (s->dct_error_sum[intra][i] + 1);
/* Prepare encoder state for encoding one picture: rotate last/next/current
 * reference pictures, adjust plane pointers for field pictures, select the
 * dct_unquantize functions for the output format, and update noise
 * reduction if enabled.
 * NOTE(review): some braces/returns are elided in this dump. */
1784 static int frame_start(MpegEncContext *s)
1788 /* mark & release old frames */
1789 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1790 s->last_picture_ptr != s->next_picture_ptr &&
1791 s->last_picture_ptr->f->buf[0]) {
1792 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1795 s->current_picture_ptr->f->pict_type = s->pict_type;
1796 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1798 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1799 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1800 s->current_picture_ptr)) < 0)
/* Non-B pictures become the new forward reference chain. */
1803 if (s->pict_type != AV_PICTURE_TYPE_B) {
1804 s->last_picture_ptr = s->next_picture_ptr;
1806 s->next_picture_ptr = s->current_picture_ptr;
1809 if (s->last_picture_ptr) {
1810 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1811 if (s->last_picture_ptr->f->buf[0] &&
1812 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1813 s->last_picture_ptr)) < 0)
1816 if (s->next_picture_ptr) {
1817 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1818 if (s->next_picture_ptr->f->buf[0] &&
1819 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1820 s->next_picture_ptr)) < 0)
/* Field pictures: double the linesizes and offset the bottom field. */
1824 if (s->picture_structure!= PICT_FRAME) {
1826 for (i = 0; i < 4; i++) {
1827 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1828 s->current_picture.f->data[i] +=
1829 s->current_picture.f->linesize[i];
1831 s->current_picture.f->linesize[i] *= 2;
1832 s->last_picture.f->linesize[i] *= 2;
1833 s->next_picture.f->linesize[i] *= 2;
/* Pick the matching unquantizer family for the output format. */
1837 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1838 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1839 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1840 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1841 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1842 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1844 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1845 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1848 if (s->dct_error_sum) {
1849 av_assert2(s->noise_reduction && s->encoding);
1850 update_noise_reduction(s);
/* Public entry point: encode one picture into 'pkt'. Loads and reorders the
 * input, allocates the output packet, runs encode_picture() (re-encoding at
 * a higher lambda if the VBV budget is exceeded), writes rate-control
 * stats, applies codec-specific stuffing, patches MPEG-1/2 vbv_delay for
 * CBR, and fills pkt pts/dts/flags.
 * NOTE(review): this dump elides many lines (braces, #endif markers, error
 * paths, the VBV retry loop structure); code lines are byte-identical. */
1856 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1857 const AVFrame *pic_arg, int *got_packet)
1859 MpegEncContext *s = avctx->priv_data;
1860 int i, stuffing_count, ret;
1861 int context_count = s->slice_context_count;
1863 s->vbv_ignore_qmax = 0;
1865 s->picture_in_gop_number++;
1867 if (load_input_picture(s, pic_arg) < 0)
1870 if (select_input_picture(s) < 0) {
/* Anything to encode this call? (reorder delay may yield no frame yet) */
1875 if (s->new_picture.f->data[0]) {
1876 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1877 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1879 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1880 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
/* Optional H.263 macroblock-info side data buffer. */
1883 s->mb_info_ptr = av_packet_new_side_data(pkt,
1884 AV_PKT_DATA_H263_MB_INFO,
1885 s->mb_width*s->mb_height*12);
1886 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* Partition the output buffer among slice threads by mb-row share. */
1889 for (i = 0; i < context_count; i++) {
1890 int start_y = s->thread_context[i]->start_mb_y;
1891 int end_y = s->thread_context[i]-> end_mb_y;
1892 int h = s->mb_height;
1893 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1894 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1896 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1899 s->pict_type = s->new_picture.f->pict_type;
1901 ret = frame_start(s);
1905 ret = encode_picture(s, s->picture_number);
1906 if (growing_buffer) {
1907 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1908 pkt->data = s->pb.buf;
1909 pkt->size = avctx->internal->byte_buffer_size;
/* Deprecated per-frame bit statistics mirrored into AVCodecContext. */
1914 #if FF_API_STAT_BITS
1915 FF_DISABLE_DEPRECATION_WARNINGS
1916 avctx->header_bits = s->header_bits;
1917 avctx->mv_bits = s->mv_bits;
1918 avctx->misc_bits = s->misc_bits;
1919 avctx->i_tex_bits = s->i_tex_bits;
1920 avctx->p_tex_bits = s->p_tex_bits;
1921 avctx->i_count = s->i_count;
1922 // FIXME f/b_count in avctx
1923 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1924 avctx->skip_count = s->skip_count;
1925 FF_ENABLE_DEPRECATION_WARNINGS
1930 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1931 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
/* VBV check: if the frame is too large, raise lambda and re-encode. */
1933 if (avctx->rc_buffer_size) {
1934 RateControlContext *rcc = &s->rc_context;
1935 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1936 int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
1937 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1939 if (put_bits_count(&s->pb) > max_size &&
1940 s->lambda < s->lmax) {
1941 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1942 (s->qscale + 1) / s->qscale);
1943 if (s->adaptive_quant) {
1945 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1946 s->lambda_table[i] =
1947 FFMAX(s->lambda_table[i] + min_step,
1948 s->lambda_table[i] * (s->qscale + 1) /
1951 s->mb_skipped = 0; // done in frame_start()
1952 // done in encode_picture() so we must undo it
1953 if (s->pict_type == AV_PICTURE_TYPE_P) {
1954 if (s->flipflop_rounding ||
1955 s->codec_id == AV_CODEC_ID_H263P ||
1956 s->codec_id == AV_CODEC_ID_MPEG4)
1957 s->no_rounding ^= 1;
1959 if (s->pict_type != AV_PICTURE_TYPE_B) {
1960 s->time_base = s->last_time_base;
1961 s->last_non_b_time = s->time - s->pp_time;
1963 for (i = 0; i < context_count; i++) {
1964 PutBitContext *pb = &s->thread_context[i]->pb;
1965 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1967 s->vbv_ignore_qmax = 1;
1968 av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1972 av_assert0(avctx->rc_max_rate);
1975 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1976 ff_write_pass1_stats(s);
/* Propagate per-plane encoding error (for PSNR reporting). */
1978 for (i = 0; i < 4; i++) {
1979 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1980 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1982 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1983 s->current_picture_ptr->encoding_error,
1984 (avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1987 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1988 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1989 s->misc_bits + s->i_tex_bits +
1991 flush_put_bits(&s->pb);
1992 s->frame_bits = put_bits_count(&s->pb);
/* VBV stuffing: pad the frame with codec-appropriate filler bits. */
1994 stuffing_count = ff_vbv_update(s, s->frame_bits);
1995 s->stuffing_bits = 8*stuffing_count;
1996 if (stuffing_count) {
1997 if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
1998 av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
2002 switch (s->codec_id) {
2003 case AV_CODEC_ID_MPEG1VIDEO:
2004 case AV_CODEC_ID_MPEG2VIDEO:
2005 while (stuffing_count--) {
2006 put_bits(&s->pb, 8, 0);
/* MPEG-4 stuffing: a 0x1C3 startcode then 0xFF filler bytes. */
2009 case AV_CODEC_ID_MPEG4:
2010 put_bits(&s->pb, 16, 0);
2011 put_bits(&s->pb, 16, 0x1C3);
2012 stuffing_count -= 4;
2013 while (stuffing_count--) {
2014 put_bits(&s->pb, 8, 0xFF);
2018 av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2020 flush_put_bits(&s->pb);
2021 s->frame_bits = put_bits_count(&s->pb);
2024 /* update MPEG-1/2 vbv_delay for CBR */
2025 if (avctx->rc_max_rate &&
2026 avctx->rc_min_rate == avctx->rc_max_rate &&
2027 s->out_format == FMT_MPEG1 &&
2028 90000LL * (avctx->rc_buffer_size - 1) <=
2029 avctx->rc_max_rate * 0xFFFFLL) {
2030 AVCPBProperties *props;
2033 int vbv_delay, min_delay;
2034 double inbits = avctx->rc_max_rate *
2035 av_q2d(avctx->time_base);
2036 int minbits = s->frame_bits - 8 *
2037 (s->vbv_delay_ptr - s->pb.buf - 1);
2038 double bits = s->rc_context.buffer_index + minbits - inbits;
2041 av_log(avctx, AV_LOG_ERROR,
2042 "Internal error, negative bits\n");
2044 av_assert1(s->repeat_first_field == 0);
/* vbv_delay is expressed in 90 kHz clock ticks. */
2046 vbv_delay = bits * 90000 / avctx->rc_max_rate;
2047 min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
2050 vbv_delay = FFMAX(vbv_delay, min_delay);
2052 av_assert0(vbv_delay < 0xFFFF);
/* Patch the 16-bit vbv_delay field in-place in the written header. */
2054 s->vbv_delay_ptr[0] &= 0xF8;
2055 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2056 s->vbv_delay_ptr[1] = vbv_delay >> 5;
2057 s->vbv_delay_ptr[2] &= 0x07;
2058 s->vbv_delay_ptr[2] |= vbv_delay << 3;
2060 props = av_cpb_properties_alloc(&props_size);
2062 return AVERROR(ENOMEM);
2063 props->vbv_delay = vbv_delay * 300;
2065 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
2066 (uint8_t*)props, props_size);
2072 #if FF_API_VBV_DELAY
2073 FF_DISABLE_DEPRECATION_WARNINGS
2074 avctx->vbv_delay = vbv_delay * 300;
2075 FF_ENABLE_DEPRECATION_WARNINGS
2078 s->total_bits += s->frame_bits;
2079 #if FF_API_STAT_BITS
2080 FF_DISABLE_DEPRECATION_WARNINGS
2081 avctx->frame_bits = s->frame_bits;
2082 FF_ENABLE_DEPRECATION_WARNINGS
/* Packet timestamps: dts lags pts by dts_delta when B-frames reorder. */
2086 pkt->pts = s->current_picture.f->pts;
2087 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2088 if (!s->current_picture.f->coded_picture_number)
2089 pkt->dts = pkt->pts - s->dts_delta;
2091 pkt->dts = s->reordered_pts;
2092 s->reordered_pts = pkt->pts;
2094 pkt->dts = pkt->pts;
2095 if (s->current_picture.f->key_frame)
2096 pkt->flags |= AV_PKT_FLAG_KEY;
2098 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2103 /* release non-reference frames */
2104 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2105 if (!s->picture[i].reference)
2106 ff_mpeg_unref_picture(avctx, &s->picture[i]);
2109 av_assert1((s->frame_bits & 7) == 0);
2111 pkt->size = s->frame_bits / 8;
2112 *got_packet = !!pkt->size;
/* Zero out a block whose few small coefficients cost more to code than they
 * are worth: score each nonzero coefficient by its zigzag position (via the
 * 'tab' weights), and if the total stays below 'threshold', clear the block
 * (keeping only the DC term when skip_dc is set).
 * NOTE(review): skip_dc setup and several braces/early-returns are elided. */
2116 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2117 int n, int threshold)
/* Positional cost weights in zigzag order: early (low-frequency)
 * coefficients count more. */
2119 static const char tab[64] = {
2120 3, 2, 2, 1, 1, 1, 1, 1,
2121 1, 1, 1, 1, 1, 1, 1, 1,
2122 1, 1, 1, 1, 1, 1, 1, 1,
2123 0, 0, 0, 0, 0, 0, 0, 0,
2124 0, 0, 0, 0, 0, 0, 0, 0,
2125 0, 0, 0, 0, 0, 0, 0, 0,
2126 0, 0, 0, 0, 0, 0, 0, 0,
2127 0, 0, 0, 0, 0, 0, 0, 0
2132 int16_t *block = s->block[n];
2133 const int last_index = s->block_last_index[n];
/* Negative threshold means: also preserve (skip) the DC coefficient. */
2136 if (threshold < 0) {
2138 threshold = -threshold;
2142 /* Are all we could set to zero already zero? */
2143 if (last_index <= skip_dc - 1)
2146 for (i = 0; i <= last_index; i++) {
2147 const int j = s->intra_scantable.permutated[i];
2148 const int level = FFABS(block[j]);
2150 if (skip_dc && i == 0)
/* Coefficients with |level| > 1 end elimination (body elided). */
2154 } else if (level > 1) {
2160 if (score >= threshold)
/* Below threshold: clear all (non-DC) coefficients and fix last_index. */
2162 for (i = skip_dc; i <= last_index; i++) {
2163 const int j = s->intra_scantable.permutated[i];
2167 s->block_last_index[n] = 0;
2169 s->block_last_index[n] = -1;
/* Clamp all quantized coefficients of a block into [min_qcoeff, max_qcoeff]
 * (skipping the intra DC term) and warn once per macroblock when clipping
 * occurred in simple mb_decision mode.
 * NOTE(review): overflow counting and clamping assignments are elided. */
2172 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2176 const int maxlevel = s->max_qcoeff;
2177 const int minlevel = s->min_qcoeff;
2181 i = 1; // skip clipping of intra dc
2185 for (; i <= last_index; i++) {
2186 const int j = s->intra_scantable.permutated[i];
2187 int level = block[j];
2189 if (level > maxlevel) {
2192 } else if (level < minlevel) {
2200 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2201 av_log(s->avctx, AV_LOG_INFO,
2202 "warning, clipping %d dct coefficients to %d..%d\n",
2203 overflow, minlevel, maxlevel);
/* Compute an 8x8 perceptual weighting table from local pixel variance: for
 * each pixel, measure the standard deviation of its 3x3 neighbourhood and
 * scale it into weight[].
 * NOTE(review): sum/sqr/count initialization lines are elided in this dump. */
2206 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2210 for (y = 0; y < 8; y++) {
2211 for (x = 0; x < 8; x++) {
/* 3x3 neighbourhood clamped to the 8x8 block bounds. */
2217 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2218 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2219 int v = ptr[x2 + y2 * stride];
/* 36 * sqrt(count*sqr - sum^2) / count ~ scaled local std deviation. */
2225 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2230 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2231 int motion_x, int motion_y,
2232 int mb_block_height,
2236 int16_t weight[12][64];
2237 int16_t orig[12][64];
2238 const int mb_x = s->mb_x;
2239 const int mb_y = s->mb_y;
2242 int dct_offset = s->linesize * 8; // default for progressive frames
2243 int uv_dct_offset = s->uvlinesize * 8;
2244 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2245 ptrdiff_t wrap_y, wrap_c;
2247 for (i = 0; i < mb_block_count; i++)
2248 skip_dct[i] = s->skipdct;
2250 if (s->adaptive_quant) {
2251 const int last_qp = s->qscale;
2252 const int mb_xy = mb_x + mb_y * s->mb_stride;
2254 s->lambda = s->lambda_table[mb_xy];
2257 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2258 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2259 s->dquant = s->qscale - last_qp;
2261 if (s->out_format == FMT_H263) {
2262 s->dquant = av_clip(s->dquant, -2, 2);
2264 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2266 if (s->pict_type == AV_PICTURE_TYPE_B) {
2267 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2270 if (s->mv_type == MV_TYPE_8X8)
2276 ff_set_qscale(s, last_qp + s->dquant);
2277 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2278 ff_set_qscale(s, s->qscale + s->dquant);
2280 wrap_y = s->linesize;
2281 wrap_c = s->uvlinesize;
2282 ptr_y = s->new_picture.f->data[0] +
2283 (mb_y * 16 * wrap_y) + mb_x * 16;
2284 ptr_cb = s->new_picture.f->data[1] +
2285 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2286 ptr_cr = s->new_picture.f->data[2] +
2287 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2289 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2290 uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2291 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2292 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2293 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2295 16, 16, mb_x * 16, mb_y * 16,
2296 s->width, s->height);
2298 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2300 mb_block_width, mb_block_height,
2301 mb_x * mb_block_width, mb_y * mb_block_height,
2303 ptr_cb = ebuf + 16 * wrap_y;
2304 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2306 mb_block_width, mb_block_height,
2307 mb_x * mb_block_width, mb_y * mb_block_height,
2309 ptr_cr = ebuf + 16 * wrap_y + 16;
2313 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2314 int progressive_score, interlaced_score;
2316 s->interlaced_dct = 0;
2317 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2318 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2319 NULL, wrap_y, 8) - 400;
2321 if (progressive_score > 0) {
2322 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2323 NULL, wrap_y * 2, 8) +
2324 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2325 NULL, wrap_y * 2, 8);
2326 if (progressive_score > interlaced_score) {
2327 s->interlaced_dct = 1;
2329 dct_offset = wrap_y;
2330 uv_dct_offset = wrap_c;
2332 if (s->chroma_format == CHROMA_422 ||
2333 s->chroma_format == CHROMA_444)
2339 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2340 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2341 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2342 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2344 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2348 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2349 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2350 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2351 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2352 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2353 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2354 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2355 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2356 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2357 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2358 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2359 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2363 op_pixels_func (*op_pix)[4];
2364 qpel_mc_func (*op_qpix)[16];
2365 uint8_t *dest_y, *dest_cb, *dest_cr;
2367 dest_y = s->dest[0];
2368 dest_cb = s->dest[1];
2369 dest_cr = s->dest[2];
2371 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2372 op_pix = s->hdsp.put_pixels_tab;
2373 op_qpix = s->qdsp.put_qpel_pixels_tab;
2375 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2376 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2379 if (s->mv_dir & MV_DIR_FORWARD) {
2380 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2381 s->last_picture.f->data,
2383 op_pix = s->hdsp.avg_pixels_tab;
2384 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2386 if (s->mv_dir & MV_DIR_BACKWARD) {
2387 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2388 s->next_picture.f->data,
2392 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2393 int progressive_score, interlaced_score;
2395 s->interlaced_dct = 0;
2396 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2397 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2401 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2402 progressive_score -= 400;
2404 if (progressive_score > 0) {
2405 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2407 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2411 if (progressive_score > interlaced_score) {
2412 s->interlaced_dct = 1;
2414 dct_offset = wrap_y;
2415 uv_dct_offset = wrap_c;
2417 if (s->chroma_format == CHROMA_422)
2423 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2424 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2425 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2426 dest_y + dct_offset, wrap_y);
2427 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2428 dest_y + dct_offset + 8, wrap_y);
2430 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2434 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2435 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2436 if (!s->chroma_y_shift) { /* 422 */
2437 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2438 dest_cb + uv_dct_offset, wrap_c);
2439 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2440 dest_cr + uv_dct_offset, wrap_c);
2443 /* pre quantization */
2444 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2445 2 * s->qscale * s->qscale) {
2447 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2449 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2451 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2452 wrap_y, 8) < 20 * s->qscale)
2454 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2455 wrap_y, 8) < 20 * s->qscale)
2457 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2459 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2461 if (!s->chroma_y_shift) { /* 422 */
2462 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2463 dest_cb + uv_dct_offset,
2464 wrap_c, 8) < 20 * s->qscale)
2466 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2467 dest_cr + uv_dct_offset,
2468 wrap_c, 8) < 20 * s->qscale)
2474 if (s->quantizer_noise_shaping) {
2476 get_visual_weight(weight[0], ptr_y , wrap_y);
2478 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2480 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2482 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2484 get_visual_weight(weight[4], ptr_cb , wrap_c);
2486 get_visual_weight(weight[5], ptr_cr , wrap_c);
2487 if (!s->chroma_y_shift) { /* 422 */
2489 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2492 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2495 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2498 /* DCT & quantize */
2499 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2501 for (i = 0; i < mb_block_count; i++) {
2504 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2505 // FIXME we could decide to change to quantizer instead of
2507 // JS: I don't think that would be a good idea it could lower
2508 // quality instead of improve it. Just INTRADC clipping
2509 // deserves changes in quantizer
2511 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2513 s->block_last_index[i] = -1;
2515 if (s->quantizer_noise_shaping) {
2516 for (i = 0; i < mb_block_count; i++) {
2518 s->block_last_index[i] =
2519 dct_quantize_refine(s, s->block[i], weight[i],
2520 orig[i], i, s->qscale);
2525 if (s->luma_elim_threshold && !s->mb_intra)
2526 for (i = 0; i < 4; i++)
2527 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2528 if (s->chroma_elim_threshold && !s->mb_intra)
2529 for (i = 4; i < mb_block_count; i++)
2530 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2532 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2533 for (i = 0; i < mb_block_count; i++) {
2534 if (s->block_last_index[i] == -1)
2535 s->coded_score[i] = INT_MAX / 256;
2540 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2541 s->block_last_index[4] =
2542 s->block_last_index[5] = 0;
2544 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2545 if (!s->chroma_y_shift) { /* 422 / 444 */
2546 for (i=6; i<12; i++) {
2547 s->block_last_index[i] = 0;
2548 s->block[i][0] = s->block[4][0];
2553 // non c quantize code returns incorrect block_last_index FIXME
2554 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2555 for (i = 0; i < mb_block_count; i++) {
2557 if (s->block_last_index[i] > 0) {
2558 for (j = 63; j > 0; j--) {
2559 if (s->block[i][s->intra_scantable.permutated[j]])
2562 s->block_last_index[i] = j;
2567 /* huffman encode */
2568 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2569 case AV_CODEC_ID_MPEG1VIDEO:
2570 case AV_CODEC_ID_MPEG2VIDEO:
2571 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2572 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2574 case AV_CODEC_ID_MPEG4:
2575 if (CONFIG_MPEG4_ENCODER)
2576 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2578 case AV_CODEC_ID_MSMPEG4V2:
2579 case AV_CODEC_ID_MSMPEG4V3:
2580 case AV_CODEC_ID_WMV1:
2581 if (CONFIG_MSMPEG4_ENCODER)
2582 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2584 case AV_CODEC_ID_WMV2:
2585 if (CONFIG_WMV2_ENCODER)
2586 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2588 case AV_CODEC_ID_H261:
2589 if (CONFIG_H261_ENCODER)
2590 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2592 case AV_CODEC_ID_H263:
2593 case AV_CODEC_ID_H263P:
2594 case AV_CODEC_ID_FLV1:
2595 case AV_CODEC_ID_RV10:
2596 case AV_CODEC_ID_RV20:
2597 if (CONFIG_H263_ENCODER)
2598 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2600 case AV_CODEC_ID_MJPEG:
2601 case AV_CODEC_ID_AMV:
2602 if (CONFIG_MJPEG_ENCODER)
2603 ff_mjpeg_encode_mb(s, s->block);
2605 case AV_CODEC_ID_SPEEDHQ:
2606 if (CONFIG_SPEEDHQ_ENCODER)
2607 ff_speedhq_encode_mb(s, s->block);
2614 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2616 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2617 else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2618 else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/* Back up the per-macroblock encoder state of *s into *d before trying one
 * candidate coding mode; copy_context_after_encode() commits the winner.
 * NOTE(review): the extraction dropped several lines here (see the gaps in
 * the embedded original line numbers, e.g. the loop header around the
 * last_dc[] copy) — this fragment is not compilable as-is; verify against
 * upstream mpegvideo_enc.c. */
2621 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* MV predictor state: 2 directions x 2 fields x (x,y) */
2624 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2627 d->mb_skip_run= s->mb_skip_run;
/* DC predictors (presumably copied per component in a dropped for-loop) */
2629 d->last_dc[i] = s->last_dc[i];
/* bitstream statistics, needed so a rejected trial can be rolled back */
2632 d->mv_bits= s->mv_bits;
2633 d->i_tex_bits= s->i_tex_bits;
2634 d->p_tex_bits= s->p_tex_bits;
2635 d->i_count= s->i_count;
2636 d->f_count= s->f_count;
2637 d->b_count= s->b_count;
2638 d->skip_count= s->skip_count;
2639 d->misc_bits= s->misc_bits;
2643 d->qscale= s->qscale;
2644 d->dquant= s->dquant;
/* MPEG-4 escape-code state */
2646 d->esc3_level_length= s->esc3_level_length;
/* Commit the per-macroblock encoder state of *s (the context that just
 * encoded a trial) into *d (the "best so far" context). Mirror image of
 * copy_context_before_encode(), but also records the outcome fields
 * (mb_intra, mb_skipped, mv_type, mv_dir, block_last_index, ...).
 * NOTE(review): interior lines were dropped by the extraction (gaps in the
 * embedded numbering); verify against upstream mpegvideo_enc.c. */
2649 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* chosen motion vectors: 2 directions x up to 4 blocks x (x,y) */
2652 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2653 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2656 d->mb_skip_run= s->mb_skip_run;
/* DC predictors (loop header dropped by extraction) */
2658 d->last_dc[i] = s->last_dc[i];
/* bitstream statistics */
2661 d->mv_bits= s->mv_bits;
2662 d->i_tex_bits= s->i_tex_bits;
2663 d->p_tex_bits= s->p_tex_bits;
2664 d->i_count= s->i_count;
2665 d->f_count= s->f_count;
2666 d->b_count= s->b_count;
2667 d->skip_count= s->skip_count;
2668 d->misc_bits= s->misc_bits;
/* decision results of the trial encode */
2670 d->mb_intra= s->mb_intra;
2671 d->mb_skipped= s->mb_skipped;
2672 d->mv_type= s->mv_type;
2673 d->mv_dir= s->mv_dir;
/* with data partitioning the texture bits live in a separate PutBitContext */
2675 if(s->data_partitioning){
2677 d->tex_pb= s->tex_pb;
2681 d->block_last_index[i]= s->block_last_index[i];
2682 d->interlaced_dct= s->interlaced_dct;
2683 d->qscale= s->qscale;
2685 d->esc3_level_length= s->esc3_level_length;
/* Trial-encode one macroblock with a given candidate mode (`type`) and keep
 * it if its cost beats *dmin. Bits are written into the double-buffered
 * pb/pb2/tex_pb scratch PutBitContexts selected by *next_block, and the
 * reconstruction goes to the rd_scratchpad instead of the real picture.
 * Cost is bit count alone, or bits*lambda2 + SSE distortion when
 * mb_decision == FF_MB_DECISION_RD.
 * NOTE(review): lines dropped by extraction (score declaration, the
 * dmin-comparison branch, restore of s->dest on rejection, etc.); verify
 * against upstream mpegvideo_enc.c. */
2688 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2689 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2690 int *dmin, int *next_block, int motion_x, int motion_y)
2693 uint8_t *dest_backup[3];
/* start every trial from the identical saved state */
2695 copy_context_before_encode(s, backup, type);
/* select the scratch block set and bit buffers for this trial */
2697 s->block= s->blocks[*next_block];
2698 s->pb= pb[*next_block];
2699 if(s->data_partitioning){
2700 s->pb2 = pb2 [*next_block];
2701 s->tex_pb= tex_pb[*next_block];
/* redirect reconstruction into the RD scratchpad (restored below) */
2705 memcpy(dest_backup, s->dest, sizeof(s->dest));
2706 s->dest[0] = s->sc.rd_scratchpad;
2707 s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2708 s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2709 av_assert0(s->linesize >= 32); //FIXME
2712 encode_mb(s, motion_x, motion_y);
/* rate term: total bits produced by this trial */
2714 score= put_bits_count(&s->pb);
2715 if(s->data_partitioning){
2716 score+= put_bits_count(&s->pb2);
2717 score+= put_bits_count(&s->tex_pb);
/* full RD: add SSE distortion of the reconstructed MB */
2720 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2721 ff_mpv_reconstruct_mb(s, s->block);
2723 score *= s->lambda2;
2724 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2728 memcpy(s->dest, dest_backup, sizeof(s->dest));
/* (dropped: if score < *dmin, update *dmin/next_block and ...) */
2735 copy_context_after_encode(best, s, type);
2739 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2740 const uint32_t *sq = ff_square_tab + 256;
2745 return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2746 else if(w==8 && h==8)
2747 return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2751 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/* SSE (or NSSE) distortion of the current macroblock: reconstructed pixels
 * in s->dest[] vs. the source picture s->new_picture. w/h are clipped at the
 * right/bottom picture border; full 16x16 MBs go through the fast MECC
 * paths, edge MBs fall back to the generic sse() helper with halved chroma
 * dimensions (4:2:0 assumption in the >>1).
 * NOTE(review): the w/h declarations and the if(w==16 && h==16) nesting
 * were dropped by the extraction; verify against upstream mpegvideo_enc.c. */
2760 static int sse_mb(MpegEncContext *s){
/* clip MB size at picture borders */
2764 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2765 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
/* noise-preserving SSE variant when requested via mb_cmp */
2768 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2769 return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2770 s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2771 s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2773 return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2774 s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2775 s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
/* border MB: generic path with clipped luma size and halved chroma size */
2778 return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2779 +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2780 +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/* Slice-thread worker: motion-estimation pre-pass over this thread's MB rows.
 * Iterates bottom-up and right-to-left (the pre-pass scans in reverse so the
 * following main pass has predictors on both sides).
 * NOTE(review): lines dropped by extraction (e.g. the me.pre_pass flag
 * set/clear and the return statement); verify against upstream. */
2783 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2784 MpegEncContext *s= *(void**)arg;
/* pre-pass uses its own diamond search size */
2788 s->me.dia_size= s->avctx->pre_dia_size;
2789 s->first_slice_line=1;
/* reverse scan over this slice's rows */
2790 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2791 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2792 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2794 s->first_slice_line=0;
/* Slice-thread worker: main motion-estimation pass over this thread's MB
 * rows. Results (motion vectors and mb_type candidates) are stored in the
 * context for the later encoding pass.
 * NOTE(review): loop-closing braces and the return were dropped by the
 * extraction; verify against upstream mpegvideo_enc.c. */
2802 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2803 MpegEncContext *s= *(void**)arg;
2805 s->me.dia_size= s->avctx->dia_size;
2806 s->first_slice_line=1;
2807 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2808 s->mb_x=0; //for block init below
2809 ff_init_block_index(s);
2810 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
/* advance the 4 luma block indices by one MB (2 blocks horizontally) */
2811 s->block_index[0]+=2;
2812 s->block_index[1]+=2;
2813 s->block_index[2]+=2;
2814 s->block_index[3]+=2;
2816 /* compute motion vector & mb_type and store in context */
2817 if(s->pict_type==AV_PICTURE_TYPE_B)
2818 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2820 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2822 s->first_slice_line=0;
/* Slice-thread worker: per-macroblock luma variance and mean, used by the
 * rate control / adaptive quantization. varc approximates
 * (sum(pix^2) - sum(pix)^2/256) / 256 with rounding (+500+128 bias).
 * NOTE(review): declarations of mb_x/mb_y/xx/yy/varc and the closing braces
 * were dropped by the extraction; verify against upstream. */
2827 static int mb_var_thread(AVCodecContext *c, void *arg){
2828 MpegEncContext *s= *(void**)arg;
2831 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2832 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
/* top-left source pixel of this MB (xx/yy presumably mb_x*16 / mb_y*16) */
2835 uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2837 int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
/* variance: E[x^2] - E[x]^2, scaled by 256 pixels, rounded */
2839 varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2840 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2842 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2843 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
/* accumulated per-thread, merged after the threads join */
2844 s->me.mb_var_sum_temp += varc;
/* Finish the current slice: merge MPEG-4 data partitions if used, emit the
 * codec-specific stuffing/padding, byte-align the bitstream, and account the
 * stuffing bits as misc_bits for two-pass rate control.
 * NOTE(review): some lines (e.g. an else branch after the partitioned-frame
 * check) were dropped by the extraction; verify against upstream. */
2850 static void write_slice_end(MpegEncContext *s){
2851 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2852 if(s->partitioned_frame){
2853 ff_mpeg4_merge_partitions(s);
/* byte-align with MPEG-4 stuffing bits */
2856 ff_mpeg4_stuffing(&s->pb);
2857 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2858 ff_mjpeg_encode_stuffing(s);
2859 } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
2860 ff_speedhq_end_slice(s);
2863 flush_put_bits(&s->pb);
/* charge alignment/stuffing bits to misc for pass-1 statistics */
2865 if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2866 s->misc_bits+= get_bits_diff(s);
/* Append one 12-byte record to the AV_PKT_DATA_H263_MB_INFO side-data
 * buffer: bit offset of the MB in the packet (LE32), quantizer, GOB number,
 * MB address within the GOB (LE16), and the horizontal/vertical MV
 * predictors. The two MV2 slots are zero (4MV not implemented).
 * NOTE(review): the pred_x/pred_y declarations were dropped by the
 * extraction; verify against upstream mpegvideo_enc.c. */
2869 static void write_mb_info(MpegEncContext *s)
/* ptr points at the last (just-reserved) 12-byte record */
2871 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2872 int offset = put_bits_count(&s->pb);
2873 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2874 int gobn = s->mb_y / s->gob_index;
2876 if (CONFIG_H263_ENCODER)
2877 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2878 bytestream_put_le32(&ptr, offset);
2879 bytestream_put_byte(&ptr, s->qscale);
2880 bytestream_put_byte(&ptr, gobn);
2881 bytestream_put_le16(&ptr, mba);
2882 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2883 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2884 /* 4MV not implemented */
2885 bytestream_put_byte(&ptr, 0); /* hmv2 */
2886 bytestream_put_byte(&ptr, 0); /* vmv2 */
/* Maintain the H.263 MB-info side data while encoding: once at least
 * s->mb_info bytes have been written since the previous record, reserve a
 * new 12-byte slot; when called with startcode set, remember the position
 * right after the start code as the slot's MB offset.
 * NOTE(review): several lines were dropped by the extraction (the guard on
 * s->mb_info, the startcode branch structure, and the write_mb_info() call);
 * verify against upstream mpegvideo_enc.c. */
2889 static void update_mb_info(MpegEncContext *s, int startcode)
/* enough payload since the last record? reserve the next 12-byte slot */
2893 if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
2894 s->mb_info_size += 12;
2895 s->prev_mb_info = s->last_mb_info;
2898 s->prev_mb_info = put_bytes_count(&s->pb, 0);
2899 /* This might have incremented mb_info_size above, and we return without
2900 * actually writing any info into that slot yet. But in that case, this
2901 * will be called again right after the start code has been written, and
2902 * the MB info is filled in then. */
/* startcode path: record where the MB data begins */
2906 s->last_mb_info = put_bytes_count(&s->pb, 0);
2907 if (!s->mb_info_size)
2908 s->mb_info_size += 12;
/* Grow the shared output bit buffer when fewer than `threshold` bytes remain.
 * Only applies when there is a single slice context and the PutBitContext
 * writes into avctx->internal->byte_buffer. On success the PutBitContext is
 * rebased onto the new buffer and the raw pointers into it (ptr_lastgob,
 * vbv_delay_ptr) are fixed up. Returns 0-ish/negative AVERROR semantics:
 * ENOMEM on allocation failure or overflow, EINVAL if still below threshold.
 * NOTE(review): a few lines were dropped by the extraction (e.g. the
 * new_buffer NULL check before the AVERROR(ENOMEM) at original line 2933,
 * and the final return); verify against upstream mpegvideo_enc.c. */
2912 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2914 if (put_bytes_left(&s->pb, 0) < threshold
2915 && s->slice_context_count == 1
2916 && s->pb.buf == s->avctx->internal->byte_buffer) {
/* save offsets of raw pointers so they can be rebased after realloc */
2917 int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2918 int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2920 uint8_t *new_buffer = NULL;
2921 int new_buffer_size = 0;
/* refuse growth that would overflow the int-sized buffer bookkeeping */
2923 if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2924 av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2925 return AVERROR(ENOMEM);
2930 av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2931 s->avctx->internal->byte_buffer_size + size_increase);
2933 return AVERROR(ENOMEM);
/* copy old contents, swap buffers, and rebase all pointers into it */
2935 memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2936 av_free(s->avctx->internal->byte_buffer);
2937 s->avctx->internal->byte_buffer = new_buffer;
2938 s->avctx->internal->byte_buffer_size = new_buffer_size;
2939 rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2940 s->ptr_lastgob = s->pb.buf + lastgob_pos;
2941 s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2943 if (put_bytes_left(&s->pb, 0) < threshold)
2944 return AVERROR(EINVAL);
2948 static int encode_thread(AVCodecContext *c, void *arg){
2949 MpegEncContext *s= *(void**)arg;
2950 int mb_x, mb_y, mb_y_order;
2951 int chr_h= 16>>s->chroma_y_shift;
2953 MpegEncContext best_s = { 0 }, backup_s;
2954 uint8_t bit_buf[2][MAX_MB_BYTES];
2955 uint8_t bit_buf2[2][MAX_MB_BYTES];
2956 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2957 PutBitContext pb[2], pb2[2], tex_pb[2];
2960 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2961 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2962 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2965 s->last_bits= put_bits_count(&s->pb);
2976 /* init last dc values */
2977 /* note: quant matrix value (8) is implied here */
2978 s->last_dc[i] = 128 << s->intra_dc_precision;
2980 s->current_picture.encoding_error[i] = 0;
2982 if(s->codec_id==AV_CODEC_ID_AMV){
2983 s->last_dc[0] = 128*8/13;
2984 s->last_dc[1] = 128*8/14;
2985 s->last_dc[2] = 128*8/14;
2988 memset(s->last_mv, 0, sizeof(s->last_mv));
2992 switch(s->codec_id){
2993 case AV_CODEC_ID_H263:
2994 case AV_CODEC_ID_H263P:
2995 case AV_CODEC_ID_FLV1:
2996 if (CONFIG_H263_ENCODER)
2997 s->gob_index = H263_GOB_HEIGHT(s->height);
2999 case AV_CODEC_ID_MPEG4:
3000 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
3001 ff_mpeg4_init_partitions(s);
3007 s->first_slice_line = 1;
3008 s->ptr_lastgob = s->pb.buf;
3009 for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
3010 if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
3012 mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
3013 if (first_in_slice && mb_y_order != s->start_mb_y)
3014 ff_speedhq_end_slice(s);
3015 s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
3022 ff_set_qscale(s, s->qscale);
3023 ff_init_block_index(s);
3025 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3026 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
3027 int mb_type= s->mb_type[xy];
3031 int size_increase = s->avctx->internal->byte_buffer_size/4
3032 + s->mb_width*MAX_MB_BYTES;
3034 ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
3035 if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
3036 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
3039 if(s->data_partitioning){
3040 if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
3041 put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
3042 av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3048 s->mb_y = mb_y; // moved into loop, can get changed by H.261
3049 ff_update_block_index(s);
3051 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3052 ff_h261_reorder_mb_index(s);
3053 xy= s->mb_y*s->mb_stride + s->mb_x;
3054 mb_type= s->mb_type[xy];
3057 /* write gob / video packet header */
3059 int current_packet_size, is_gob_start;
3061 current_packet_size = put_bytes_count(&s->pb, 1)
3062 - (s->ptr_lastgob - s->pb.buf);
3064 is_gob_start = s->rtp_payload_size &&
3065 current_packet_size >= s->rtp_payload_size &&
3068 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3070 switch(s->codec_id){
3071 case AV_CODEC_ID_H263:
3072 case AV_CODEC_ID_H263P:
3073 if(!s->h263_slice_structured)
3074 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3076 case AV_CODEC_ID_MPEG2VIDEO:
3077 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3078 case AV_CODEC_ID_MPEG1VIDEO:
3079 if(s->mb_skip_run) is_gob_start=0;
3081 case AV_CODEC_ID_MJPEG:
3082 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3087 if(s->start_mb_y != mb_y || mb_x!=0){
3090 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3091 ff_mpeg4_init_partitions(s);
3095 av_assert2((put_bits_count(&s->pb)&7) == 0);
3096 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3098 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3099 int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->mb_x + s->mb_y;
3100 int d = 100 / s->error_rate;
3102 current_packet_size=0;
3103 s->pb.buf_ptr= s->ptr_lastgob;
3104 av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3108 #if FF_API_RTP_CALLBACK
3109 FF_DISABLE_DEPRECATION_WARNINGS
3110 if (s->avctx->rtp_callback){
3111 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3112 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3114 FF_ENABLE_DEPRECATION_WARNINGS
3116 update_mb_info(s, 1);
3118 switch(s->codec_id){
3119 case AV_CODEC_ID_MPEG4:
3120 if (CONFIG_MPEG4_ENCODER) {
3121 ff_mpeg4_encode_video_packet_header(s);
3122 ff_mpeg4_clean_buffers(s);
3125 case AV_CODEC_ID_MPEG1VIDEO:
3126 case AV_CODEC_ID_MPEG2VIDEO:
3127 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3128 ff_mpeg1_encode_slice_header(s);
3129 ff_mpeg1_clean_buffers(s);
3132 case AV_CODEC_ID_H263:
3133 case AV_CODEC_ID_H263P:
3134 if (CONFIG_H263_ENCODER)
3135 ff_h263_encode_gob_header(s, mb_y);
3139 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3140 int bits= put_bits_count(&s->pb);
3141 s->misc_bits+= bits - s->last_bits;
3145 s->ptr_lastgob += current_packet_size;
3146 s->first_slice_line=1;
3147 s->resync_mb_x=mb_x;
3148 s->resync_mb_y=mb_y;
3152 if( (s->resync_mb_x == s->mb_x)
3153 && s->resync_mb_y+1 == s->mb_y){
3154 s->first_slice_line=0;
3158 s->dquant=0; //only for QP_RD
3160 update_mb_info(s, 0);
3162 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3164 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3166 copy_context_before_encode(&backup_s, s, -1);
3168 best_s.data_partitioning= s->data_partitioning;
3169 best_s.partitioned_frame= s->partitioned_frame;
3170 if(s->data_partitioning){
3171 backup_s.pb2= s->pb2;
3172 backup_s.tex_pb= s->tex_pb;
3175 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3176 s->mv_dir = MV_DIR_FORWARD;
3177 s->mv_type = MV_TYPE_16X16;
3179 s->mv[0][0][0] = s->p_mv_table[xy][0];
3180 s->mv[0][0][1] = s->p_mv_table[xy][1];
3181 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3182 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3184 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3185 s->mv_dir = MV_DIR_FORWARD;
3186 s->mv_type = MV_TYPE_FIELD;
3189 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3190 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3191 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3193 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3194 &dmin, &next_block, 0, 0);
3196 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3197 s->mv_dir = MV_DIR_FORWARD;
3198 s->mv_type = MV_TYPE_16X16;
3202 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3203 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3205 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3206 s->mv_dir = MV_DIR_FORWARD;
3207 s->mv_type = MV_TYPE_8X8;
3210 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3211 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3213 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3214 &dmin, &next_block, 0, 0);
3216 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3217 s->mv_dir = MV_DIR_FORWARD;
3218 s->mv_type = MV_TYPE_16X16;
3220 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3221 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3222 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3223 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3225 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3226 s->mv_dir = MV_DIR_BACKWARD;
3227 s->mv_type = MV_TYPE_16X16;
3229 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3230 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3231 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3232 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3234 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3235 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3236 s->mv_type = MV_TYPE_16X16;
3238 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3239 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3240 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3241 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3242 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3243 &dmin, &next_block, 0, 0);
3245 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3246 s->mv_dir = MV_DIR_FORWARD;
3247 s->mv_type = MV_TYPE_FIELD;
3250 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3251 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3252 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3254 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3255 &dmin, &next_block, 0, 0);
3257 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3258 s->mv_dir = MV_DIR_BACKWARD;
3259 s->mv_type = MV_TYPE_FIELD;
3262 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3263 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3264 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3266 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3267 &dmin, &next_block, 0, 0);
3269 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3270 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3271 s->mv_type = MV_TYPE_FIELD;
3273 for(dir=0; dir<2; dir++){
3275 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3276 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3277 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3280 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3281 &dmin, &next_block, 0, 0);
3283 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3285 s->mv_type = MV_TYPE_16X16;
3289 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3290 &dmin, &next_block, 0, 0);
3291 if(s->h263_pred || s->h263_aic){
3293 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3295 ff_clean_intra_table_entries(s); //old mode?
3299 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3300 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3301 const int last_qp= backup_s.qscale;
3304 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3305 static const int dquant_tab[4]={-1,1,-2,2};
3306 int storecoefs = s->mb_intra && s->dc_val[0];
3308 av_assert2(backup_s.dquant == 0);
3311 s->mv_dir= best_s.mv_dir;
3312 s->mv_type = MV_TYPE_16X16;
3313 s->mb_intra= best_s.mb_intra;
3314 s->mv[0][0][0] = best_s.mv[0][0][0];
3315 s->mv[0][0][1] = best_s.mv[0][0][1];
3316 s->mv[1][0][0] = best_s.mv[1][0][0];
3317 s->mv[1][0][1] = best_s.mv[1][0][1];
3319 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3320 for(; qpi<4; qpi++){
3321 int dquant= dquant_tab[qpi];
3322 qp= last_qp + dquant;
3323 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3325 backup_s.dquant= dquant;
3328 dc[i]= s->dc_val[0][ s->block_index[i] ];
3329 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3333 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3334 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3335 if(best_s.qscale != qp){
3338 s->dc_val[0][ s->block_index[i] ]= dc[i];
3339 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3346 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3347 int mx= s->b_direct_mv_table[xy][0];
3348 int my= s->b_direct_mv_table[xy][1];
3350 backup_s.dquant = 0;
3351 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3353 ff_mpeg4_set_direct_mv(s, mx, my);
3354 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3355 &dmin, &next_block, mx, my);
3357 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3358 backup_s.dquant = 0;
3359 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3361 ff_mpeg4_set_direct_mv(s, 0, 0);
3362 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3363 &dmin, &next_block, 0, 0);
3365 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3368 coded |= s->block_last_index[i];
3371 memcpy(s->mv, best_s.mv, sizeof(s->mv));
3372 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3373 mx=my=0; //FIXME find the one we actually used
3374 ff_mpeg4_set_direct_mv(s, mx, my);
3375 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3383 s->mv_dir= best_s.mv_dir;
3384 s->mv_type = best_s.mv_type;
3386 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3387 s->mv[0][0][1] = best_s.mv[0][0][1];
3388 s->mv[1][0][0] = best_s.mv[1][0][0];
3389 s->mv[1][0][1] = best_s.mv[1][0][1];*/
3392 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3393 &dmin, &next_block, mx, my);
3398 s->current_picture.qscale_table[xy] = best_s.qscale;
3400 copy_context_after_encode(s, &best_s, -1);
3402 pb_bits_count= put_bits_count(&s->pb);
3403 flush_put_bits(&s->pb);
3404 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3407 if(s->data_partitioning){
3408 pb2_bits_count= put_bits_count(&s->pb2);
3409 flush_put_bits(&s->pb2);
3410 ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3411 s->pb2= backup_s.pb2;
3413 tex_pb_bits_count= put_bits_count(&s->tex_pb);
3414 flush_put_bits(&s->tex_pb);
3415 ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3416 s->tex_pb= backup_s.tex_pb;
3418 s->last_bits= put_bits_count(&s->pb);
3420 if (CONFIG_H263_ENCODER &&
3421 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3422 ff_h263_update_motion_val(s);
3424 if(next_block==0){ //FIXME 16 vs linesize16
3425 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3426 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3427 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3430 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3431 ff_mpv_reconstruct_mb(s, s->block);
3433 int motion_x = 0, motion_y = 0;
3434 s->mv_type=MV_TYPE_16X16;
3435 // only one MB-Type possible
3438 case CANDIDATE_MB_TYPE_INTRA:
3441 motion_x= s->mv[0][0][0] = 0;
3442 motion_y= s->mv[0][0][1] = 0;
3444 case CANDIDATE_MB_TYPE_INTER:
3445 s->mv_dir = MV_DIR_FORWARD;
3447 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3448 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3450 case CANDIDATE_MB_TYPE_INTER_I:
3451 s->mv_dir = MV_DIR_FORWARD;
3452 s->mv_type = MV_TYPE_FIELD;
3455 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3456 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3457 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3460 case CANDIDATE_MB_TYPE_INTER4V:
3461 s->mv_dir = MV_DIR_FORWARD;
3462 s->mv_type = MV_TYPE_8X8;
3465 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3466 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3469 case CANDIDATE_MB_TYPE_DIRECT:
3470 if (CONFIG_MPEG4_ENCODER) {
3471 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3473 motion_x=s->b_direct_mv_table[xy][0];
3474 motion_y=s->b_direct_mv_table[xy][1];
3475 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3478 case CANDIDATE_MB_TYPE_DIRECT0:
3479 if (CONFIG_MPEG4_ENCODER) {
3480 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3482 ff_mpeg4_set_direct_mv(s, 0, 0);
3485 case CANDIDATE_MB_TYPE_BIDIR:
3486 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3488 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3489 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3490 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3491 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3493 case CANDIDATE_MB_TYPE_BACKWARD:
3494 s->mv_dir = MV_DIR_BACKWARD;
3496 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3497 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3499 case CANDIDATE_MB_TYPE_FORWARD:
3500 s->mv_dir = MV_DIR_FORWARD;
3502 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3503 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3505 case CANDIDATE_MB_TYPE_FORWARD_I:
3506 s->mv_dir = MV_DIR_FORWARD;
3507 s->mv_type = MV_TYPE_FIELD;
3510 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3511 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3512 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3515 case CANDIDATE_MB_TYPE_BACKWARD_I:
3516 s->mv_dir = MV_DIR_BACKWARD;
3517 s->mv_type = MV_TYPE_FIELD;
3520 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3521 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3522 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3525 case CANDIDATE_MB_TYPE_BIDIR_I:
3526 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3527 s->mv_type = MV_TYPE_FIELD;
3529 for(dir=0; dir<2; dir++){
3531 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3532 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3533 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3538 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3541 encode_mb(s, motion_x, motion_y);
3543 // RAL: Update last macroblock type
3544 s->last_mv_dir = s->mv_dir;
3546 if (CONFIG_H263_ENCODER &&
3547 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3548 ff_h263_update_motion_val(s);
3550 ff_mpv_reconstruct_mb(s, s->block);
3553 /* clean the MV table in IPS frames for direct mode in B-frames */
3554 if(s->mb_intra /* && I,P,S_TYPE */){
3555 s->p_mv_table[xy][0]=0;
3556 s->p_mv_table[xy][1]=0;
3559 if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3563 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3564 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3566 s->current_picture.encoding_error[0] += sse(
3567 s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3568 s->dest[0], w, h, s->linesize);
3569 s->current_picture.encoding_error[1] += sse(
3570 s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3571 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3572 s->current_picture.encoding_error[2] += sse(
3573 s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3574 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3577 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3578 ff_h263_loop_filter(s);
3580 ff_dlog(s->avctx, "MB %d %d bits\n",
3581 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3585 //not beautiful here but we must write it before flushing so it has to be here
3586 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3587 ff_msmpeg4_encode_ext_header(s);
3591 #if FF_API_RTP_CALLBACK
3592 FF_DISABLE_DEPRECATION_WARNINGS
3593 /* Send the last GOB if RTP */
3594 if (s->avctx->rtp_callback) {
3595 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3596 int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3597 /* Call the RTP callback to send the last GOB */
3599 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3601 FF_ENABLE_DEPRECATION_WARNINGS
/* MERGE(): add a field from the slice context (src) into the main context
 * (dst) and zero it in src, so a repeated merge cannot double-count. */
3607 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold the motion-estimation statistics gathered by one slice thread (src)
 * into the main encoder context (dst) after the ME pass. */
3608 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3609 MERGE(me.scene_change_score);
3610 MERGE(me.mc_mb_var_sum_temp);
3611 MERGE(me.mb_var_sum_temp);
/* Fold per-slice encoding statistics and the bitstream produced by a slice
 * thread (src) back into the main context (dst) after encode_thread. */
3614 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3617 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3618 MERGE(dct_count[1]);
3627 MERGE(er.error_count);
3628 MERGE(padding_bug_score);
3629 MERGE(current_picture.encoding_error[0]);
3630 MERGE(current_picture.encoding_error[1]);
3631 MERGE(current_picture.encoding_error[2]);
/* The per-position DCT error accumulators only exist with noise reduction. */
3633 if (dst->noise_reduction){
3634 for(i=0; i<64; i++){
3635 MERGE(dct_error_sum[0][i]);
3636 MERGE(dct_error_sum[1][i]);
/* Slice bitstreams must be byte-aligned here; append src's bits to dst. */
3640 av_assert1(put_bits_count(&src->pb) % 8 ==0);
3641 av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3642 ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3643 flush_put_bits(&dst->pb);
/**
 * Choose the quality (quantizer / lambda) for the current picture.
 * A pending forced lambda (s->next_lambda) takes precedence; otherwise the
 * rate controller is consulted unless the qscale is fixed.
 * @param dry_run nonzero to probe without consuming state (next_lambda kept)
 * NOTE(review): returns int -- presumably 0 on success and negative when
 * rate estimation fails; confirm against the elided error path.
 */
3646 static int estimate_qp(MpegEncContext *s, int dry_run){
3647 if (s->next_lambda){
3648 s->current_picture_ptr->f->quality =
3649 s->current_picture.f->quality = s->next_lambda;
3650 if(!dry_run) s->next_lambda= 0;
3651 } else if (!s->fixed_qscale) {
3652 int quality = ff_rate_estimate_qscale(s, dry_run);
3653 s->current_picture_ptr->f->quality =
3654 s->current_picture.f->quality = quality;
3655 if (s->current_picture.f->quality < 0)
/* Adaptive quantization: smooth/clean the per-MB qscale table in a
 * codec-specific way before it is used. */
3659 if(s->adaptive_quant){
3660 switch(s->codec_id){
3661 case AV_CODEC_ID_MPEG4:
3662 if (CONFIG_MPEG4_ENCODER)
3663 ff_clean_mpeg4_qscales(s);
3665 case AV_CODEC_ID_H263:
3666 case AV_CODEC_ID_H263P:
3667 case AV_CODEC_ID_FLV1:
3668 if (CONFIG_H263_ENCODER)
3669 ff_clean_h263_qscales(s);
3672 ff_init_qscale_tab(s);
/* Lambda comes from the per-MB table when adaptive, otherwise from the
 * picture quality chosen above. */
3675 s->lambda= s->lambda_table[0];
3678 s->lambda = s->current_picture.f->quality;
3683 /* must be called before writing the header */
/* Compute temporal distances from the picture pts: pp_time is the distance
 * between the two surrounding non-B pictures, pb_time the distance from the
 * previous non-B picture to the current B picture. Non-B pictures also
 * update last_non_b_time. */
3684 static void set_frame_distances(MpegEncContext * s){
3685 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3686 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3688 if(s->pict_type==AV_PICTURE_TYPE_B){
3689 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3690 av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
/* non-B picture: advance the reference times */
3692 s->pp_time= s->time - s->last_non_b_time;
3693 s->last_non_b_time= s->time;
3694 av_assert1(s->picture_number==0 || s->pp_time > 0);
/**
 * Encode one picture: set up timing/rounding/lambda, run motion estimation
 * over the slice thread contexts, select f_code/b_code and clip long MVs,
 * build quantization matrices where the format needs them, write the
 * per-format picture header, then run encode_thread on all slice contexts
 * and merge their statistics and bitstreams back into the main context.
 * NOTE(review): presumably returns 0 on success / negative AVERROR -- the
 * return statements are elided in this view; confirm against the full file.
 */
3698 static int encode_picture(MpegEncContext *s, int picture_number)
3702 int context_count = s->slice_context_count;
3704 s->picture_number = picture_number;
3706 /* Reset the average MB variance */
3707 s->me.mb_var_sum_temp =
3708 s->me.mc_mb_var_sum_temp = 0;
3710 /* we need to initialize some time vars before we can encode B-frames */
3711 // RAL: Condition added for MPEG1VIDEO
3712 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3713 set_frame_distances(s);
3714 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3715 ff_set_mpeg4_time(s);
3717 s->me.scene_change_score=0;
3719 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* Rounding mode: I frames reset it; reference (non-B) frames may toggle it
 * ("flipflop") for codecs that support both modes. */
3721 if(s->pict_type==AV_PICTURE_TYPE_I){
3722 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3723 else s->no_rounding=0;
3724 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3725 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3726 s->no_rounding ^= 1;
/* Rate control: pass-2 reads the stats file; otherwise reuse the last
 * lambda of the same picture class as a starting point for ME. */
3729 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3730 if (estimate_qp(s,1) < 0)
3732 ff_get_2pass_fcode(s);
3733 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3734 if(s->pict_type==AV_PICTURE_TYPE_B)
3735 s->lambda= s->last_lambda_for[s->pict_type];
3737 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* All codecs except (A)MJPEG share one intra matrix for luma and chroma. */
3741 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3742 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3743 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3744 s->q_chroma_intra_matrix = s->q_intra_matrix;
3745 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
/* Propagate the main context into the per-slice thread contexts. */
3748 s->mb_intra=0; //for the rate distortion & bit compare functions
3749 for(i=1; i<context_count; i++){
3750 ret = ff_update_duplicate_context(s->thread_context[i], s);
3758 /* Estimate motion for every MB */
3759 if(s->pict_type != AV_PICTURE_TYPE_I){
3760 s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3761 s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3762 if (s->pict_type != AV_PICTURE_TYPE_B) {
3763 if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3765 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3769 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3770 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3772 for(i=0; i<s->mb_stride*s->mb_height; i++)
3773 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3775 if(!s->fixed_qscale){
3776 /* finding spatial complexity for I-frame rate control */
3777 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3780 for(i=1; i<context_count; i++){
3781 merge_context_after_me(s, s->thread_context[i]);
3783 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3784 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* Scene change: re-encode a planned P frame as an all-intra I frame. */
3787 if (s->me.scene_change_score > s->scenechange_threshold &&
3788 s->pict_type == AV_PICTURE_TYPE_P) {
3789 s->pict_type= AV_PICTURE_TYPE_I;
3790 for(i=0; i<s->mb_stride*s->mb_height; i++)
3791 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3792 if(s->msmpeg4_version >= 3)
3794 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3795 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* Pick the MV range codes (f_code/b_code) and clip overlong vectors. */
3799 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3800 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3802 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3804 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3805 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3806 s->f_code= FFMAX3(s->f_code, a, b);
3809 ff_fix_long_p_mvs(s, s->intra_penalty ? CANDIDATE_MB_TYPE_INTER : CANDIDATE_MB_TYPE_INTRA);
3810 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3811 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3815 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3816 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
3821 if(s->pict_type==AV_PICTURE_TYPE_B){
3824 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3825 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3826 s->f_code = FFMAX(a, b);
3828 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3829 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3830 s->b_code = FFMAX(a, b);
3832 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3833 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3834 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3835 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3836 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3838 for(dir=0; dir<2; dir++){
3841 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3842 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3843 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3844 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* Final qscale selection for this picture (non-dry run). */
3852 if (estimate_qp(s, 0) < 0)
3855 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3856 s->pict_type == AV_PICTURE_TYPE_I &&
3857 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3858 s->qscale= 3; //reduce clipping problems
/* (M)JPEG: bake qscale into the quantization matrices instead of
 * transmitting it separately. */
3860 if (s->out_format == FMT_MJPEG) {
3861 const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3862 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3864 if (s->avctx->intra_matrix) {
3866 luma_matrix = s->avctx->intra_matrix;
3868 if (s->avctx->chroma_intra_matrix)
3869 chroma_matrix = s->avctx->chroma_intra_matrix;
3871 /* for mjpeg, we do include qscale in the matrix */
3873 int j = s->idsp.idct_permutation[i];
3875 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3876 s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3878 s->y_dc_scale_table=
3879 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3880 s->chroma_intra_matrix[0] =
3881 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3882 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3883 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3884 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3885 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV uses fixed SP5X quantization tables and constant DC scales. */
3888 if(s->codec_id == AV_CODEC_ID_AMV){
3889 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3890 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3892 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3894 s->intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3895 s->chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3897 s->y_dc_scale_table= y;
3898 s->c_dc_scale_table= c;
3899 s->intra_matrix[0] = 13;
3900 s->chroma_intra_matrix[0] = 14;
3901 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3902 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3903 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3904 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3908 if (s->out_format == FMT_SPEEDHQ) {
3909 s->y_dc_scale_table=
3910 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[3];
3913 //FIXME var duplication
3914 s->current_picture_ptr->f->key_frame =
3915 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3916 s->current_picture_ptr->f->pict_type =
3917 s->current_picture.f->pict_type = s->pict_type;
3919 if (s->current_picture.f->key_frame)
3920 s->picture_in_gop_number=0;
/* Write the per-format picture header and measure its size in bits. */
3922 s->mb_x = s->mb_y = 0;
3923 s->last_bits= put_bits_count(&s->pb);
3924 switch(s->out_format) {
3926 if (CONFIG_MJPEG_ENCODER && s->huffman != HUFFMAN_TABLE_OPTIMAL)
3927 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3928 s->pred, s->intra_matrix, s->chroma_intra_matrix);
3931 if (CONFIG_SPEEDHQ_ENCODER)
3932 ff_speedhq_encode_picture_header(s);
3935 if (CONFIG_H261_ENCODER)
3936 ff_h261_encode_picture_header(s, picture_number);
3939 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3940 ff_wmv2_encode_picture_header(s, picture_number);
3941 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3942 ff_msmpeg4_encode_picture_header(s, picture_number);
3943 else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3944 ret = ff_mpeg4_encode_picture_header(s, picture_number);
3947 } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3948 ret = ff_rv10_encode_picture_header(s, picture_number);
3952 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3953 ff_rv20_encode_picture_header(s, picture_number);
3954 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3955 ff_flv_encode_picture_header(s, picture_number);
3956 else if (CONFIG_H263_ENCODER)
3957 ff_h263_encode_picture_header(s, picture_number);
3960 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3961 ff_mpeg1_encode_picture_header(s, picture_number);
3966 bits= put_bits_count(&s->pb);
3967 s->header_bits= bits - s->last_bits;
/* Run the slice encoders in parallel and merge their output back. */
3969 for(i=1; i<context_count; i++){
3970 update_duplicate_context_after_me(s->thread_context[i], s);
3972 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3973 for(i=1; i<context_count; i++){
3974 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3975 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3976 merge_context_after_encode(s, s->thread_context[i]);
/* Noise-reduction filter applied to each DCT block before quantization:
 * accumulates per-position coefficient statistics (dct_count,
 * dct_error_sum, kept separately for intra and inter blocks) and pulls
 * every coefficient toward zero by the learned per-position offset,
 * clamping so a coefficient cannot flip sign. */
3982 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3983 const int intra= s->mb_intra;
3986 s->dct_count[intra]++;
3988 for(i=0; i<64; i++){
3989 int level= block[i];
/* positive coefficient: subtract the offset, clamp at zero */
3993 s->dct_error_sum[intra][i] += level;
3994 level -= s->dct_offset[intra][i];
3995 if(level<0) level=0;
/* negative coefficient: mirror of the positive case */
3997 s->dct_error_sum[intra][i] -= level;
3998 level += s->dct_offset[intra][i];
3999 if(level>0) level=0;
/**
 * Rate-distortion optimal quantization of one 8x8 DCT block using a
 * Viterbi-style trellis over the scan positions: for each position up to
 * two candidate levels are considered and the cheapest (run, level)
 * predecessor chain (distortion + VLC bits * lambda) is kept.
 * @param n        block index (n < 4 selects the luma matrix/VLC tables,
 *                 n >= 4 the chroma ones)
 * @param overflow set to nonzero when a level exceeded s->max_qcoeff
 * @return index of the last non-zero coefficient (scan order); a value
 *         below start_i means the block quantized to all zeros, and -1 is
 *         returned in the DC-only intra case when the best DC level is 0.
 */
4006 static int dct_quantize_trellis_c(MpegEncContext *s,
4007 int16_t *block, int n,
4008 int qscale, int *overflow){
4010 const uint16_t *matrix;
4011 const uint8_t *scantable;
4012 const uint8_t *perm_scantable;
4014 unsigned int threshold1, threshold2;
4026 int coeff_count[64];
4027 int qmul, qadd, start_i, last_non_zero, i, dc;
4028 const int esc_length= s->ac_esc_length;
4030 uint8_t * last_length;
4031 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
/* Forward DCT, then optional noise shaping. */
4034 s->fdsp.fdct(block);
4036 if(s->dct_error_sum)
4037 s->denoise_dct(s, block);
4039 qadd= ((qscale-1)|1)*8;
/* MPEG-2 can use a non-linear qscale mapping. */
4041 if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
4042 else mpeg2_qscale = qscale << 1;
/* Intra path: intra scantable, matrices and VLC length tables. */
4046 scantable= s->intra_scantable.scantable;
4047 perm_scantable= s->intra_scantable.permutated;
4055 /* For AIC we skip quant/dequant of INTRADC */
4060 /* note: block[0] is assumed to be positive */
4061 block[0] = (block[0] + (q >> 1)) / q;
4064 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4065 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4066 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4067 bias= 1<<(QMAT_SHIFT-1);
4069 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4070 length = s->intra_chroma_ac_vlc_length;
4071 last_length= s->intra_chroma_ac_vlc_last_length;
4073 length = s->intra_ac_vlc_length;
4074 last_length= s->intra_ac_vlc_last_length;
/* Inter path: inter scantable, matrix and VLC length tables. */
4077 scantable= s->inter_scantable.scantable;
4078 perm_scantable= s->inter_scantable.permutated;
4081 qmat = s->q_inter_matrix[qscale];
4082 matrix = s->inter_matrix;
4083 length = s->inter_ac_vlc_length;
4084 last_length= s->inter_ac_vlc_last_length;
4088 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4089 threshold2= (threshold1<<1);
/* Scan backwards to find the last coefficient surviving quantization. */
4091 for(i=63; i>=start_i; i--) {
4092 const int j = scantable[i];
4093 int level = block[j] * qmat[j];
4095 if(((unsigned)(level+threshold1))>threshold2){
/* Build up to two candidate quantized levels per position. */
4101 for(i=start_i; i<=last_non_zero; i++) {
4102 const int j = scantable[i];
4103 int level = block[j] * qmat[j];
4105 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4106 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4107 if(((unsigned)(level+threshold1))>threshold2){
4109 level= (bias + level)>>QMAT_SHIFT;
4111 coeff[1][i]= level-1;
4112 // coeff[2][k]= level-2;
4114 level= (bias - level)>>QMAT_SHIFT;
4115 coeff[0][i]= -level;
4116 coeff[1][i]= -level+1;
4117 // coeff[2][k]= -level+2;
4119 coeff_count[i]= FFMIN(level, 2);
4120 av_assert2(coeff_count[i]);
4123 coeff[0][i]= (level>>31)|1;
4128 *overflow= s->max_qcoeff < max; //overflow might have happened
4130 if(last_non_zero < start_i){
4131 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4132 return last_non_zero;
4135 score_tab[start_i]= 0;
4136 survivor[0]= start_i;
/* Trellis: at each position evaluate each candidate level against every
 * surviving predecessor, scoring distortion + VLC bit cost * lambda. */
4139 for(i=start_i; i<=last_non_zero; i++){
4140 int level_index, j, zero_distortion;
4141 int dct_coeff= FFABS(block[ scantable[i] ]);
4142 int best_score=256*256*256*120;
/* The ifast FDCT leaves AAN scaling in the coefficients; undo it. */
4144 if (s->fdsp.fdct == ff_fdct_ifast)
4145 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4146 zero_distortion= dct_coeff*dct_coeff;
4148 for(level_index=0; level_index < coeff_count[i]; level_index++){
4150 int level= coeff[level_index][i];
4151 const int alevel= FFABS(level);
/* Reconstruct the dequantized value the decoder would see. */
4156 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4157 unquant_coeff= alevel*qmul + qadd;
4158 } else if(s->out_format == FMT_MJPEG) {
4159 j = s->idsp.idct_permutation[scantable[i]];
4160 unquant_coeff = alevel * matrix[j] * 8;
4162 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4164 unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4165 unquant_coeff = (unquant_coeff - 1) | 1;
4167 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4168 unquant_coeff = (unquant_coeff - 1) | 1;
4173 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* Levels representable by the VLC table; otherwise escape-code cost. */
4175 if((level&(~127)) == 0){
4176 for(j=survivor_count-1; j>=0; j--){
4177 int run= i - survivor[j];
4178 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4179 score += score_tab[i-run];
4181 if(score < best_score){
4184 level_tab[i+1]= level-64;
4188 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4189 for(j=survivor_count-1; j>=0; j--){
4190 int run= i - survivor[j];
4191 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4192 score += score_tab[i-run];
4193 if(score < last_score){
4196 last_level= level-64;
4202 distortion += esc_length*lambda;
4203 for(j=survivor_count-1; j>=0; j--){
4204 int run= i - survivor[j];
4205 int score= distortion + score_tab[i-run];
4207 if(score < best_score){
4210 level_tab[i+1]= level-64;
4214 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4215 for(j=survivor_count-1; j>=0; j--){
4216 int run= i - survivor[j];
4217 int score= distortion + score_tab[i-run];
4218 if(score < last_score){
4221 last_level= level-64;
/* Prune the survivor list before adding position i+1. */
4229 score_tab[i+1]= best_score;
4231 // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4232 if(last_non_zero <= 27){
4233 for(; survivor_count; survivor_count--){
4234 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4238 for(; survivor_count; survivor_count--){
4239 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4244 survivor[ survivor_count++ ]= i+1;
/* Non-H.26x formats: pick the cheapest place to end the block (EOB). */
4247 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4248 last_score= 256*256*256*120;
4249 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4250 int score= score_tab[i];
4252 score += lambda * 2; // FIXME more exact?
4254 if(score < last_score){
4257 last_level= level_tab[i];
4258 last_run= run_tab[i];
4263 s->coded_score[n] = last_score;
4265 dc= FFABS(block[0]);
4266 last_non_zero= last_i - 1;
4267 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4269 if(last_non_zero < start_i)
4270 return last_non_zero;
/* Special case: only the DC coefficient remains -- re-decide its level. */
4272 if(last_non_zero == 0 && start_i == 0){
4274 int best_score= dc * dc;
4276 for(i=0; i<coeff_count[0]; i++){
4277 int level= coeff[i][0];
4278 int alevel= FFABS(level);
4279 int unquant_coeff, score, distortion;
4281 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4282 unquant_coeff= (alevel*qmul + qadd)>>3;
4284 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4285 unquant_coeff = (unquant_coeff - 1) | 1;
4287 unquant_coeff = (unquant_coeff + 4) >> 3;
4288 unquant_coeff<<= 3 + 3;
4290 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4292 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4293 else score= distortion + esc_length*lambda;
4295 if(score < best_score){
4297 best_level= level - 64;
4300 block[0]= best_level;
4301 s->coded_score[n] = best_score - dc*dc;
4302 if(best_level == 0) return -1;
4303 else return last_non_zero;
/* Back-track the winning path and write (run, level) into the block. */
4307 av_assert2(last_level);
4309 block[ perm_scantable[last_non_zero] ]= last_level;
4312 for(; i>start_i; i -= run_tab[i] + 1){
4313 block[ perm_scantable[i-1] ]= level_tab[i];
4316 return last_non_zero;
/* 8x8 DCT basis functions in fixed point (BASIS_SHIFT precision), with the
 * coefficient index remapped through the IDCT permutation; filled lazily
 * by build_basis() and used by dct_quantize_refine(). */
4319 static int16_t basis[64][64];
/* Precompute the scaled cosine basis: basis[perm(index)][8x+y] =
 * 0.25 * C(i) * C(j) * cos-terms, where C(0) = 1/sqrt(2). */
4321 static void build_basis(uint8_t *perm){
4328 double s= 0.25*(1<<BASIS_SHIFT);
4330 int perm_index= perm[index];
4331 if(i==0) s*= sqrt(0.5);
4332 if(j==0) s*= sqrt(0.5);
4333 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/**
 * Second-pass refinement of an already-quantized block (quantizer noise
 * shaping): greedily tries +/-1 changes on individual coefficients and
 * keeps any change that lowers the combined rate + weighted-distortion
 * score, measured against the reconstruction residual rem[] via
 * mpvencdsp.try_8x8basis()/add_8x8basis().
 * @param weight per-coefficient noise-shaping weights
 * @param orig   the original (unquantized) pixel/coefficient data
 * @return index of the last non-zero coefficient after refinement.
 */
4340 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4341 int16_t *block, int16_t *weight, int16_t *orig,
4344 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4345 const uint8_t *scantable;
4346 const uint8_t *perm_scantable;
4347 // unsigned int threshold1, threshold2;
4352 int qmul, qadd, start_i, last_non_zero, i, dc;
4354 uint8_t * last_length;
4356 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* Lazily build the IDCT basis table on first use. */
4358 if(basis[0][0] == 0)
4359 build_basis(s->idsp.idct_permutation);
/* Intra path: intra scantable and VLC length tables. */
4364 scantable= s->intra_scantable.scantable;
4365 perm_scantable= s->intra_scantable.permutated;
4372 /* For AIC we skip quant/dequant of INTRADC */
4376 q <<= RECON_SHIFT-3;
4377 /* note: block[0] is assumed to be positive */
4379 // block[0] = (block[0] + (q >> 1)) / q;
4381 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4382 // bias= 1<<(QMAT_SHIFT-1);
4383 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4384 length = s->intra_chroma_ac_vlc_length;
4385 last_length= s->intra_chroma_ac_vlc_last_length;
4387 length = s->intra_ac_vlc_length;
4388 last_length= s->intra_ac_vlc_last_length;
/* Inter path: inter scantable and VLC length tables. */
4391 scantable= s->inter_scantable.scantable;
4392 perm_scantable= s->inter_scantable.permutated;
4395 length = s->inter_ac_vlc_length;
4396 last_length= s->inter_ac_vlc_last_length;
4398 last_non_zero = s->block_last_index[n];
/* rem[] = current reconstruction error at RECON_SHIFT precision. */
4400 dc += (1<<(RECON_SHIFT-1));
4401 for(i=0; i<64; i++){
4402 rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
/* Derive the per-coefficient noise-shaping weights (range 16..63). */
4406 for(i=0; i<64; i++){
4411 w= FFABS(weight[i]) + qns*one;
4412 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4415 // w=weight[i] = (63*qns + (w/2)) / w;
4418 av_assert2(w<(1<<6));
4421 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* Add the dequantized coefficients into rem[] and record the runs. */
4425 for(i=start_i; i<=last_non_zero; i++){
4426 int j= perm_scantable[i];
4427 const int level= block[j];
4431 if(level<0) coeff= qmul*level - qadd;
4432 else coeff= qmul*level + qadd;
4433 run_tab[rle_index++]=run;
4436 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
/* Greedy loop: repeatedly find the single best +/-1 change. */
4443 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4446 int run2, best_unquant_change=0, analyze_gradient;
4447 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
/* Optional gradient d1[] used to prune hopeless sign directions below. */
4449 if(analyze_gradient){
4450 for(i=0; i<64; i++){
4453 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
/* Try +/-1 on the intra DC coefficient. */
4459 const int level= block[0];
4460 int change, old_coeff;
4462 av_assert2(s->mb_intra);
4466 for(change=-1; change<=1; change+=2){
4467 int new_level= level + change;
4468 int score, new_coeff;
4470 new_coeff= q*new_level;
4471 if(new_coeff >= 2048 || new_coeff < 0)
4474 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4475 new_coeff - old_coeff);
4476 if(score<best_score){
4479 best_change= change;
4480 best_unquant_change= new_coeff - old_coeff;
4487 run2= run_tab[rle_index++];
/* Try +/-1 on each AC coefficient, including the VLC bit-cost deltas
 * caused by changed (run, level) pairs around it. */
4491 for(i=start_i; i<64; i++){
4492 int j= perm_scantable[i];
4493 const int level= block[j];
4494 int change, old_coeff;
4496 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4500 if(level<0) old_coeff= qmul*level - qadd;
4501 else old_coeff= qmul*level + qadd;
4502 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4506 av_assert2(run2>=0 || i >= last_non_zero );
4509 for(change=-1; change<=1; change+=2){
4510 int new_level= level + change;
4511 int score, new_coeff, unquant_change;
4514 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4518 if(new_level<0) new_coeff= qmul*new_level - qadd;
4519 else new_coeff= qmul*new_level + qadd;
4520 if(new_coeff >= 2048 || new_coeff <= -2048)
4522 //FIXME check for overflow
/* Coefficient stays non-zero: simple VLC length delta. */
4525 if(level < 63 && level > -63){
4526 if(i < last_non_zero)
4527 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4528 - length[UNI_AC_ENC_INDEX(run, level+64)];
4530 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4531 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* Coefficient becomes non-zero: a run is split in two. */
4534 av_assert2(FFABS(new_level)==1);
4536 if(analyze_gradient){
4537 int g= d1[ scantable[i] ];
4538 if(g && (g^new_level) >= 0)
4542 if(i < last_non_zero){
4543 int next_i= i + run2 + 1;
4544 int next_level= block[ perm_scantable[next_i] ] + 64;
4546 if(next_level&(~127))
4549 if(next_i < last_non_zero)
4550 score += length[UNI_AC_ENC_INDEX(run, 65)]
4551 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4552 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4554 score += length[UNI_AC_ENC_INDEX(run, 65)]
4555 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4556 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4558 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4560 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4561 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* Coefficient becomes zero: two runs merge into one. */
4567 av_assert2(FFABS(level)==1);
4569 if(i < last_non_zero){
4570 int next_i= i + run2 + 1;
4571 int next_level= block[ perm_scantable[next_i] ] + 64;
4573 if(next_level&(~127))
4576 if(next_i < last_non_zero)
4577 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4578 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4579 - length[UNI_AC_ENC_INDEX(run, 65)];
4581 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4582 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4583 - length[UNI_AC_ENC_INDEX(run, 65)];
4585 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4587 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4588 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4595 unquant_change= new_coeff - old_coeff;
4596 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4598 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4600 if(score<best_score){
4603 best_change= change;
4604 best_unquant_change= unquant_change;
4608 prev_level= level + 64;
4609 if(prev_level&(~127))
/* Apply the winning change; extend or shrink last_non_zero as needed
 * and rebuild the run table and residual. */
4619 int j= perm_scantable[ best_coeff ];
4621 block[j] += best_change;
4623 if(best_coeff > last_non_zero){
4624 last_non_zero= best_coeff;
4625 av_assert2(block[j]);
4627 for(; last_non_zero>=start_i; last_non_zero--){
4628 if(block[perm_scantable[last_non_zero]])
4635 for(i=start_i; i<=last_non_zero; i++){
4636 int j= perm_scantable[i];
4637 const int level= block[j];
4640 run_tab[rle_index++]=run;
4647 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4653 return last_non_zero;
4657 * Permute an 8x8 block according to a permutation vector.
4658 * @param block the block which will be permuted according to
4659 * the given permutation vector
4660 * @param permutation the permutation vector
4661 * @param last the last non-zero coefficient in scantable order, used to
4662 * speed the permutation up
4663 * @param scantable the used scantable; this is only used to speed the
4664 * permutation up, the block is not (inverse) permuted
4665 * to scantable order!
4667 void ff_block_permute(int16_t *block, uint8_t *permutation,
4668 const uint8_t *scantable, int last)
4675 //FIXME it is ok but not clean and might fail for some permutations
4676 // if (permutation[1] == 1)
/* First pass: stash the coefficients reached through the scantable into
 * temp[] (the copy itself happens in this loop's body). */
4679 for (i = 0; i <= last; i++) {
4680 const int j = scantable[i];
/* Second pass: write each stashed coefficient to its permuted position. */
4685 for (i = 0; i <= last; i++) {
4686 const int j = scantable[i];
4687 const int perm_j = permutation[j];
4688 block[perm_j] = temp[j];
/**
 * Default (non-trellis) quantization of one 8x8 block: forward DCT,
 * optional denoising, biased scalar quantization through the per-qscale
 * multiplier tables, then IDCT-order permutation of the non-zero results.
 * @param n        block index (n < 4 luma, n >= 4 chroma matrix selection)
 * @param overflow set to nonzero when a level exceeded s->max_qcoeff
 * @return index of the last non-zero coefficient in scan order
 */
4692 int ff_dct_quantize_c(MpegEncContext *s,
4693 int16_t *block, int n,
4694 int qscale, int *overflow)
4696 int i, j, level, last_non_zero, q, start_i;
4698 const uint8_t *scantable;
4701 unsigned int threshold1, threshold2;
4703 s->fdsp.fdct(block);
4705 if(s->dct_error_sum)
4706 s->denoise_dct(s, block);
/* Intra path: quantize DC separately, pick the intra matrix and bias. */
4709 scantable= s->intra_scantable.scantable;
4717 /* For AIC we skip quant/dequant of INTRADC */
4720 /* note: block[0] is assumed to be positive */
4721 block[0] = (block[0] + (q >> 1)) / q;
4724 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4725 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
/* Inter path: inter scantable, matrix and bias. */
4727 scantable= s->inter_scantable.scantable;
4730 qmat = s->q_inter_matrix[qscale];
4731 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4733 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4734 threshold2= (threshold1<<1);
/* Scan backwards to find the last coefficient surviving quantization. */
4735 for(i=63;i>=start_i;i--) {
4737 level = block[j] * qmat[j];
4739 if(((unsigned)(level+threshold1))>threshold2){
/* Quantize everything up to that position. */
4746 for(i=start_i; i<=last_non_zero; i++) {
4748 level = block[j] * qmat[j];
4750 // if( bias+level >= (1<<QMAT_SHIFT)
4751 // || bias-level >= (1<<QMAT_SHIFT)){
4752 if(((unsigned)(level+threshold1))>threshold2){
4754 level= (bias + level)>>QMAT_SHIFT;
4757 level= (bias - level)>>QMAT_SHIFT;
4765 *overflow= s->max_qcoeff < max; //overflow might have happened
4767 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4768 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4769 ff_block_permute(block, s->idsp.idct_permutation,
4770 scantable, last_non_zero);
4772 return last_non_zero;
4775 #define OFFSET(x) offsetof(MpegEncContext, x)
4776 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
4777 static const AVOption h263_options[] = {
4778 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4779 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4784 static const AVClass h263_class = {
4785 .class_name = "H.263 encoder",
4786 .item_name = av_default_item_name,
4787 .option = h263_options,
4788 .version = LIBAVUTIL_VERSION_INT,
4791 AVCodec ff_h263_encoder = {
4793 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4794 .type = AVMEDIA_TYPE_VIDEO,
4795 .id = AV_CODEC_ID_H263,
4796 .priv_data_size = sizeof(MpegEncContext),
4797 .init = ff_mpv_encode_init,
4798 .encode2 = ff_mpv_encode_picture,
4799 .close = ff_mpv_encode_end,
4800 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4801 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4802 .priv_class = &h263_class,
4805 static const AVOption h263p_options[] = {
4806 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4807 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4808 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4809 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
4813 static const AVClass h263p_class = {
4814 .class_name = "H.263p encoder",
4815 .item_name = av_default_item_name,
4816 .option = h263p_options,
4817 .version = LIBAVUTIL_VERSION_INT,
4820 AVCodec ff_h263p_encoder = {
4822 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4823 .type = AVMEDIA_TYPE_VIDEO,
4824 .id = AV_CODEC_ID_H263P,
4825 .priv_data_size = sizeof(MpegEncContext),
4826 .init = ff_mpv_encode_init,
4827 .encode2 = ff_mpv_encode_picture,
4828 .close = ff_mpv_encode_end,
4829 .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4830 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4831 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4832 .priv_class = &h263p_class,
4835 static const AVClass msmpeg4v2_class = {
4836 .class_name = "msmpeg4v2 encoder",
4837 .item_name = av_default_item_name,
4838 .option = ff_mpv_generic_options,
4839 .version = LIBAVUTIL_VERSION_INT,
4842 AVCodec ff_msmpeg4v2_encoder = {
4843 .name = "msmpeg4v2",
4844 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4845 .type = AVMEDIA_TYPE_VIDEO,
4846 .id = AV_CODEC_ID_MSMPEG4V2,
4847 .priv_data_size = sizeof(MpegEncContext),
4848 .init = ff_mpv_encode_init,
4849 .encode2 = ff_mpv_encode_picture,
4850 .close = ff_mpv_encode_end,
4851 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4852 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4853 .priv_class = &msmpeg4v2_class,
4856 static const AVClass msmpeg4v3_class = {
4857 .class_name = "msmpeg4v3 encoder",
4858 .item_name = av_default_item_name,
4859 .option = ff_mpv_generic_options,
4860 .version = LIBAVUTIL_VERSION_INT,
4863 AVCodec ff_msmpeg4v3_encoder = {
4865 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4866 .type = AVMEDIA_TYPE_VIDEO,
4867 .id = AV_CODEC_ID_MSMPEG4V3,
4868 .priv_data_size = sizeof(MpegEncContext),
4869 .init = ff_mpv_encode_init,
4870 .encode2 = ff_mpv_encode_picture,
4871 .close = ff_mpv_encode_end,
4872 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4873 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4874 .priv_class = &msmpeg4v3_class,
4877 static const AVClass wmv1_class = {
4878 .class_name = "wmv1 encoder",
4879 .item_name = av_default_item_name,
4880 .option = ff_mpv_generic_options,
4881 .version = LIBAVUTIL_VERSION_INT,
4884 AVCodec ff_wmv1_encoder = {
4886 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4887 .type = AVMEDIA_TYPE_VIDEO,
4888 .id = AV_CODEC_ID_WMV1,
4889 .priv_data_size = sizeof(MpegEncContext),
4890 .init = ff_mpv_encode_init,
4891 .encode2 = ff_mpv_encode_picture,
4892 .close = ff_mpv_encode_end,
4893 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4894 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4895 .priv_class = &wmv1_class,