2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/mem_internal.h"
40 #include "libavutil/pixdesc.h"
41 #include "libavutil/opt.h"
42 #include "libavutil/thread.h"
47 #include "mpegvideo.h"
48 #include "mpegvideodata.h"
52 #include "mjpegenc_common.h"
54 #include "mpegutils.h"
56 #include "speedhqenc.h"
58 #include "pixblockdsp.h"
62 #include "aandcttab.h"
64 #include "mpeg4video.h"
66 #include "bytestream.h"
69 #include "packet_internal.h"
/* Fixed-point precision used for the intra/inter quantizer bias values. */
73 #define QUANT_BIAS_SHIFT 8
/* Precision of the 16-bit quantization multiplier tables (qmat16). */
75 #define QMAT_SHIFT_MMX 16
/* Forward declarations for helpers defined later in this file. */
78 static int encode_picture(MpegEncContext *s, int picture_number);
79 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
80 static int sse_mb(MpegEncContext *s);
81 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
82 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
/* Shared default tables filled once by mpv_encode_init_static() and handed
 * to every encoder context in mpv_encode_defaults(). */
84 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
85 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
/* Generic AVOptions shared by all mpegvideo-based encoders.
 * NOTE(review): the array body is missing from this extraction. */
87 const AVOption ff_mpv_generic_options[] = {
/**
 * Precompute quantization multiplier tables from a quantization matrix.
 *
 * For every qscale in [qmin, qmax], fills qmat (32-bit multipliers used by
 * the C/trellis quantizers) and qmat16 (16-bit multiplier + bias pairs used
 * by SIMD quantizers). The multiplier formula depends on which forward-DCT
 * implementation is active, since ff_fdct_ifast leaves the AAN scale factors
 * (ff_aanscales) in its output while the "islow"/faandct variants do not.
 *
 * @param quant_matrix  64-entry matrix in natural order; entries are stored
 *                      through s->idsp.idct_permutation.
 * @param bias          quantizer rounding bias (QUANT_BIAS_SHIFT precision).
 * @param intra         1 to skip the DC coefficient in the overflow scan.
 *
 * NOTE(review): this extraction is missing lines (braces, declarations of
 * qscale/qscale2/i/shift/max, the tail of the overflow loop), so the code
 * below is not complete.
 */
92 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
93 uint16_t (*qmat16)[2][64],
94 const uint16_t *quant_matrix,
95 int bias, int qmin, int qmax, int intra)
97 FDCTDSPContext *fdsp = &s->fdsp;
101 for (qscale = qmin; qscale <= qmax; qscale++) {
/* Map the nominal qscale to the effective quantizer step: MPEG-2's
 * non-linear table, or the linear 2*qscale otherwise. */
105 if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
106 else qscale2 = qscale << 1;
108 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
110 fdsp->fdct == ff_faandct ||
111 #endif /* CONFIG_FAANDCT */
112 fdsp->fdct == ff_jpeg_fdct_islow_10) {
113 for (i = 0; i < 64; i++) {
114 const int j = s->idsp.idct_permutation[i];
115 int64_t den = (int64_t) qscale2 * quant_matrix[j];
116 /* 16 <= qscale * quant_matrix[i] <= 7905
117 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
118 * 19952 <= x <= 249205026
119 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
120 * 3444240 >= (1 << 36) / (x) >= 275 */
122 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
124 } else if (fdsp->fdct == ff_fdct_ifast) {
/* ifast DCT output still carries the AAN scale factors, so they are
 * folded into the denominator here (extra +14 bits of precision). */
125 for (i = 0; i < 64; i++) {
126 const int j = s->idsp.idct_permutation[i];
127 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
128 /* 16 <= qscale * quant_matrix[i] <= 7905
129 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
130 * 19952 <= x <= 249205026
131 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
132 * 3444240 >= (1 << 36) / (x) >= 275 */
134 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
137 for (i = 0; i < 64; i++) {
138 const int j = s->idsp.idct_permutation[i];
139 int64_t den = (int64_t) qscale2 * quant_matrix[j];
140 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
141 * Assume x = qscale * quant_matrix[i]
143 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
144 * so 32768 >= (1 << 19) / (x) >= 67 */
145 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
146 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
147 // (qscale * quant_matrix[i]);
148 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
/* Clamp the 16-bit multiplier away from 0 and from 128*256 so the
 * SIMD quantizers never overflow / divide by zero. */
150 if (qmat16[qscale][0][i] == 0 ||
151 qmat16[qscale][0][i] == 128 * 256)
152 qmat16[qscale][0][i] = 128 * 256 - 1;
153 qmat16[qscale][1][i] =
154 ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
155 qmat16[qscale][0][i]);
/* Scan for multipliers that could overflow 32-bit intermediates and
 * (in the missing tail) reduce the shift, warning below if needed. */
159 for (i = intra; i < 64; i++) {
161 if (fdsp->fdct == ff_fdct_ifast) {
162 max = (8191LL * ff_aanscales[i]) >> 14;
164 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
170 av_log(s->avctx, AV_LOG_INFO,
171 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/*
 * Derive s->qscale (and lambda2) from the current rate-control lambda.
 * The first branch — a table search through the MPEG-2 non-linear qscale
 * values — is deliberately disabled by the "&& 0"; the linear mapping
 * lambda -> qscale below is always used instead.
 * NOTE(review): lines are missing from this extraction (braces, the
 * best-index bookkeeping, the final shift of lambda2).
 */
176 static inline void update_qscale(MpegEncContext *s)
178 if (s->q_scale_type == 1 && 0) {
180 int bestdiff=INT_MAX;
183 for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
/* Distance between this table entry and the target implied by lambda. */
184 int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
185 if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
186 (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
188 if (diff < bestdiff) {
/* Linear mapping: qscale ~= lambda * 139 / 2^(FF_LAMBDA_SHIFT+7),
 * then clamped to [qmin, qmax] (qmax relaxed to 31 under VBV pressure). */
195 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
196 (FF_LAMBDA_SHIFT + 7);
197 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
200 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/*
 * Write a 64-entry quantization matrix to the bitstream in zigzag order,
 * 8 bits per coefficient (MPEG-1/2 sequence-header layout).
 */
204 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
210 for (i = 0; i < 64; i++) {
211 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
218 * init s->current_picture.qscale_table from s->lambda_table
/*
 * Fill the per-macroblock qscale table of the current picture from the
 * per-macroblock lambda table, using the same linear lambda->qp mapping
 * as update_qscale() and clamping to the user qmin (upper clamp is on a
 * line missing from this extraction).
 */
220 void ff_init_qscale_tab(MpegEncContext *s)
222 int8_t * const qscale_table = s->current_picture.qscale_table;
225 for (i = 0; i < s->mb_num; i++) {
226 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
227 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
228 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/*
 * Copy the fields that motion estimation may have changed from the master
 * context back into a slice-thread duplicate context, so the duplicate
 * stays in sync before encoding continues.
 */
233 static void update_duplicate_context_after_me(MpegEncContext *dst,
236 #define COPY(a) dst->a= src->a
238 COPY(current_picture);
244 COPY(picture_in_gop_number);
245 COPY(gop_picture_number);
246 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
247 COPY(progressive_frame); // FIXME don't set in encode_header
248 COPY(partitioned_frame); // FIXME don't set in encode_header
/*
 * One-time initialization of the shared static encoder tables; run exactly
 * once via ff_thread_once() from mpv_encode_defaults(). The visible loop
 * marks the small motion-vector range [-16, 16) as fcode 1 in
 * default_fcode_tab.
 */
252 static void mpv_encode_init_static(void)
254 for (int i = -16; i < 16; i++)
255 default_fcode_tab[i + MAX_MV] = 1;
259 * Set the given MpegEncContext to defaults for encoding.
260 * the changed fields will not depend upon the prior state of the MpegEncContext.
/*
 * Set the given MpegEncContext to encoder defaults: generic mpegvideo
 * defaults, the lazily-initialized shared static tables, and zeroed
 * picture counters. Does not depend on any prior state of *s.
 */
262 static void mpv_encode_defaults(MpegEncContext *s)
264 static AVOnce init_static_once = AV_ONCE_INIT;
266 ff_mpv_common_defaults(s);
/* Thread-safe one-shot init of default_mv_penalty / default_fcode_tab. */
268 ff_thread_once(&init_static_once, mpv_encode_init_static);
270 s->me.mv_penalty = default_mv_penalty;
271 s->fcode_tab = default_fcode_tab;
273 s->input_picture_number = 0;
274 s->picture_in_gop_number = 0;
/*
 * Install the DCT quantization function pointers: x86 SIMD overrides first,
 * then the C fallback, and finally the trellis quantizer when the user
 * enabled trellis (keeping the plain quantizer reachable as
 * fast_dct_quantize).
 */
277 av_cold int ff_dct_encode_init(MpegEncContext *s)
280 ff_dct_encode_init_x86(s);
282 if (CONFIG_H263_ENCODER)
283 ff_h263dsp_init(&s->h263dsp);
284 if (!s->dct_quantize)
285 s->dct_quantize = ff_dct_quantize_c;
287 s->denoise_dct = denoise_dct_c;
/* Keep the non-trellis quantizer available before possibly replacing
 * dct_quantize with the (slower, better) trellis version. */
288 s->fast_dct_quantize = s->dct_quantize;
289 if (s->avctx->trellis)
290 s->dct_quantize = dct_quantize_trellis_c;
295 /* init video encoder */
/*
 * Initialize an mpegvideo-family encoder (MPEG-1/2/4, H.261/263(+), MJPEG,
 * MSMPEG4, WMV1/2, RV10/20, FLV1, SpeedHQ, AMV).
 *
 * Validates user parameters, resolves deprecated public options into the
 * private context, applies per-codec restrictions, sets up per-codec output
 * format and features, initializes DSP contexts, allocates the quantizer /
 * picture arrays, precomputes quantization matrices and starts rate control.
 *
 * Returns 0 on success, a negative AVERROR on invalid parameters or
 * allocation failure.
 *
 * NOTE(review): this extraction is decimated — closing braces, `break;`
 * statements and several lines are missing throughout; the code below is
 * not complete or compilable as-is.
 */
296 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
298 MpegEncContext *s = avctx->priv_data;
299 AVCPBProperties *cpb_props;
300 int i, ret, format_supported;
302 mpv_encode_defaults(s);
/* --- per-codec pixel-format validation --- */
304 switch (avctx->codec_id) {
305 case AV_CODEC_ID_MPEG2VIDEO:
306 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
307 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
308 av_log(avctx, AV_LOG_ERROR,
309 "only YUV420 and YUV422 are supported\n");
310 return AVERROR(EINVAL);
313 case AV_CODEC_ID_MJPEG:
314 case AV_CODEC_ID_AMV:
315 format_supported = 0;
316 /* JPEG color space */
317 if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
318 avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
319 avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
320 (avctx->color_range == AVCOL_RANGE_JPEG &&
321 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
322 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
323 avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
324 format_supported = 1;
325 /* MPEG color space */
326 else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
327 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
328 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
329 avctx->pix_fmt == AV_PIX_FMT_YUV444P))
330 format_supported = 1;
332 if (!format_supported) {
333 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
334 return AVERROR(EINVAL);
337 case AV_CODEC_ID_SPEEDHQ:
338 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
339 avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
340 avctx->pix_fmt != AV_PIX_FMT_YUV444P) {
341 av_log(avctx, AV_LOG_ERROR,
342 "only YUV420/YUV422/YUV444 are supported (no alpha support yet)\n");
343 return AVERROR(EINVAL);
/* default case of the switch: everything else is 4:2:0 only */
347 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
348 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
349 return AVERROR(EINVAL);
/* --- derive internal chroma format from the pixel format --- */
353 switch (avctx->pix_fmt) {
354 case AV_PIX_FMT_YUVJ444P:
355 case AV_PIX_FMT_YUV444P:
356 s->chroma_format = CHROMA_444;
358 case AV_PIX_FMT_YUVJ422P:
359 case AV_PIX_FMT_YUV422P:
360 s->chroma_format = CHROMA_422;
362 case AV_PIX_FMT_YUVJ420P:
363 case AV_PIX_FMT_YUV420P:
365 s->chroma_format = CHROMA_420;
369 avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
/* --- import deprecated public options into the private context --- */
371 #if FF_API_PRIVATE_OPT
372 FF_DISABLE_DEPRECATION_WARNINGS
373 if (avctx->rtp_payload_size)
374 s->rtp_payload_size = avctx->rtp_payload_size;
375 if (avctx->me_penalty_compensation)
376 s->me_penalty_compensation = avctx->me_penalty_compensation;
378 s->me_pre = avctx->pre_me;
379 FF_ENABLE_DEPRECATION_WARNINGS
/* --- copy basic encoding parameters from the AVCodecContext --- */
382 s->bit_rate = avctx->bit_rate;
383 s->width = avctx->width;
384 s->height = avctx->height;
385 if (avctx->gop_size > 600 &&
386 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
387 av_log(avctx, AV_LOG_WARNING,
388 "keyframe interval too large!, reducing it from %d to %d\n",
389 avctx->gop_size, 600);
390 avctx->gop_size = 600;
392 s->gop_size = avctx->gop_size;
394 if (avctx->max_b_frames > MAX_B_FRAMES) {
395 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
396 "is %d.\n", MAX_B_FRAMES);
397 avctx->max_b_frames = MAX_B_FRAMES;
399 s->max_b_frames = avctx->max_b_frames;
400 s->codec_id = avctx->codec->id;
401 s->strict_std_compliance = avctx->strict_std_compliance;
402 s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
403 s->rtp_mode = !!s->rtp_payload_size;
404 s->intra_dc_precision = avctx->intra_dc_precision;
406 // workaround some differences between how applications specify dc precision
407 if (s->intra_dc_precision < 0) {
408 s->intra_dc_precision += 8;
409 } else if (s->intra_dc_precision >= 8)
410 s->intra_dc_precision -= 8;
412 if (s->intra_dc_precision < 0) {
413 av_log(avctx, AV_LOG_ERROR,
414 "intra dc precision must be positive, note some applications use"
415 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
416 return AVERROR(EINVAL);
419 if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))
/* Only MPEG-2 supports non-zero intra DC precision (up to 3). */
422 if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
423 av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
424 return AVERROR(EINVAL);
426 s->user_specified_pts = AV_NOPTS_VALUE;
428 if (s->gop_size <= 1) {
436 s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
/* Adaptive quantization is enabled when any masking/QP-RD option is set. */
438 s->adaptive_quant = (avctx->lumi_masking ||
439 avctx->dark_masking ||
440 avctx->temporal_cplx_masking ||
441 avctx->spatial_cplx_masking ||
444 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
447 s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
/* --- pick a default VBV buffer size when only max rate was given --- */
449 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
450 switch(avctx->codec_id) {
451 case AV_CODEC_ID_MPEG1VIDEO:
452 case AV_CODEC_ID_MPEG2VIDEO:
453 avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
455 case AV_CODEC_ID_MPEG4:
456 case AV_CODEC_ID_MSMPEG4V1:
457 case AV_CODEC_ID_MSMPEG4V2:
458 case AV_CODEC_ID_MSMPEG4V3:
/* Piecewise-linear interpolation of the MPEG-4 VBV buffer sizes. */
459 if (avctx->rc_max_rate >= 15000000) {
460 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
461 } else if(avctx->rc_max_rate >= 2000000) {
462 avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
463 } else if(avctx->rc_max_rate >= 384000) {
464 avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
466 avctx->rc_buffer_size = 40;
467 avctx->rc_buffer_size *= 16384;
470 if (avctx->rc_buffer_size) {
471 av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
/* --- rate-control parameter sanity checks --- */
475 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
476 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
477 return AVERROR(EINVAL);
480 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
481 av_log(avctx, AV_LOG_INFO,
482 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
485 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
486 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
487 return AVERROR(EINVAL);
490 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
491 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
492 return AVERROR(EINVAL);
495 if (avctx->rc_max_rate &&
496 avctx->rc_max_rate == avctx->bit_rate &&
497 avctx->rc_max_rate != avctx->rc_min_rate) {
498 av_log(avctx, AV_LOG_INFO,
499 "impossible bitrate constraints, this will fail\n");
502 if (avctx->rc_buffer_size &&
503 avctx->bit_rate * (int64_t)avctx->time_base.num >
504 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
505 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
506 return AVERROR(EINVAL);
/* Ensure the bitrate tolerance allows at least one frame of drift. */
509 if (!s->fixed_qscale &&
510 avctx->bit_rate * av_q2d(avctx->time_base) >
511 avctx->bit_rate_tolerance) {
512 av_log(avctx, AV_LOG_WARNING,
513 "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
514 avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
/* CBR MPEG-1/2: warn when the buffer exceeds what vbv_delay can express. */
517 if (avctx->rc_max_rate &&
518 avctx->rc_min_rate == avctx->rc_max_rate &&
519 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
520 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
521 90000LL * (avctx->rc_buffer_size - 1) >
522 avctx->rc_max_rate * 0xFFFFLL) {
523 av_log(avctx, AV_LOG_INFO,
524 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
525 "specified vbv buffer is too large for the given bitrate!\n");
/* --- feature / codec compatibility checks --- */
528 if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
529 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
530 s->codec_id != AV_CODEC_ID_FLV1) {
531 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
532 return AVERROR(EINVAL);
535 if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
536 av_log(avctx, AV_LOG_ERROR,
537 "OBMC is only supported with simple mb decision\n");
538 return AVERROR(EINVAL);
541 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
542 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
543 return AVERROR(EINVAL);
546 if (s->max_b_frames &&
547 s->codec_id != AV_CODEC_ID_MPEG4 &&
548 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
549 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
550 av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
551 return AVERROR(EINVAL);
553 if (s->max_b_frames < 0) {
554 av_log(avctx, AV_LOG_ERROR,
555 "max b frames must be 0 or positive for mpegvideo based encoders\n");
556 return AVERROR(EINVAL);
/* H.263/MPEG-4 store the pixel aspect ratio in 8 bits per component. */
559 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
560 s->codec_id == AV_CODEC_ID_H263 ||
561 s->codec_id == AV_CODEC_ID_H263P) &&
562 (avctx->sample_aspect_ratio.num > 255 ||
563 avctx->sample_aspect_ratio.den > 255)) {
564 av_log(avctx, AV_LOG_WARNING,
565 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
566 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
567 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
568 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
/* --- per-codec resolution limits --- */
571 if ((s->codec_id == AV_CODEC_ID_H263 ||
572 s->codec_id == AV_CODEC_ID_H263P) &&
573 (avctx->width > 2048 ||
574 avctx->height > 1152 )) {
575 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
576 return AVERROR(EINVAL);
578 if ((s->codec_id == AV_CODEC_ID_H263 ||
579 s->codec_id == AV_CODEC_ID_H263P) &&
580 ((avctx->width &3) ||
581 (avctx->height&3) )) {
582 av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
583 return AVERROR(EINVAL);
586 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
587 (avctx->width > 4095 ||
588 avctx->height > 4095 )) {
589 av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
590 return AVERROR(EINVAL);
593 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
594 (avctx->width > 16383 ||
595 avctx->height > 16383 )) {
596 av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
597 return AVERROR(EINVAL);
600 if (s->codec_id == AV_CODEC_ID_RV10 &&
602 avctx->height&15 )) {
603 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
604 return AVERROR(EINVAL);
607 if (s->codec_id == AV_CODEC_ID_RV20 &&
610 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
611 return AVERROR(EINVAL);
614 if ((s->codec_id == AV_CODEC_ID_WMV1 ||
615 s->codec_id == AV_CODEC_ID_WMV2) &&
617 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
618 return AVERROR(EINVAL);
621 if ((avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
622 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
623 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
624 return AVERROR(EINVAL);
627 #if FF_API_PRIVATE_OPT
628 FF_DISABLE_DEPRECATION_WARNINGS
629 if (avctx->mpeg_quant)
630 s->mpeg_quant = avctx->mpeg_quant;
631 FF_ENABLE_DEPRECATION_WARNINGS
634 // FIXME mpeg2 uses that too
635 if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
636 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
637 av_log(avctx, AV_LOG_ERROR,
638 "mpeg2 style quantization not supported by codec\n");
639 return AVERROR(EINVAL);
642 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
643 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
644 return AVERROR(EINVAL);
647 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
648 avctx->mb_decision != FF_MB_DECISION_RD) {
649 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
650 return AVERROR(EINVAL);
653 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
654 (s->codec_id == AV_CODEC_ID_AMV ||
655 s->codec_id == AV_CODEC_ID_MJPEG)) {
656 // Used to produce garbage with MJPEG.
657 av_log(avctx, AV_LOG_ERROR,
658 "QP RD is no longer compatible with MJPEG or AMV\n");
659 return AVERROR(EINVAL);
662 #if FF_API_PRIVATE_OPT
663 FF_DISABLE_DEPRECATION_WARNINGS
664 if (avctx->scenechange_threshold)
665 s->scenechange_threshold = avctx->scenechange_threshold;
666 FF_ENABLE_DEPRECATION_WARNINGS
669 if (s->scenechange_threshold < 1000000000 &&
670 (avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
671 av_log(avctx, AV_LOG_ERROR,
672 "closed gop with scene change detection are not supported yet, "
673 "set threshold to 1000000000\n");
674 return AVERROR_PATCHWELCOME;
677 if (avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
678 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
679 s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
680 av_log(avctx, AV_LOG_ERROR,
681 "low delay forcing is only available for mpeg2, "
682 "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
683 return AVERROR(EINVAL);
685 if (s->max_b_frames != 0) {
686 av_log(avctx, AV_LOG_ERROR,
687 "B-frames cannot be used with low delay\n");
688 return AVERROR(EINVAL);
692 if (s->q_scale_type == 1) {
693 if (avctx->qmax > 28) {
694 av_log(avctx, AV_LOG_ERROR,
695 "non linear quant only supports qmax <= 28 currently\n");
696 return AVERROR_PATCHWELCOME;
700 if (avctx->slices > 1 &&
701 (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
702 av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
703 return AVERROR(EINVAL);
706 if (avctx->thread_count > 1 &&
707 s->codec_id != AV_CODEC_ID_MPEG4 &&
708 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
709 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
710 s->codec_id != AV_CODEC_ID_MJPEG &&
711 (s->codec_id != AV_CODEC_ID_H263P)) {
712 av_log(avctx, AV_LOG_ERROR,
713 "multi threaded encoding not supported by codec\n");
714 return AVERROR_PATCHWELCOME;
717 if (avctx->thread_count < 1) {
718 av_log(avctx, AV_LOG_ERROR,
719 "automatic thread number detection not supported by codec, "
721 return AVERROR_PATCHWELCOME;
724 if (!avctx->time_base.den || !avctx->time_base.num) {
725 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
726 return AVERROR(EINVAL);
729 #if FF_API_PRIVATE_OPT
730 FF_DISABLE_DEPRECATION_WARNINGS
731 if (avctx->b_frame_strategy)
732 s->b_frame_strategy = avctx->b_frame_strategy;
733 if (avctx->b_sensitivity != 40)
734 s->b_sensitivity = avctx->b_sensitivity;
735 FF_ENABLE_DEPRECATION_WARNINGS
738 if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
739 av_log(avctx, AV_LOG_INFO,
740 "notice: b_frame_strategy only affects the first pass\n");
741 s->b_frame_strategy = 0;
/* Reduce the timebase to lowest terms before deriving codec fields. */
744 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
746 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
747 avctx->time_base.den /= i;
748 avctx->time_base.num /= i;
/* --- pick the quantizer rounding biases --- */
752 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
753 // (a + x * 3 / 8) / x
754 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
755 s->inter_quant_bias = 0;
757 s->intra_quant_bias = 0;
759 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
762 if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
763 av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
764 return AVERROR(EINVAL);
767 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
/* MPEG-4 stores the timebase denominator in a 16-bit field. */
769 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
770 avctx->time_base.den > (1 << 16) - 1) {
771 av_log(avctx, AV_LOG_ERROR,
772 "timebase %d/%d not supported by MPEG 4 standard, "
773 "the maximum admitted value for the timebase denominator "
774 "is %d\n", avctx->time_base.num, avctx->time_base.den,
776 return AVERROR(EINVAL);
778 s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
/* --- per-codec output format and feature setup --- */
780 switch (avctx->codec->id) {
781 case AV_CODEC_ID_MPEG1VIDEO:
782 s->out_format = FMT_MPEG1;
783 s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
784 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
786 case AV_CODEC_ID_MPEG2VIDEO:
787 s->out_format = FMT_MPEG1;
788 s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
789 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
792 case AV_CODEC_ID_MJPEG:
793 case AV_CODEC_ID_AMV:
794 s->out_format = FMT_MJPEG;
795 s->intra_only = 1; /* force intra only for jpeg */
796 if (!CONFIG_MJPEG_ENCODER)
797 return AVERROR_ENCODER_NOT_FOUND;
798 if ((ret = ff_mjpeg_encode_init(s)) < 0)
803 case AV_CODEC_ID_SPEEDHQ:
804 s->out_format = FMT_SPEEDHQ;
805 s->intra_only = 1; /* force intra only for SHQ */
806 if (!CONFIG_SPEEDHQ_ENCODER)
807 return AVERROR_ENCODER_NOT_FOUND;
808 if ((ret = ff_speedhq_encode_init(s)) < 0)
813 case AV_CODEC_ID_H261:
814 if (!CONFIG_H261_ENCODER)
815 return AVERROR_ENCODER_NOT_FOUND;
816 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
817 av_log(avctx, AV_LOG_ERROR,
818 "The specified picture size of %dx%d is not valid for the "
819 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
820 s->width, s->height);
821 return AVERROR(EINVAL);
823 s->out_format = FMT_H261;
826 s->rtp_mode = 0; /* Sliced encoding not supported */
828 case AV_CODEC_ID_H263:
829 if (!CONFIG_H263_ENCODER)
830 return AVERROR_ENCODER_NOT_FOUND;
831 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
832 s->width, s->height) == 8) {
833 av_log(avctx, AV_LOG_ERROR,
834 "The specified picture size of %dx%d is not valid for "
835 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
836 "352x288, 704x576, and 1408x1152. "
837 "Try H.263+.\n", s->width, s->height);
838 return AVERROR(EINVAL);
840 s->out_format = FMT_H263;
844 case AV_CODEC_ID_H263P:
845 s->out_format = FMT_H263;
848 s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
849 s->modified_quant = s->h263_aic;
850 s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
851 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
854 /* These are just to be sure */
858 case AV_CODEC_ID_FLV1:
859 s->out_format = FMT_H263;
860 s->h263_flv = 2; /* format = 1; 11-bit codes */
861 s->unrestricted_mv = 1;
862 s->rtp_mode = 0; /* don't allow GOB */
866 case AV_CODEC_ID_RV10:
867 s->out_format = FMT_H263;
871 case AV_CODEC_ID_RV20:
872 s->out_format = FMT_H263;
875 s->modified_quant = 1;
879 s->unrestricted_mv = 0;
881 case AV_CODEC_ID_MPEG4:
882 s->out_format = FMT_H263;
884 s->unrestricted_mv = 1;
885 s->low_delay = s->max_b_frames ? 0 : 1;
886 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
888 case AV_CODEC_ID_MSMPEG4V2:
889 s->out_format = FMT_H263;
891 s->unrestricted_mv = 1;
892 s->msmpeg4_version = 2;
896 case AV_CODEC_ID_MSMPEG4V3:
897 s->out_format = FMT_H263;
899 s->unrestricted_mv = 1;
900 s->msmpeg4_version = 3;
901 s->flipflop_rounding = 1;
905 case AV_CODEC_ID_WMV1:
906 s->out_format = FMT_H263;
908 s->unrestricted_mv = 1;
909 s->msmpeg4_version = 4;
910 s->flipflop_rounding = 1;
914 case AV_CODEC_ID_WMV2:
915 s->out_format = FMT_H263;
917 s->unrestricted_mv = 1;
918 s->msmpeg4_version = 5;
919 s->flipflop_rounding = 1;
924 return AVERROR(EINVAL);
927 #if FF_API_PRIVATE_OPT
928 FF_DISABLE_DEPRECATION_WARNINGS
929 if (avctx->noise_reduction)
930 s->noise_reduction = avctx->noise_reduction;
931 FF_ENABLE_DEPRECATION_WARNINGS
934 avctx->has_b_frames = !s->low_delay;
938 s->progressive_frame =
939 s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
940 AV_CODEC_FLAG_INTERLACED_ME) ||
/* --- common context, DSP and buffer initialization --- */
945 if ((ret = ff_mpv_common_init(s)) < 0)
948 ff_fdctdsp_init(&s->fdsp, avctx);
949 ff_me_cmp_init(&s->mecc, avctx);
950 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
951 ff_pixblockdsp_init(&s->pdsp, avctx);
952 ff_qpeldsp_init(&s->qdsp);
954 if (s->msmpeg4_version) {
955 int ac_stats_size = 2 * 2 * (MAX_LEVEL + 1) * (MAX_RUN + 1) * 2 * sizeof(int);
956 if (!(s->ac_stats = av_mallocz(ac_stats_size)))
957 return AVERROR(ENOMEM);
/* Quantizer tables and picture queues; freed in ff_mpv_encode_end(). */
960 if (!(avctx->stats_out = av_mallocz(256)) ||
961 !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix, 32) ||
962 !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix, 32) ||
963 !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix, 32) ||
964 !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16, 32) ||
965 !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
966 !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16, 32) ||
967 !FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_PICTURE_COUNT) ||
968 !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_PICTURE_COUNT))
969 return AVERROR(ENOMEM);
971 if (s->noise_reduction) {
972 if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
973 return AVERROR(ENOMEM);
976 ff_dct_encode_init(s);
978 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
979 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
981 if (s->slice_context_count > 1) {
984 if (avctx->codec_id == AV_CODEC_ID_H263P)
985 s->h263_slice_structured = 1;
988 s->quant_precision = 5;
990 #if FF_API_PRIVATE_OPT
991 FF_DISABLE_DEPRECATION_WARNINGS
992 if (avctx->frame_skip_threshold)
993 s->frame_skip_threshold = avctx->frame_skip_threshold;
994 if (avctx->frame_skip_factor)
995 s->frame_skip_factor = avctx->frame_skip_factor;
996 if (avctx->frame_skip_exp)
997 s->frame_skip_exp = avctx->frame_skip_exp;
998 if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
999 s->frame_skip_cmp = avctx->frame_skip_cmp;
1000 FF_ENABLE_DEPRECATION_WARNINGS
1003 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, avctx->ildct_cmp);
1004 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
1006 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
1007 ff_h261_encode_init(s);
1008 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
1009 ff_h263_encode_init(s);
1010 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
1011 if ((ret = ff_msmpeg4_encode_init(s)) < 0)
1013 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
1014 && s->out_format == FMT_MPEG1)
1015 ff_mpeg1_encode_init(s);
/* --- select default quant matrices (permuted), user override last --- */
1018 for (i = 0; i < 64; i++) {
1019 int j = s->idsp.idct_permutation[i];
1020 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
1022 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
1023 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
1024 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1025 s->intra_matrix[j] =
1026 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1027 } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
1028 s->intra_matrix[j] =
1029 s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1032 s->chroma_intra_matrix[j] =
1033 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1034 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1036 if (avctx->intra_matrix)
1037 s->intra_matrix[j] = avctx->intra_matrix[i];
1038 if (avctx->inter_matrix)
1039 s->inter_matrix[j] = avctx->inter_matrix[i];
1042 /* precompute matrix */
1043 /* for mjpeg, we do include qscale in the matrix */
1044 if (s->out_format != FMT_MJPEG) {
1045 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
1046 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1048 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
1049 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1053 if ((ret = ff_rate_control_init(s)) < 0)
1056 #if FF_API_PRIVATE_OPT
1057 FF_DISABLE_DEPRECATION_WARNINGS
1058 if (avctx->brd_scale)
1059 s->brd_scale = avctx->brd_scale;
1061 if (avctx->prediction_method)
1062 s->pred = avctx->prediction_method + 1;
1063 FF_ENABLE_DEPRECATION_WARNINGS
/* b_frame_strategy==2 needs scratch frames for look-ahead B-frame trials,
 * downscaled by brd_scale. */
1066 if (s->b_frame_strategy == 2) {
1067 for (i = 0; i < s->max_b_frames + 2; i++) {
1068 s->tmp_frames[i] = av_frame_alloc();
1069 if (!s->tmp_frames[i])
1070 return AVERROR(ENOMEM);
1072 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1073 s->tmp_frames[i]->width = s->width >> s->brd_scale;
1074 s->tmp_frames[i]->height = s->height >> s->brd_scale;
1076 ret = av_frame_get_buffer(s->tmp_frames[i], 0);
/* Export coded-picture-buffer properties as stream side data. */
1082 cpb_props = ff_add_cpb_side_data(avctx);
1084 return AVERROR(ENOMEM);
1085 cpb_props->max_bitrate = avctx->rc_max_rate;
1086 cpb_props->min_bitrate = avctx->rc_min_rate;
1087 cpb_props->avg_bitrate = avctx->bit_rate;
1088 cpb_props->buffer_size = avctx->rc_buffer_size;
/*
 * Free everything ff_mpv_encode_init() allocated: rate control, common
 * context, MJPEG state, stats buffers and the quantizer/picture arrays.
 * NOTE(review): lines are missing from this extraction (braces, the
 * declaration of i, the return).
 */
1093 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1095 MpegEncContext *s = avctx->priv_data;
1098 ff_rate_control_uninit(s);
1100 ff_mpv_common_end(s);
1101 if (CONFIG_MJPEG_ENCODER &&
1102 s->out_format == FMT_MJPEG)
1103 ff_mjpeg_encode_close(s);
1105 av_freep(&avctx->extradata);
1107 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1108 av_frame_free(&s->tmp_frames[i]);
1110 ff_free_picture_tables(&s->new_picture);
1111 ff_mpeg_unref_picture(avctx, &s->new_picture);
1113 av_freep(&avctx->stats_out);
1114 av_freep(&s->ac_stats);
/* The chroma tables may alias the luma tables; only free them when they
 * are distinct allocations, then clear the (possibly dangling) aliases. */
1116 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1117 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1118 s->q_chroma_intra_matrix= NULL;
1119 s->q_chroma_intra_matrix16= NULL;
1120 av_freep(&s->q_intra_matrix);
1121 av_freep(&s->q_inter_matrix);
1122 av_freep(&s->q_intra_matrix16);
1123 av_freep(&s->q_inter_matrix16);
1124 av_freep(&s->input_picture);
1125 av_freep(&s->reordered_input_picture);
1126 av_freep(&s->dct_offset);
1131 static int get_sae(uint8_t *src, int ref, int stride)
1136 for (y = 0; y < 16; y++) {
1137 for (x = 0; x < 16; x++) {
1138 acc += FFABS(src[x + y * stride] - ref);
1145 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1146 uint8_t *ref, int stride)
1152 h = s->height & ~15;
1154 for (y = 0; y < h; y += 16) {
1155 for (x = 0; x < w; x += 16) {
1156 int offset = x + y * stride;
1157 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
1159 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1160 int sae = get_sae(src + offset, mean, stride);
1162 acc += sae + 500 < sad;
1168 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1170 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1171 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1172 s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1173 &s->linesize, &s->uvlinesize);
1176 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1178 Picture *pic = NULL;
1180 int i, display_picture_number = 0, ret;
1181 int encoding_delay = s->max_b_frames ? s->max_b_frames
1182 : (s->low_delay ? 0 : 1);
1183 int flush_offset = 1;
1188 display_picture_number = s->input_picture_number++;
1190 if (pts != AV_NOPTS_VALUE) {
1191 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1192 int64_t last = s->user_specified_pts;
1195 av_log(s->avctx, AV_LOG_ERROR,
1196 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1198 return AVERROR(EINVAL);
1201 if (!s->low_delay && display_picture_number == 1)
1202 s->dts_delta = pts - last;
1204 s->user_specified_pts = pts;
1206 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1207 s->user_specified_pts =
1208 pts = s->user_specified_pts + 1;
1209 av_log(s->avctx, AV_LOG_INFO,
1210 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1213 pts = display_picture_number;
1217 if (!pic_arg->buf[0] ||
1218 pic_arg->linesize[0] != s->linesize ||
1219 pic_arg->linesize[1] != s->uvlinesize ||
1220 pic_arg->linesize[2] != s->uvlinesize)
1222 if ((s->width & 15) || (s->height & 15))
1224 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1226 if (s->linesize & (STRIDE_ALIGN-1))
1229 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1230 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1232 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1236 pic = &s->picture[i];
1240 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1243 ret = alloc_picture(s, pic, direct);
1248 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1249 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1250 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1253 int h_chroma_shift, v_chroma_shift;
1254 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1258 for (i = 0; i < 3; i++) {
1259 int src_stride = pic_arg->linesize[i];
1260 int dst_stride = i ? s->uvlinesize : s->linesize;
1261 int h_shift = i ? h_chroma_shift : 0;
1262 int v_shift = i ? v_chroma_shift : 0;
1263 int w = s->width >> h_shift;
1264 int h = s->height >> v_shift;
1265 uint8_t *src = pic_arg->data[i];
1266 uint8_t *dst = pic->f->data[i];
1269 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1270 && !s->progressive_sequence
1271 && FFALIGN(s->height, 32) - s->height > 16)
1274 if (!s->avctx->rc_buffer_size)
1275 dst += INPLACE_OFFSET;
1277 if (src_stride == dst_stride)
1278 memcpy(dst, src, src_stride * h);
1281 uint8_t *dst2 = dst;
1283 memcpy(dst2, src, w);
1288 if ((s->width & 15) || (s->height & (vpad-1))) {
1289 s->mpvencdsp.draw_edges(dst, dst_stride,
1299 ret = av_frame_copy_props(pic->f, pic_arg);
1303 pic->f->display_picture_number = display_picture_number;
1304 pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1306 /* Flushing: When we have not received enough input frames,
1307 * ensure s->input_picture[0] contains the first picture */
1308 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1309 if (s->input_picture[flush_offset])
1312 if (flush_offset <= 1)
1315 encoding_delay = encoding_delay - flush_offset + 1;
1318 /* shift buffer entries */
1319 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1320 s->input_picture[i - flush_offset] = s->input_picture[i];
1322 s->input_picture[encoding_delay] = (Picture*) pic;
1327 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1331 int64_t score64 = 0;
1333 for (plane = 0; plane < 3; plane++) {
1334 const int stride = p->f->linesize[plane];
1335 const int bw = plane ? 1 : 2;
1336 for (y = 0; y < s->mb_height * bw; y++) {
1337 for (x = 0; x < s->mb_width * bw; x++) {
1338 int off = p->shared ? 0 : 16;
1339 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1340 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1341 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1343 switch (FFABS(s->frame_skip_exp)) {
1344 case 0: score = FFMAX(score, v); break;
1345 case 1: score += FFABS(v); break;
1346 case 2: score64 += v * (int64_t)v; break;
1347 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1348 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1357 if (s->frame_skip_exp < 0)
1358 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1359 -1.0/s->frame_skip_exp);
1361 if (score64 < s->frame_skip_threshold)
1363 if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
1368 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1370 AVPacket pkt = { 0 };
1374 av_init_packet(&pkt);
1376 ret = avcodec_send_frame(c, frame);
1381 ret = avcodec_receive_packet(c, &pkt);
1384 av_packet_unref(&pkt);
1385 } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1392 static int estimate_best_b_count(MpegEncContext *s)
1394 const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1395 const int scale = s->brd_scale;
1396 int width = s->width >> scale;
1397 int height = s->height >> scale;
1398 int i, j, out_size, p_lambda, b_lambda, lambda2;
1399 int64_t best_rd = INT64_MAX;
1400 int best_b_count = -1;
1403 av_assert0(scale >= 0 && scale <= 3);
1406 //s->next_picture_ptr->quality;
1407 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1408 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1409 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1410 if (!b_lambda) // FIXME we should do this somewhere else
1411 b_lambda = p_lambda;
1412 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1415 for (i = 0; i < s->max_b_frames + 2; i++) {
1416 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1417 s->next_picture_ptr;
1420 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1421 pre_input = *pre_input_ptr;
1422 memcpy(data, pre_input_ptr->f->data, sizeof(data));
1424 if (!pre_input.shared && i) {
1425 data[0] += INPLACE_OFFSET;
1426 data[1] += INPLACE_OFFSET;
1427 data[2] += INPLACE_OFFSET;
1430 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1431 s->tmp_frames[i]->linesize[0],
1433 pre_input.f->linesize[0],
1435 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1436 s->tmp_frames[i]->linesize[1],
1438 pre_input.f->linesize[1],
1439 width >> 1, height >> 1);
1440 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1441 s->tmp_frames[i]->linesize[2],
1443 pre_input.f->linesize[2],
1444 width >> 1, height >> 1);
1448 for (j = 0; j < s->max_b_frames + 1; j++) {
1452 if (!s->input_picture[j])
1455 c = avcodec_alloc_context3(NULL);
1457 return AVERROR(ENOMEM);
1461 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1462 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1463 c->mb_decision = s->avctx->mb_decision;
1464 c->me_cmp = s->avctx->me_cmp;
1465 c->mb_cmp = s->avctx->mb_cmp;
1466 c->me_sub_cmp = s->avctx->me_sub_cmp;
1467 c->pix_fmt = AV_PIX_FMT_YUV420P;
1468 c->time_base = s->avctx->time_base;
1469 c->max_b_frames = s->max_b_frames;
1471 ret = avcodec_open2(c, codec, NULL);
1475 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1476 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1478 out_size = encode_frame(c, s->tmp_frames[0]);
1484 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1486 for (i = 0; i < s->max_b_frames + 1; i++) {
1487 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1489 s->tmp_frames[i + 1]->pict_type = is_p ?
1490 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1491 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1493 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1499 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1502 /* get the delayed frames */
1503 out_size = encode_frame(c, NULL);
1508 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1510 rd += c->error[0] + c->error[1] + c->error[2];
1518 avcodec_free_context(&c);
1523 return best_b_count;
1526 static int select_input_picture(MpegEncContext *s)
1530 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1531 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1532 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1534 /* set next picture type & ordering */
1535 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1536 if (s->frame_skip_threshold || s->frame_skip_factor) {
1537 if (s->picture_in_gop_number < s->gop_size &&
1538 s->next_picture_ptr &&
1539 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1540 // FIXME check that the gop check above is +-1 correct
1541 av_frame_unref(s->input_picture[0]->f);
1543 ff_vbv_update(s, 0);
1549 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1550 !s->next_picture_ptr || s->intra_only) {
1551 s->reordered_input_picture[0] = s->input_picture[0];
1552 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1553 s->reordered_input_picture[0]->f->coded_picture_number =
1554 s->coded_picture_number++;
1558 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1559 for (i = 0; i < s->max_b_frames + 1; i++) {
1560 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1562 if (pict_num >= s->rc_context.num_entries)
1564 if (!s->input_picture[i]) {
1565 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1569 s->input_picture[i]->f->pict_type =
1570 s->rc_context.entry[pict_num].new_pict_type;
1574 if (s->b_frame_strategy == 0) {
1575 b_frames = s->max_b_frames;
1576 while (b_frames && !s->input_picture[b_frames])
1578 } else if (s->b_frame_strategy == 1) {
1579 for (i = 1; i < s->max_b_frames + 1; i++) {
1580 if (s->input_picture[i] &&
1581 s->input_picture[i]->b_frame_score == 0) {
1582 s->input_picture[i]->b_frame_score =
1584 s->input_picture[i ]->f->data[0],
1585 s->input_picture[i - 1]->f->data[0],
1589 for (i = 0; i < s->max_b_frames + 1; i++) {
1590 if (!s->input_picture[i] ||
1591 s->input_picture[i]->b_frame_score - 1 >
1592 s->mb_num / s->b_sensitivity)
1596 b_frames = FFMAX(0, i - 1);
1599 for (i = 0; i < b_frames + 1; i++) {
1600 s->input_picture[i]->b_frame_score = 0;
1602 } else if (s->b_frame_strategy == 2) {
1603 b_frames = estimate_best_b_count(s);
1610 for (i = b_frames - 1; i >= 0; i--) {
1611 int type = s->input_picture[i]->f->pict_type;
1612 if (type && type != AV_PICTURE_TYPE_B)
1615 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1616 b_frames == s->max_b_frames) {
1617 av_log(s->avctx, AV_LOG_ERROR,
1618 "warning, too many B-frames in a row\n");
1621 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1622 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1623 s->gop_size > s->picture_in_gop_number) {
1624 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1626 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1628 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1632 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1633 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1636 s->reordered_input_picture[0] = s->input_picture[b_frames];
1637 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1638 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1639 s->reordered_input_picture[0]->f->coded_picture_number =
1640 s->coded_picture_number++;
1641 for (i = 0; i < b_frames; i++) {
1642 s->reordered_input_picture[i + 1] = s->input_picture[i];
1643 s->reordered_input_picture[i + 1]->f->pict_type =
1645 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1646 s->coded_picture_number++;
1651 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1653 if (s->reordered_input_picture[0]) {
1654 s->reordered_input_picture[0]->reference =
1655 s->reordered_input_picture[0]->f->pict_type !=
1656 AV_PICTURE_TYPE_B ? 3 : 0;
1658 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1661 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1662 // input is a shared pix, so we can't modify it -> allocate a new
1663 // one & ensure that the shared one is reuseable
1666 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1669 pic = &s->picture[i];
1671 pic->reference = s->reordered_input_picture[0]->reference;
1672 if (alloc_picture(s, pic, 0) < 0) {
1676 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1680 /* mark us unused / free shared pic */
1681 av_frame_unref(s->reordered_input_picture[0]->f);
1682 s->reordered_input_picture[0]->shared = 0;
1684 s->current_picture_ptr = pic;
1686 // input is not a shared pix -> reuse buffer for current_pix
1687 s->current_picture_ptr = s->reordered_input_picture[0];
1688 for (i = 0; i < 4; i++) {
1689 s->new_picture.f->data[i] += INPLACE_OFFSET;
1692 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1693 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1694 s->current_picture_ptr)) < 0)
1697 s->picture_number = s->new_picture.f->display_picture_number;
1702 static void frame_end(MpegEncContext *s)
1704 if (s->unrestricted_mv &&
1705 s->current_picture.reference &&
1707 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1708 int hshift = desc->log2_chroma_w;
1709 int vshift = desc->log2_chroma_h;
1710 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1711 s->current_picture.f->linesize[0],
1712 s->h_edge_pos, s->v_edge_pos,
1713 EDGE_WIDTH, EDGE_WIDTH,
1714 EDGE_TOP | EDGE_BOTTOM);
1715 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1716 s->current_picture.f->linesize[1],
1717 s->h_edge_pos >> hshift,
1718 s->v_edge_pos >> vshift,
1719 EDGE_WIDTH >> hshift,
1720 EDGE_WIDTH >> vshift,
1721 EDGE_TOP | EDGE_BOTTOM);
1722 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1723 s->current_picture.f->linesize[2],
1724 s->h_edge_pos >> hshift,
1725 s->v_edge_pos >> vshift,
1726 EDGE_WIDTH >> hshift,
1727 EDGE_WIDTH >> vshift,
1728 EDGE_TOP | EDGE_BOTTOM);
1733 s->last_pict_type = s->pict_type;
1734 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1735 if (s->pict_type!= AV_PICTURE_TYPE_B)
1736 s->last_non_b_pict_type = s->pict_type;
1738 #if FF_API_CODED_FRAME
1739 FF_DISABLE_DEPRECATION_WARNINGS
1740 av_frame_unref(s->avctx->coded_frame);
1741 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1742 FF_ENABLE_DEPRECATION_WARNINGS
1744 #if FF_API_ERROR_FRAME
1745 FF_DISABLE_DEPRECATION_WARNINGS
1746 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1747 sizeof(s->current_picture.encoding_error));
1748 FF_ENABLE_DEPRECATION_WARNINGS
1752 static void update_noise_reduction(MpegEncContext *s)
1756 for (intra = 0; intra < 2; intra++) {
1757 if (s->dct_count[intra] > (1 << 16)) {
1758 for (i = 0; i < 64; i++) {
1759 s->dct_error_sum[intra][i] >>= 1;
1761 s->dct_count[intra] >>= 1;
1764 for (i = 0; i < 64; i++) {
1765 s->dct_offset[intra][i] = (s->noise_reduction *
1766 s->dct_count[intra] +
1767 s->dct_error_sum[intra][i] / 2) /
1768 (s->dct_error_sum[intra][i] + 1);
1773 static int frame_start(MpegEncContext *s)
1777 /* mark & release old frames */
1778 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1779 s->last_picture_ptr != s->next_picture_ptr &&
1780 s->last_picture_ptr->f->buf[0]) {
1781 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1784 s->current_picture_ptr->f->pict_type = s->pict_type;
1785 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1787 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1788 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1789 s->current_picture_ptr)) < 0)
1792 if (s->pict_type != AV_PICTURE_TYPE_B) {
1793 s->last_picture_ptr = s->next_picture_ptr;
1795 s->next_picture_ptr = s->current_picture_ptr;
1798 if (s->last_picture_ptr) {
1799 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1800 if (s->last_picture_ptr->f->buf[0] &&
1801 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1802 s->last_picture_ptr)) < 0)
1805 if (s->next_picture_ptr) {
1806 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1807 if (s->next_picture_ptr->f->buf[0] &&
1808 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1809 s->next_picture_ptr)) < 0)
1813 if (s->picture_structure!= PICT_FRAME) {
1815 for (i = 0; i < 4; i++) {
1816 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1817 s->current_picture.f->data[i] +=
1818 s->current_picture.f->linesize[i];
1820 s->current_picture.f->linesize[i] *= 2;
1821 s->last_picture.f->linesize[i] *= 2;
1822 s->next_picture.f->linesize[i] *= 2;
1826 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1827 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1828 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1829 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1830 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1831 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1833 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1834 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1837 if (s->dct_error_sum) {
1838 av_assert2(s->noise_reduction && s->encoding);
1839 update_noise_reduction(s);
1845 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1846 const AVFrame *pic_arg, int *got_packet)
1848 MpegEncContext *s = avctx->priv_data;
1849 int i, stuffing_count, ret;
1850 int context_count = s->slice_context_count;
1852 s->vbv_ignore_qmax = 0;
1854 s->picture_in_gop_number++;
1856 if (load_input_picture(s, pic_arg) < 0)
1859 if (select_input_picture(s) < 0) {
1864 if (s->new_picture.f->data[0]) {
1865 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1866 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1868 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1869 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1872 s->mb_info_ptr = av_packet_new_side_data(pkt,
1873 AV_PKT_DATA_H263_MB_INFO,
1874 s->mb_width*s->mb_height*12);
1875 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1878 for (i = 0; i < context_count; i++) {
1879 int start_y = s->thread_context[i]->start_mb_y;
1880 int end_y = s->thread_context[i]-> end_mb_y;
1881 int h = s->mb_height;
1882 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1883 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1885 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1888 s->pict_type = s->new_picture.f->pict_type;
1890 ret = frame_start(s);
1894 ret = encode_picture(s, s->picture_number);
1895 if (growing_buffer) {
1896 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1897 pkt->data = s->pb.buf;
1898 pkt->size = avctx->internal->byte_buffer_size;
1903 #if FF_API_STAT_BITS
1904 FF_DISABLE_DEPRECATION_WARNINGS
1905 avctx->header_bits = s->header_bits;
1906 avctx->mv_bits = s->mv_bits;
1907 avctx->misc_bits = s->misc_bits;
1908 avctx->i_tex_bits = s->i_tex_bits;
1909 avctx->p_tex_bits = s->p_tex_bits;
1910 avctx->i_count = s->i_count;
1911 // FIXME f/b_count in avctx
1912 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1913 avctx->skip_count = s->skip_count;
1914 FF_ENABLE_DEPRECATION_WARNINGS
1919 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1920 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
1922 if (avctx->rc_buffer_size) {
1923 RateControlContext *rcc = &s->rc_context;
1924 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1925 int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
1926 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1928 if (put_bits_count(&s->pb) > max_size &&
1929 s->lambda < s->lmax) {
1930 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1931 (s->qscale + 1) / s->qscale);
1932 if (s->adaptive_quant) {
1934 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1935 s->lambda_table[i] =
1936 FFMAX(s->lambda_table[i] + min_step,
1937 s->lambda_table[i] * (s->qscale + 1) /
1940 s->mb_skipped = 0; // done in frame_start()
1941 // done in encode_picture() so we must undo it
1942 if (s->pict_type == AV_PICTURE_TYPE_P) {
1943 if (s->flipflop_rounding ||
1944 s->codec_id == AV_CODEC_ID_H263P ||
1945 s->codec_id == AV_CODEC_ID_MPEG4)
1946 s->no_rounding ^= 1;
1948 if (s->pict_type != AV_PICTURE_TYPE_B) {
1949 s->time_base = s->last_time_base;
1950 s->last_non_b_time = s->time - s->pp_time;
1952 for (i = 0; i < context_count; i++) {
1953 PutBitContext *pb = &s->thread_context[i]->pb;
1954 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1956 s->vbv_ignore_qmax = 1;
1957 av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1961 av_assert0(avctx->rc_max_rate);
1964 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1965 ff_write_pass1_stats(s);
1967 for (i = 0; i < 4; i++) {
1968 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1969 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1971 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1972 s->current_picture_ptr->encoding_error,
1973 (avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1976 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1977 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1978 s->misc_bits + s->i_tex_bits +
1980 flush_put_bits(&s->pb);
1981 s->frame_bits = put_bits_count(&s->pb);
1983 stuffing_count = ff_vbv_update(s, s->frame_bits);
1984 s->stuffing_bits = 8*stuffing_count;
1985 if (stuffing_count) {
1986 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1987 stuffing_count + 50) {
1988 av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
1992 switch (s->codec_id) {
1993 case AV_CODEC_ID_MPEG1VIDEO:
1994 case AV_CODEC_ID_MPEG2VIDEO:
1995 while (stuffing_count--) {
1996 put_bits(&s->pb, 8, 0);
1999 case AV_CODEC_ID_MPEG4:
2000 put_bits(&s->pb, 16, 0);
2001 put_bits(&s->pb, 16, 0x1C3);
2002 stuffing_count -= 4;
2003 while (stuffing_count--) {
2004 put_bits(&s->pb, 8, 0xFF);
2008 av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2010 flush_put_bits(&s->pb);
2011 s->frame_bits = put_bits_count(&s->pb);
2014 /* update MPEG-1/2 vbv_delay for CBR */
2015 if (avctx->rc_max_rate &&
2016 avctx->rc_min_rate == avctx->rc_max_rate &&
2017 s->out_format == FMT_MPEG1 &&
2018 90000LL * (avctx->rc_buffer_size - 1) <=
2019 avctx->rc_max_rate * 0xFFFFLL) {
2020 AVCPBProperties *props;
2023 int vbv_delay, min_delay;
2024 double inbits = avctx->rc_max_rate *
2025 av_q2d(avctx->time_base);
2026 int minbits = s->frame_bits - 8 *
2027 (s->vbv_delay_ptr - s->pb.buf - 1);
2028 double bits = s->rc_context.buffer_index + minbits - inbits;
2031 av_log(avctx, AV_LOG_ERROR,
2032 "Internal error, negative bits\n");
2034 av_assert1(s->repeat_first_field == 0);
2036 vbv_delay = bits * 90000 / avctx->rc_max_rate;
2037 min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
2040 vbv_delay = FFMAX(vbv_delay, min_delay);
2042 av_assert0(vbv_delay < 0xFFFF);
2044 s->vbv_delay_ptr[0] &= 0xF8;
2045 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2046 s->vbv_delay_ptr[1] = vbv_delay >> 5;
2047 s->vbv_delay_ptr[2] &= 0x07;
2048 s->vbv_delay_ptr[2] |= vbv_delay << 3;
2050 props = av_cpb_properties_alloc(&props_size);
2052 return AVERROR(ENOMEM);
2053 props->vbv_delay = vbv_delay * 300;
2055 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
2056 (uint8_t*)props, props_size);
2062 #if FF_API_VBV_DELAY
2063 FF_DISABLE_DEPRECATION_WARNINGS
2064 avctx->vbv_delay = vbv_delay * 300;
2065 FF_ENABLE_DEPRECATION_WARNINGS
2068 s->total_bits += s->frame_bits;
2069 #if FF_API_STAT_BITS
2070 FF_DISABLE_DEPRECATION_WARNINGS
2071 avctx->frame_bits = s->frame_bits;
2072 FF_ENABLE_DEPRECATION_WARNINGS
2076 pkt->pts = s->current_picture.f->pts;
2077 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2078 if (!s->current_picture.f->coded_picture_number)
2079 pkt->dts = pkt->pts - s->dts_delta;
2081 pkt->dts = s->reordered_pts;
2082 s->reordered_pts = pkt->pts;
2084 pkt->dts = pkt->pts;
2085 if (s->current_picture.f->key_frame)
2086 pkt->flags |= AV_PKT_FLAG_KEY;
2088 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2093 /* release non-reference frames */
2094 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2095 if (!s->picture[i].reference)
2096 ff_mpeg_unref_picture(avctx, &s->picture[i]);
2099 av_assert1((s->frame_bits & 7) == 0);
2101 pkt->size = s->frame_bits / 8;
2102 *got_packet = !!pkt->size;
2106 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2107 int n, int threshold)
2109 static const char tab[64] = {
2110 3, 2, 2, 1, 1, 1, 1, 1,
2111 1, 1, 1, 1, 1, 1, 1, 1,
2112 1, 1, 1, 1, 1, 1, 1, 1,
2113 0, 0, 0, 0, 0, 0, 0, 0,
2114 0, 0, 0, 0, 0, 0, 0, 0,
2115 0, 0, 0, 0, 0, 0, 0, 0,
2116 0, 0, 0, 0, 0, 0, 0, 0,
2117 0, 0, 0, 0, 0, 0, 0, 0
2122 int16_t *block = s->block[n];
2123 const int last_index = s->block_last_index[n];
2126 if (threshold < 0) {
2128 threshold = -threshold;
2132 /* Are all we could set to zero already zero? */
2133 if (last_index <= skip_dc - 1)
2136 for (i = 0; i <= last_index; i++) {
2137 const int j = s->intra_scantable.permutated[i];
2138 const int level = FFABS(block[j]);
2140 if (skip_dc && i == 0)
2144 } else if (level > 1) {
2150 if (score >= threshold)
2152 for (i = skip_dc; i <= last_index; i++) {
2153 const int j = s->intra_scantable.permutated[i];
2157 s->block_last_index[n] = 0;
2159 s->block_last_index[n] = -1;
2162 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2166 const int maxlevel = s->max_qcoeff;
2167 const int minlevel = s->min_qcoeff;
2171 i = 1; // skip clipping of intra dc
2175 for (; i <= last_index; i++) {
2176 const int j = s->intra_scantable.permutated[i];
2177 int level = block[j];
2179 if (level > maxlevel) {
2182 } else if (level < minlevel) {
2190 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2191 av_log(s->avctx, AV_LOG_INFO,
2192 "warning, clipping %d dct coefficients to %d..%d\n",
2193 overflow, minlevel, maxlevel);
2196 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2200 for (y = 0; y < 8; y++) {
2201 for (x = 0; x < 8; x++) {
2207 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2208 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2209 int v = ptr[x2 + y2 * stride];
2215 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2220 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2221 int motion_x, int motion_y,
2222 int mb_block_height,
2226 int16_t weight[12][64];
2227 int16_t orig[12][64];
2228 const int mb_x = s->mb_x;
2229 const int mb_y = s->mb_y;
2232 int dct_offset = s->linesize * 8; // default for progressive frames
2233 int uv_dct_offset = s->uvlinesize * 8;
2234 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2235 ptrdiff_t wrap_y, wrap_c;
2237 for (i = 0; i < mb_block_count; i++)
2238 skip_dct[i] = s->skipdct;
2240 if (s->adaptive_quant) {
2241 const int last_qp = s->qscale;
2242 const int mb_xy = mb_x + mb_y * s->mb_stride;
2244 s->lambda = s->lambda_table[mb_xy];
2247 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2248 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2249 s->dquant = s->qscale - last_qp;
2251 if (s->out_format == FMT_H263) {
2252 s->dquant = av_clip(s->dquant, -2, 2);
2254 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2256 if (s->pict_type == AV_PICTURE_TYPE_B) {
2257 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2260 if (s->mv_type == MV_TYPE_8X8)
2266 ff_set_qscale(s, last_qp + s->dquant);
2267 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2268 ff_set_qscale(s, s->qscale + s->dquant);
2270 wrap_y = s->linesize;
2271 wrap_c = s->uvlinesize;
2272 ptr_y = s->new_picture.f->data[0] +
2273 (mb_y * 16 * wrap_y) + mb_x * 16;
2274 ptr_cb = s->new_picture.f->data[1] +
2275 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2276 ptr_cr = s->new_picture.f->data[2] +
2277 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2279 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2280 uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2281 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2282 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2283 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2285 16, 16, mb_x * 16, mb_y * 16,
2286 s->width, s->height);
2288 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2290 mb_block_width, mb_block_height,
2291 mb_x * mb_block_width, mb_y * mb_block_height,
2293 ptr_cb = ebuf + 16 * wrap_y;
2294 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2296 mb_block_width, mb_block_height,
2297 mb_x * mb_block_width, mb_y * mb_block_height,
2299 ptr_cr = ebuf + 16 * wrap_y + 16;
2303 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2304 int progressive_score, interlaced_score;
2306 s->interlaced_dct = 0;
2307 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2308 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2309 NULL, wrap_y, 8) - 400;
2311 if (progressive_score > 0) {
2312 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2313 NULL, wrap_y * 2, 8) +
2314 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2315 NULL, wrap_y * 2, 8);
2316 if (progressive_score > interlaced_score) {
2317 s->interlaced_dct = 1;
2319 dct_offset = wrap_y;
2320 uv_dct_offset = wrap_c;
2322 if (s->chroma_format == CHROMA_422 ||
2323 s->chroma_format == CHROMA_444)
2329 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2330 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2331 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2332 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2334 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2338 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2339 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2340 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2341 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2342 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2343 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2344 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2345 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2346 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2347 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2348 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2349 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2353 op_pixels_func (*op_pix)[4];
2354 qpel_mc_func (*op_qpix)[16];
2355 uint8_t *dest_y, *dest_cb, *dest_cr;
2357 dest_y = s->dest[0];
2358 dest_cb = s->dest[1];
2359 dest_cr = s->dest[2];
2361 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2362 op_pix = s->hdsp.put_pixels_tab;
2363 op_qpix = s->qdsp.put_qpel_pixels_tab;
2365 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2366 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2369 if (s->mv_dir & MV_DIR_FORWARD) {
2370 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2371 s->last_picture.f->data,
2373 op_pix = s->hdsp.avg_pixels_tab;
2374 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2376 if (s->mv_dir & MV_DIR_BACKWARD) {
2377 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2378 s->next_picture.f->data,
2382 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2383 int progressive_score, interlaced_score;
2385 s->interlaced_dct = 0;
2386 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2387 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2391 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2392 progressive_score -= 400;
2394 if (progressive_score > 0) {
2395 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2397 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2401 if (progressive_score > interlaced_score) {
2402 s->interlaced_dct = 1;
2404 dct_offset = wrap_y;
2405 uv_dct_offset = wrap_c;
2407 if (s->chroma_format == CHROMA_422)
2413 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2414 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2415 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2416 dest_y + dct_offset, wrap_y);
2417 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2418 dest_y + dct_offset + 8, wrap_y);
2420 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2424 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2425 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2426 if (!s->chroma_y_shift) { /* 422 */
2427 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2428 dest_cb + uv_dct_offset, wrap_c);
2429 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2430 dest_cr + uv_dct_offset, wrap_c);
2433 /* pre quantization */
2434 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2435 2 * s->qscale * s->qscale) {
2437 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2439 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2441 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2442 wrap_y, 8) < 20 * s->qscale)
2444 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2445 wrap_y, 8) < 20 * s->qscale)
2447 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2449 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2451 if (!s->chroma_y_shift) { /* 422 */
2452 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2453 dest_cb + uv_dct_offset,
2454 wrap_c, 8) < 20 * s->qscale)
2456 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2457 dest_cr + uv_dct_offset,
2458 wrap_c, 8) < 20 * s->qscale)
2464 if (s->quantizer_noise_shaping) {
2466 get_visual_weight(weight[0], ptr_y , wrap_y);
2468 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2470 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2472 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2474 get_visual_weight(weight[4], ptr_cb , wrap_c);
2476 get_visual_weight(weight[5], ptr_cr , wrap_c);
2477 if (!s->chroma_y_shift) { /* 422 */
2479 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2482 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2485 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2488 /* DCT & quantize */
2489 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2491 for (i = 0; i < mb_block_count; i++) {
2494 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2495 // FIXME we could decide to change to quantizer instead of
2497 // JS: I don't think that would be a good idea it could lower
2498 // quality instead of improve it. Just INTRADC clipping
2499 // deserves changes in quantizer
2501 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2503 s->block_last_index[i] = -1;
2505 if (s->quantizer_noise_shaping) {
2506 for (i = 0; i < mb_block_count; i++) {
2508 s->block_last_index[i] =
2509 dct_quantize_refine(s, s->block[i], weight[i],
2510 orig[i], i, s->qscale);
2515 if (s->luma_elim_threshold && !s->mb_intra)
2516 for (i = 0; i < 4; i++)
2517 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2518 if (s->chroma_elim_threshold && !s->mb_intra)
2519 for (i = 4; i < mb_block_count; i++)
2520 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2522 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2523 for (i = 0; i < mb_block_count; i++) {
2524 if (s->block_last_index[i] == -1)
2525 s->coded_score[i] = INT_MAX / 256;
2530 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2531 s->block_last_index[4] =
2532 s->block_last_index[5] = 0;
2534 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2535 if (!s->chroma_y_shift) { /* 422 / 444 */
2536 for (i=6; i<12; i++) {
2537 s->block_last_index[i] = 0;
2538 s->block[i][0] = s->block[4][0];
2543 // non c quantize code returns incorrect block_last_index FIXME
2544 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2545 for (i = 0; i < mb_block_count; i++) {
2547 if (s->block_last_index[i] > 0) {
2548 for (j = 63; j > 0; j--) {
2549 if (s->block[i][s->intra_scantable.permutated[j]])
2552 s->block_last_index[i] = j;
2557 /* huffman encode */
2558 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2559 case AV_CODEC_ID_MPEG1VIDEO:
2560 case AV_CODEC_ID_MPEG2VIDEO:
2561 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2562 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2564 case AV_CODEC_ID_MPEG4:
2565 if (CONFIG_MPEG4_ENCODER)
2566 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2568 case AV_CODEC_ID_MSMPEG4V2:
2569 case AV_CODEC_ID_MSMPEG4V3:
2570 case AV_CODEC_ID_WMV1:
2571 if (CONFIG_MSMPEG4_ENCODER)
2572 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2574 case AV_CODEC_ID_WMV2:
2575 if (CONFIG_WMV2_ENCODER)
2576 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2578 case AV_CODEC_ID_H261:
2579 if (CONFIG_H261_ENCODER)
2580 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2582 case AV_CODEC_ID_H263:
2583 case AV_CODEC_ID_H263P:
2584 case AV_CODEC_ID_FLV1:
2585 case AV_CODEC_ID_RV10:
2586 case AV_CODEC_ID_RV20:
2587 if (CONFIG_H263_ENCODER)
2588 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2590 case AV_CODEC_ID_MJPEG:
2591 case AV_CODEC_ID_AMV:
2592 if (CONFIG_MJPEG_ENCODER)
2593 ff_mjpeg_encode_mb(s, s->block);
2595 case AV_CODEC_ID_SPEEDHQ:
2596 if (CONFIG_SPEEDHQ_ENCODER)
2597 ff_speedhq_encode_mb(s, s->block);
/* Encode one macroblock: dispatch to encode_mb_internal() with the chroma
 * dimensions and total block count implied by the chroma subsampling
 * (420 -> 6 blocks, 422 -> 8 blocks, otherwise 444 -> 12 blocks).
 * The extra constant arguments let the always-inline body specialize
 * per chroma format. */
2604 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2606 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2607 else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2608 else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/* Snapshot the encoder state that an encode trial may clobber, copying it
 * from context s into backup context d, so a later trial (see encode_mb_hq())
 * can be run from the same starting state. 'type' selects which parts are
 * relevant for the candidate MB type. */
2611 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2614 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
     /* MPEG-1/2-style skip-run and DC predictors */
2617 d->mb_skip_run= s->mb_skip_run;
2619 d->last_dc[i] = s->last_dc[i];
     /* rate-control statistics: per-category bit counts and MB counters */
2622 d->mv_bits= s->mv_bits;
2623 d->i_tex_bits= s->i_tex_bits;
2624 d->p_tex_bits= s->p_tex_bits;
2625 d->i_count= s->i_count;
2626 d->f_count= s->f_count;
2627 d->b_count= s->b_count;
2628 d->skip_count= s->skip_count;
2629 d->misc_bits= s->misc_bits;
     /* quantizer state */
2633 d->qscale= s->qscale;
2634 d->dquant= s->dquant;
     /* MPEG-4 escape-3 coding state (presumably only meaningful for MPEG-4) */
2636 d->esc3_level_length= s->esc3_level_length;
/* Copy the result state of an encode trial from context s into d (the
 * "best so far" context). Mirrors copy_context_before_encode() but also
 * records the decisions made during encoding: motion vectors, MB mode,
 * per-block last indices and the PutBitContexts for data partitioning. */
2639 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2642 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2643 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
     /* skip-run and DC predictors */
2646 d->mb_skip_run= s->mb_skip_run;
2648 d->last_dc[i] = s->last_dc[i];
     /* rate-control statistics */
2651 d->mv_bits= s->mv_bits;
2652 d->i_tex_bits= s->i_tex_bits;
2653 d->p_tex_bits= s->p_tex_bits;
2654 d->i_count= s->i_count;
2655 d->f_count= s->f_count;
2656 d->b_count= s->b_count;
2657 d->skip_count= s->skip_count;
2658 d->misc_bits= s->misc_bits;
     /* macroblock mode decisions made by this trial */
2660 d->mb_intra= s->mb_intra;
2661 d->mb_skipped= s->mb_skipped;
2662 d->mv_type= s->mv_type;
2663 d->mv_dir= s->mv_dir;
     /* with data partitioning the trial wrote into separate bit buffers */
2665 if(s->data_partitioning){
2667 d->tex_pb= s->tex_pb;
2671 d->block_last_index[i]= s->block_last_index[i];
2672 d->interlaced_dct= s->interlaced_dct;
2673 d->qscale= s->qscale;
     /* MPEG-4 escape-3 coding state */
2675 d->esc3_level_length= s->esc3_level_length;
/* Trial-encode the current macroblock with candidate mode 'type' and keep
 * the best result. The MB is encoded into one of two scratch bit buffers
 * (selected by *next_block, double-buffered so the best trial survives);
 * its cost is the bit count, or a full rate-distortion score
 * (bits*lambda2 + SSE) when mb_decision == FF_MB_DECISION_RD. If the score
 * beats *dmin the result state is copied into 'best' and the buffers flip.
 * 'backup' holds the pre-trial state restored before encoding. */
2678 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2679 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2680 int *dmin, int *next_block, int motion_x, int motion_y)
2683 uint8_t *dest_backup[3];
     /* start every trial from the identical saved state */
2685 copy_context_before_encode(s, backup, type);
2687 s->block= s->blocks[*next_block];
2688 s->pb= pb[*next_block];
2689 if(s->data_partitioning){
2690 s->pb2 = pb2 [*next_block];
2691 s->tex_pb= tex_pb[*next_block];
     /* reconstruct into a scratchpad instead of the real picture, so a
      * losing trial does not corrupt s->dest */
2695 memcpy(dest_backup, s->dest, sizeof(s->dest));
2696 s->dest[0] = s->sc.rd_scratchpad;
2697 s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2698 s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2699 av_assert0(s->linesize >= 32); //FIXME
2702 encode_mb(s, motion_x, motion_y);
     /* base cost: bits produced by this trial (all partitions) */
2704 score= put_bits_count(&s->pb);
2705 if(s->data_partitioning){
2706 score+= put_bits_count(&s->pb2);
2707 score+= put_bits_count(&s->tex_pb);
     /* full RD mode: add distortion term from the reconstructed MB */
2710 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2711 ff_mpv_reconstruct_mb(s, s->block);
2713 score *= s->lambda2;
2714 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2718 memcpy(s->dest, dest_backup, sizeof(s->dest));
     /* (executed when this trial won) remember its resulting state */
2725 copy_context_after_encode(best, s, type);
/* Sum of squared errors between two w x h pixel regions with the given
 * stride. Uses the optimized mecc.sse[] functions for the common 16x16
 * and 8x8 sizes; otherwise falls back to a scalar loop using the shared
 * squared-difference lookup table (ff_square_tab is biased by 256 so the
 * signed difference can index it directly). */
2729 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2730 const uint32_t *sq = ff_square_tab + 256;
2735 return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2736 else if(w==8 && h==8)
2737 return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2741 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/* Distortion of the current macroblock: compares the reconstructed MB
 * (s->dest) against the source picture (s->new_picture) over luma plus
 * both chroma planes. Uses NSSE (noise-preserving SSE) when the user
 * selected mb_cmp == FF_CMP_NSSE, plain SSE otherwise. MBs clipped by the
 * right/bottom picture edge take the generic sse() path with the reduced
 * w/h (chroma halved — note this assumes 4:2:0 subsampling). */
2750 static int sse_mb(MpegEncContext *s){
     /* clip MB size at the picture border */
2754 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2755 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
     /* fast path (full 16x16 MB): optimized nsse/sse functions */
2758 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2759 return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2760 s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2761 s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2763 return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2764 s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2765 s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
     /* slow path: edge MB, arbitrary w/h */
2768 return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2769 +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2770 +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/* Slice-threaded motion-estimation pre-pass. 'arg' is a MpegEncContext*
 * (passed as void** by the thread executor). Iterates the slice's
 * macroblocks in reverse order (bottom-right to top-left) running
 * ff_pre_estimate_p_frame_motion() on each, using the pre-pass diamond
 * size (avctx->pre_dia_size). */
2773 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2774 MpegEncContext *s= *(void**)arg;
2778 s->me.dia_size= s->avctx->pre_dia_size;
2779 s->first_slice_line=1;
     /* reverse scan: rows end_mb_y-1 .. start_mb_y, each row right-to-left */
2780 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2781 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2782 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2784 s->first_slice_line=0;
/* Slice-threaded main motion-estimation pass. 'arg' is a MpegEncContext*
 * (passed as void** by the thread executor). Scans the slice's macroblocks
 * in raster order, advancing the per-MB block indices, and computes motion
 * vectors and mb_type via ff_estimate_b_frame_motion() for B-pictures or
 * ff_estimate_p_frame_motion() otherwise; results are stored in the
 * context's motion tables. */
2792 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2793 MpegEncContext *s= *(void**)arg;
2795 s->me.dia_size= s->avctx->dia_size;
2796 s->first_slice_line=1;
2797 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2798 s->mb_x=0; //for block init below
2799 ff_init_block_index(s);
2800 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
     /* advance the four luma block indices to the current MB column */
2801 s->block_index[0]+=2;
2802 s->block_index[1]+=2;
2803 s->block_index[2]+=2;
2804 s->block_index[3]+=2;
2806 /* compute motion vector & mb_type and store in context */
2807 if(s->pict_type==AV_PICTURE_TYPE_B)
2808 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2810 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2812 s->first_slice_line=0;
/* Slice-threaded macroblock variance pass. 'arg' is a MpegEncContext*
 * (passed as void** by the thread executor). For every luma MB of the
 * slice, computes the pixel sum and an approximate variance
 * (E[x^2] - E[x]^2, kept in >>8 fixed point with rounding constants),
 * storing per-MB variance and mean in the current picture and
 * accumulating the slice variance sum for rate control. */
2817 static int mb_var_thread(AVCodecContext *c, void *arg){
2818 MpegEncContext *s= *(void**)arg;
2821 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2822 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2825 uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2827 int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
     /* variance ~= (sum(x^2) - sum(x)^2/256 + rounding) >> 8 for a 16x16 MB */
2829 varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2830 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2832 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2833 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2834 s->me.mb_var_sum_temp += varc;
/* Finish the current slice: perform codec-specific end-of-slice work
 * (merge MPEG-4 data partitions and add stuffing, MJPEG stuffing, or
 * SpeedHQ slice end), byte-align by flushing the PutBitContext, and in
 * two-pass mode account the alignment bits as misc_bits. */
2840 static void write_slice_end(MpegEncContext *s){
2841 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2842 if(s->partitioned_frame){
2843 ff_mpeg4_merge_partitions(s);
2846 ff_mpeg4_stuffing(&s->pb);
2847 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2848 ff_mjpeg_encode_stuffing(s);
2849 } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
2850 ff_speedhq_end_slice(s);
     /* byte-align the slice */
2853 flush_put_bits(&s->pb);
2855 if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2856 s->misc_bits+= get_bits_diff(s);
/* Append one 12-byte macroblock-info record into the side-data buffer
 * (s->mb_info_ptr), overwriting the slot reserved at the current end
 * (mb_info_size - 12). The record holds: bit offset of the MB in the
 * stream (le32), quantizer, GOB number, MB address within the GOB (le16),
 * and the H.263 predicted motion vector (hmv1/vmv1); the second MV pair
 * is zeroed since 4MV is not implemented. Used by packetizers to split
 * the stream at MB boundaries. */
2859 static void write_mb_info(MpegEncContext *s)
2861 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2862 int offset = put_bits_count(&s->pb);
2863 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2864 int gobn = s->mb_y / s->gob_index;
2866 if (CONFIG_H263_ENCODER)
2867 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2868 bytestream_put_le32(&ptr, offset);
2869 bytestream_put_byte(&ptr, s->qscale);
2870 bytestream_put_byte(&ptr, gobn);
2871 bytestream_put_le16(&ptr, mba);
2872 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2873 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2874 /* 4MV not implemented */
2875 bytestream_put_byte(&ptr, 0); /* hmv2 */
2876 bytestream_put_byte(&ptr, 0); /* vmv2 */
/* Track macroblock-info records while encoding. Called before each MB
 * (startcode=0) and after writing a resync/start code (startcode=1).
 * When at least mb_info bytes have been emitted since the last recorded
 * position, a new 12-byte slot is reserved; at a start code the position
 * after the code is remembered so the record (written later by
 * write_mb_info()) points at the start code rather than mid-stream. */
2879 static void update_mb_info(MpegEncContext *s, int startcode)
2883 if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2884 s->mb_info_size += 12;
2885 s->prev_mb_info = s->last_mb_info;
2888 s->prev_mb_info = put_bits_count(&s->pb)/8;
2889 /* This might have incremented mb_info_size above, and we return without
2890 * actually writing any info into that slot yet. But in that case,
2891 * this will be called again at the start of the after writing the
2892 * start code, actually writing the mb info. */
2896 s->last_mb_info = put_bits_count(&s->pb)/8;
2897 if (!s->mb_info_size)
2898 s->mb_info_size += 12;
/* Grow the shared output bit buffer when fewer than 'threshold' bytes
 * remain, enlarging avctx->internal->byte_buffer by 'size_increase' bytes.
 * Only done for single-slice-context encoding when the PutBitContext is
 * actually backed by the internal buffer. On success the PutBitContext is
 * rebased onto the new buffer and the stream pointers that referenced the
 * old buffer (ptr_lastgob, vbv_delay_ptr) are re-derived from their saved
 * offsets. Returns 0 on success, AVERROR(ENOMEM) on overflow/allocation
 * failure, or AVERROR(EINVAL) if the buffer is still below 'threshold'
 * afterwards (e.g. reallocation was not possible). */
2902 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2904 if ( s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2905 && s->slice_context_count == 1
2906 && s->pb.buf == s->avctx->internal->byte_buffer) {
     /* save positions as offsets; raw pointers die with the old buffer */
2907 int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2908 int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2910 uint8_t *new_buffer = NULL;
2911 int new_buffer_size = 0;
     /* guard against int overflow of the grown size */
2913 if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2914 av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2915 return AVERROR(ENOMEM);
2920 av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2921 s->avctx->internal->byte_buffer_size + size_increase);
2923 return AVERROR(ENOMEM);
     /* copy already-written bytes, then swap the internal buffer */
2925 memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2926 av_free(s->avctx->internal->byte_buffer);
2927 s->avctx->internal->byte_buffer = new_buffer;
2928 s->avctx->internal->byte_buffer_size = new_buffer_size;
2929 rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2930 s->ptr_lastgob = s->pb.buf + lastgob_pos;
2931 s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2933 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2934 return AVERROR(EINVAL);
2938 static int encode_thread(AVCodecContext *c, void *arg){
2939 MpegEncContext *s= *(void**)arg;
2940 int mb_x, mb_y, mb_y_order;
2941 int chr_h= 16>>s->chroma_y_shift;
2943 MpegEncContext best_s = { 0 }, backup_s;
2944 uint8_t bit_buf[2][MAX_MB_BYTES];
2945 uint8_t bit_buf2[2][MAX_MB_BYTES];
2946 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2947 PutBitContext pb[2], pb2[2], tex_pb[2];
2950 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2951 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2952 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2955 s->last_bits= put_bits_count(&s->pb);
2966 /* init last dc values */
2967 /* note: quant matrix value (8) is implied here */
2968 s->last_dc[i] = 128 << s->intra_dc_precision;
2970 s->current_picture.encoding_error[i] = 0;
2972 if(s->codec_id==AV_CODEC_ID_AMV){
2973 s->last_dc[0] = 128*8/13;
2974 s->last_dc[1] = 128*8/14;
2975 s->last_dc[2] = 128*8/14;
2978 memset(s->last_mv, 0, sizeof(s->last_mv));
2982 switch(s->codec_id){
2983 case AV_CODEC_ID_H263:
2984 case AV_CODEC_ID_H263P:
2985 case AV_CODEC_ID_FLV1:
2986 if (CONFIG_H263_ENCODER)
2987 s->gob_index = H263_GOB_HEIGHT(s->height);
2989 case AV_CODEC_ID_MPEG4:
2990 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2991 ff_mpeg4_init_partitions(s);
2997 s->first_slice_line = 1;
2998 s->ptr_lastgob = s->pb.buf;
2999 for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
3000 if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
3002 mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
3003 if (first_in_slice && mb_y_order != s->start_mb_y)
3004 ff_speedhq_end_slice(s);
3005 s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
3012 ff_set_qscale(s, s->qscale);
3013 ff_init_block_index(s);
3015 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3016 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
3017 int mb_type= s->mb_type[xy];
3021 int size_increase = s->avctx->internal->byte_buffer_size/4
3022 + s->mb_width*MAX_MB_BYTES;
3024 ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
3025 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
3026 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
3029 if(s->data_partitioning){
3030 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
3031 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
3032 av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3038 s->mb_y = mb_y; // moved into loop, can get changed by H.261
3039 ff_update_block_index(s);
3041 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3042 ff_h261_reorder_mb_index(s);
3043 xy= s->mb_y*s->mb_stride + s->mb_x;
3044 mb_type= s->mb_type[xy];
3047 /* write gob / video packet header */
3049 int current_packet_size, is_gob_start;
3051 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
3053 is_gob_start = s->rtp_payload_size &&
3054 current_packet_size >= s->rtp_payload_size &&
3057 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3059 switch(s->codec_id){
3060 case AV_CODEC_ID_H263:
3061 case AV_CODEC_ID_H263P:
3062 if(!s->h263_slice_structured)
3063 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3065 case AV_CODEC_ID_MPEG2VIDEO:
3066 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3067 case AV_CODEC_ID_MPEG1VIDEO:
3068 if(s->mb_skip_run) is_gob_start=0;
3070 case AV_CODEC_ID_MJPEG:
3071 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3076 if(s->start_mb_y != mb_y || mb_x!=0){
3079 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3080 ff_mpeg4_init_partitions(s);
3084 av_assert2((put_bits_count(&s->pb)&7) == 0);
3085 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3087 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3088 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3089 int d = 100 / s->error_rate;
3091 current_packet_size=0;
3092 s->pb.buf_ptr= s->ptr_lastgob;
3093 av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3097 #if FF_API_RTP_CALLBACK
3098 FF_DISABLE_DEPRECATION_WARNINGS
3099 if (s->avctx->rtp_callback){
3100 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3101 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3103 FF_ENABLE_DEPRECATION_WARNINGS
3105 update_mb_info(s, 1);
3107 switch(s->codec_id){
3108 case AV_CODEC_ID_MPEG4:
3109 if (CONFIG_MPEG4_ENCODER) {
3110 ff_mpeg4_encode_video_packet_header(s);
3111 ff_mpeg4_clean_buffers(s);
3114 case AV_CODEC_ID_MPEG1VIDEO:
3115 case AV_CODEC_ID_MPEG2VIDEO:
3116 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3117 ff_mpeg1_encode_slice_header(s);
3118 ff_mpeg1_clean_buffers(s);
3121 case AV_CODEC_ID_H263:
3122 case AV_CODEC_ID_H263P:
3123 if (CONFIG_H263_ENCODER)
3124 ff_h263_encode_gob_header(s, mb_y);
3128 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3129 int bits= put_bits_count(&s->pb);
3130 s->misc_bits+= bits - s->last_bits;
3134 s->ptr_lastgob += current_packet_size;
3135 s->first_slice_line=1;
3136 s->resync_mb_x=mb_x;
3137 s->resync_mb_y=mb_y;
3141 if( (s->resync_mb_x == s->mb_x)
3142 && s->resync_mb_y+1 == s->mb_y){
3143 s->first_slice_line=0;
3147 s->dquant=0; //only for QP_RD
3149 update_mb_info(s, 0);
3151 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3153 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3155 copy_context_before_encode(&backup_s, s, -1);
3157 best_s.data_partitioning= s->data_partitioning;
3158 best_s.partitioned_frame= s->partitioned_frame;
3159 if(s->data_partitioning){
3160 backup_s.pb2= s->pb2;
3161 backup_s.tex_pb= s->tex_pb;
3164 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3165 s->mv_dir = MV_DIR_FORWARD;
3166 s->mv_type = MV_TYPE_16X16;
3168 s->mv[0][0][0] = s->p_mv_table[xy][0];
3169 s->mv[0][0][1] = s->p_mv_table[xy][1];
3170 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3171 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3173 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3174 s->mv_dir = MV_DIR_FORWARD;
3175 s->mv_type = MV_TYPE_FIELD;
3178 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3179 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3180 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3182 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3183 &dmin, &next_block, 0, 0);
3185 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3186 s->mv_dir = MV_DIR_FORWARD;
3187 s->mv_type = MV_TYPE_16X16;
3191 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3192 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3194 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3195 s->mv_dir = MV_DIR_FORWARD;
3196 s->mv_type = MV_TYPE_8X8;
3199 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3200 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3202 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3203 &dmin, &next_block, 0, 0);
3205 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3206 s->mv_dir = MV_DIR_FORWARD;
3207 s->mv_type = MV_TYPE_16X16;
3209 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3210 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3211 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3212 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3214 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3215 s->mv_dir = MV_DIR_BACKWARD;
3216 s->mv_type = MV_TYPE_16X16;
3218 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3219 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3220 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3221 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3223 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3224 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3225 s->mv_type = MV_TYPE_16X16;
3227 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3228 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3229 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3230 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3231 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3232 &dmin, &next_block, 0, 0);
3234 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3235 s->mv_dir = MV_DIR_FORWARD;
3236 s->mv_type = MV_TYPE_FIELD;
3239 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3240 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3241 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3243 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3244 &dmin, &next_block, 0, 0);
3246 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3247 s->mv_dir = MV_DIR_BACKWARD;
3248 s->mv_type = MV_TYPE_FIELD;
3251 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3252 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3253 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3255 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3256 &dmin, &next_block, 0, 0);
3258 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3259 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3260 s->mv_type = MV_TYPE_FIELD;
3262 for(dir=0; dir<2; dir++){
3264 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3265 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3266 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3269 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3270 &dmin, &next_block, 0, 0);
3272 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3274 s->mv_type = MV_TYPE_16X16;
3278 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3279 &dmin, &next_block, 0, 0);
3280 if(s->h263_pred || s->h263_aic){
3282 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3284 ff_clean_intra_table_entries(s); //old mode?
3288 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3289 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3290 const int last_qp= backup_s.qscale;
3293 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3294 static const int dquant_tab[4]={-1,1,-2,2};
3295 int storecoefs = s->mb_intra && s->dc_val[0];
3297 av_assert2(backup_s.dquant == 0);
3300 s->mv_dir= best_s.mv_dir;
3301 s->mv_type = MV_TYPE_16X16;
3302 s->mb_intra= best_s.mb_intra;
3303 s->mv[0][0][0] = best_s.mv[0][0][0];
3304 s->mv[0][0][1] = best_s.mv[0][0][1];
3305 s->mv[1][0][0] = best_s.mv[1][0][0];
3306 s->mv[1][0][1] = best_s.mv[1][0][1];
3308 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3309 for(; qpi<4; qpi++){
3310 int dquant= dquant_tab[qpi];
3311 qp= last_qp + dquant;
3312 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3314 backup_s.dquant= dquant;
3317 dc[i]= s->dc_val[0][ s->block_index[i] ];
3318 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3322 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3323 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3324 if(best_s.qscale != qp){
3327 s->dc_val[0][ s->block_index[i] ]= dc[i];
3328 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3335 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3336 int mx= s->b_direct_mv_table[xy][0];
3337 int my= s->b_direct_mv_table[xy][1];
3339 backup_s.dquant = 0;
3340 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3342 ff_mpeg4_set_direct_mv(s, mx, my);
3343 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3344 &dmin, &next_block, mx, my);
3346 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3347 backup_s.dquant = 0;
3348 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3350 ff_mpeg4_set_direct_mv(s, 0, 0);
3351 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3352 &dmin, &next_block, 0, 0);
3354 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3357 coded |= s->block_last_index[i];
3360 memcpy(s->mv, best_s.mv, sizeof(s->mv));
3361 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3362 mx=my=0; //FIXME find the one we actually used
3363 ff_mpeg4_set_direct_mv(s, mx, my);
3364 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3372 s->mv_dir= best_s.mv_dir;
3373 s->mv_type = best_s.mv_type;
3375 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3376 s->mv[0][0][1] = best_s.mv[0][0][1];
3377 s->mv[1][0][0] = best_s.mv[1][0][0];
3378 s->mv[1][0][1] = best_s.mv[1][0][1];*/
3381 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3382 &dmin, &next_block, mx, my);
3387 s->current_picture.qscale_table[xy] = best_s.qscale;
3389 copy_context_after_encode(s, &best_s, -1);
3391 pb_bits_count= put_bits_count(&s->pb);
3392 flush_put_bits(&s->pb);
3393 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3396 if(s->data_partitioning){
3397 pb2_bits_count= put_bits_count(&s->pb2);
3398 flush_put_bits(&s->pb2);
3399 ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3400 s->pb2= backup_s.pb2;
3402 tex_pb_bits_count= put_bits_count(&s->tex_pb);
3403 flush_put_bits(&s->tex_pb);
3404 ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3405 s->tex_pb= backup_s.tex_pb;
3407 s->last_bits= put_bits_count(&s->pb);
3409 if (CONFIG_H263_ENCODER &&
3410 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3411 ff_h263_update_motion_val(s);
3413 if(next_block==0){ //FIXME 16 vs linesize16
3414 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3415 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3416 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3419 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3420 ff_mpv_reconstruct_mb(s, s->block);
3422 int motion_x = 0, motion_y = 0;
3423 s->mv_type=MV_TYPE_16X16;
3424 // only one MB-Type possible
3427 case CANDIDATE_MB_TYPE_INTRA:
3430 motion_x= s->mv[0][0][0] = 0;
3431 motion_y= s->mv[0][0][1] = 0;
3433 case CANDIDATE_MB_TYPE_INTER:
3434 s->mv_dir = MV_DIR_FORWARD;
3436 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3437 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3439 case CANDIDATE_MB_TYPE_INTER_I:
3440 s->mv_dir = MV_DIR_FORWARD;
3441 s->mv_type = MV_TYPE_FIELD;
3444 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3445 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3446 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3449 case CANDIDATE_MB_TYPE_INTER4V:
3450 s->mv_dir = MV_DIR_FORWARD;
3451 s->mv_type = MV_TYPE_8X8;
3454 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3455 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3458 case CANDIDATE_MB_TYPE_DIRECT:
3459 if (CONFIG_MPEG4_ENCODER) {
3460 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3462 motion_x=s->b_direct_mv_table[xy][0];
3463 motion_y=s->b_direct_mv_table[xy][1];
3464 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3467 case CANDIDATE_MB_TYPE_DIRECT0:
3468 if (CONFIG_MPEG4_ENCODER) {
3469 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3471 ff_mpeg4_set_direct_mv(s, 0, 0);
3474 case CANDIDATE_MB_TYPE_BIDIR:
3475 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3477 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3478 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3479 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3480 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3482 case CANDIDATE_MB_TYPE_BACKWARD:
3483 s->mv_dir = MV_DIR_BACKWARD;
3485 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3486 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3488 case CANDIDATE_MB_TYPE_FORWARD:
3489 s->mv_dir = MV_DIR_FORWARD;
3491 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3492 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3494 case CANDIDATE_MB_TYPE_FORWARD_I:
3495 s->mv_dir = MV_DIR_FORWARD;
3496 s->mv_type = MV_TYPE_FIELD;
3499 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3500 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3501 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3504 case CANDIDATE_MB_TYPE_BACKWARD_I:
3505 s->mv_dir = MV_DIR_BACKWARD;
3506 s->mv_type = MV_TYPE_FIELD;
3509 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3510 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3511 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3514 case CANDIDATE_MB_TYPE_BIDIR_I:
3515 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3516 s->mv_type = MV_TYPE_FIELD;
3518 for(dir=0; dir<2; dir++){
3520 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3521 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3522 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3527 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3530 encode_mb(s, motion_x, motion_y);
3532 // RAL: Update last macroblock type
3533 s->last_mv_dir = s->mv_dir;
3535 if (CONFIG_H263_ENCODER &&
3536 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3537 ff_h263_update_motion_val(s);
3539 ff_mpv_reconstruct_mb(s, s->block);
3542 /* clean the MV table in IPS frames for direct mode in B-frames */
3543 if(s->mb_intra /* && I,P,S_TYPE */){
3544 s->p_mv_table[xy][0]=0;
3545 s->p_mv_table[xy][1]=0;
3548 if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3552 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3553 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3555 s->current_picture.encoding_error[0] += sse(
3556 s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3557 s->dest[0], w, h, s->linesize);
3558 s->current_picture.encoding_error[1] += sse(
3559 s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3560 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3561 s->current_picture.encoding_error[2] += sse(
3562 s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3563 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3566 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3567 ff_h263_loop_filter(s);
3569 ff_dlog(s->avctx, "MB %d %d bits\n",
3570 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3574 //not beautiful here but we must write it before flushing so it has to be here
3575 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3576 ff_msmpeg4_encode_ext_header(s);
3580 #if FF_API_RTP_CALLBACK
3581 FF_DISABLE_DEPRECATION_WARNINGS
3582 /* Send the last GOB if RTP */
3583 if (s->avctx->rtp_callback) {
3584 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3585 int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3586 /* Call the RTP callback to send the last GOB */
3588 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3590 FF_ENABLE_DEPRECATION_WARNINGS
/* Add a field of the slice (src) context into the main (dst) context and
 * zero the source, so a context is never merged twice. */
3596 #define MERGE(field) dst->field += src->field; src->field=0
/* Merge per-slice motion-estimation statistics (scene-change score and the
 * MB variance accumulators) from a worker thread context into the main one. */
3597 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3598 MERGE(me.scene_change_score);
3599 MERGE(me.mc_mb_var_sum_temp);
3600 MERGE(me.mb_var_sum_temp);
/* Merge the encoding-side statistics of a slice context into the main
 * context after encode_thread() has run, and append the slice's bitstream
 * (src->pb) onto the main bitstream writer. Both writers must be
 * byte-aligned at this point (asserted below). */
3603 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3606 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3607 MERGE(dct_count[1]);
3616 MERGE(er.error_count);
3617 MERGE(padding_bug_score);
3618 MERGE(current_picture.encoding_error[0]);
3619 MERGE(current_picture.encoding_error[1]);
3620 MERGE(current_picture.encoding_error[2]);
/* Noise-reduction DCT error sums are only maintained when the feature is on. */
3622 if (dst->noise_reduction){
3623 for(i=0; i<64; i++){
3624 MERGE(dct_error_sum[0][i]);
3625 MERGE(dct_error_sum[1][i]);
/* Slices are written byte-aligned, so copying whole bytes is safe here. */
3629 av_assert1(put_bits_count(&src->pb) % 8 ==0);
3630 av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3631 ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3632 flush_put_bits(&dst->pb);
/* Choose the quantizer / lambda for the current picture.
 * Priority: an explicitly scheduled next_lambda, otherwise the rate
 * controller (unless qscale is fixed). With adaptive quantization the
 * per-MB qscale tables are cleaned up for the codec in use and lambda is
 * taken from the per-MB lambda table; otherwise from the picture quality.
 * dry_run: when nonzero, do not consume state (next_lambda is kept). */
3635 static int estimate_qp(MpegEncContext *s, int dry_run){
3636 if (s->next_lambda){
3637 s->current_picture_ptr->f->quality =
3638 s->current_picture.f->quality = s->next_lambda;
3639 if(!dry_run) s->next_lambda= 0;
3640 } else if (!s->fixed_qscale) {
3641 int quality = ff_rate_estimate_qscale(s, dry_run);
3642 s->current_picture_ptr->f->quality =
3643 s->current_picture.f->quality = quality;
3644 if (s->current_picture.f->quality < 0)
3648 if(s->adaptive_quant){
3649 switch(s->codec_id){
3650 case AV_CODEC_ID_MPEG4:
3651 if (CONFIG_MPEG4_ENCODER)
3652 ff_clean_mpeg4_qscales(s);
3654 case AV_CODEC_ID_H263:
3655 case AV_CODEC_ID_H263P:
3656 case AV_CODEC_ID_FLV1:
3657 if (CONFIG_H263_ENCODER)
3658 ff_clean_h263_qscales(s);
3661 ff_init_qscale_tab(s);
/* Adaptive quant: lambda of the first MB stands in for the picture lambda. */
3664 s->lambda= s->lambda_table[0];
3667 s->lambda = s->current_picture.f->quality;
3672 /* must be called before writing the header */
/* Derive the temporal distances used by B-frame prediction:
 * pp_time = distance between the two surrounding non-B frames,
 * pb_time = distance from the previous non-B frame to this B-frame.
 * Times are in time_base.num units taken from the frame pts. */
3673 static void set_frame_distances(MpegEncContext * s){
3674 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3675 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3677 if(s->pict_type==AV_PICTURE_TYPE_B){
3678 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3679 av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
3681 s->pp_time= s->time - s->last_non_b_time;
3682 s->last_non_b_time= s->time;
3683 av_assert1(s->picture_number==0 || s->pp_time > 0);
/* Encode one picture.
 * High-level flow: set frame distances, estimate qp/lambda, run motion
 * estimation across all slice thread contexts, possibly retype the picture
 * on scene change, pick f_code/b_code and clip long MVs, build the
 * quantization matrices (MJPEG/AMV/SpeedHQ have special handling), write
 * the picture header for the active output format, then run encode_thread
 * on every slice context and merge results. Returns <0 on error. */
3687 static int encode_picture(MpegEncContext *s, int picture_number)
3691 int context_count = s->slice_context_count;
3693 s->picture_number = picture_number;
3695 /* Reset the average MB variance */
3696 s->me.mb_var_sum_temp =
3697 s->me.mc_mb_var_sum_temp = 0;
3699 /* we need to initialize some time vars before we can encode B-frames */
3700 // RAL: Condition added for MPEG1VIDEO
3701 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3702 set_frame_distances(s);
3703 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3704 ff_set_mpeg4_time(s);
3706 s->me.scene_change_score=0;
3708 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* Rounding mode: MSMPEG4 v3+ uses no_rounding on I-frames; codecs with
 * flipflop rounding toggle it on each non-B frame. */
3710 if(s->pict_type==AV_PICTURE_TYPE_I){
3711 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3712 else s->no_rounding=0;
3713 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3714 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3715 s->no_rounding ^= 1;
/* Two-pass rate control: take qp from the stats file; otherwise reuse the
 * last lambda of the matching picture type for the ME pass. */
3718 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3719 if (estimate_qp(s,1) < 0)
3721 ff_get_2pass_fcode(s);
3722 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3723 if(s->pict_type==AV_PICTURE_TYPE_B)
3724 s->lambda= s->last_lambda_for[s->pict_type];
3726 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* Outside (A)MJPEG the chroma intra matrix aliases the luma one. */
3730 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3731 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3732 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3733 s->q_chroma_intra_matrix = s->q_intra_matrix;
3734 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3737 s->mb_intra=0; //for the rate distortion & bit compare functions
3738 for(i=1; i<context_count; i++){
3739 ret = ff_update_duplicate_context(s->thread_context[i], s);
3747 /* Estimate motion for every MB */
3748 if(s->pict_type != AV_PICTURE_TYPE_I){
/* Scale lambda by the ME penalty compensation before motion search. */
3749 s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3750 s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3751 if (s->pict_type != AV_PICTURE_TYPE_B) {
3752 if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3754 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3758 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3759 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3761 for(i=0; i<s->mb_stride*s->mb_height; i++)
3762 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3764 if(!s->fixed_qscale){
3765 /* finding spatial complexity for I-frame rate control */
3766 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3769 for(i=1; i<context_count; i++){
3770 merge_context_after_me(s, s->thread_context[i]);
3772 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3773 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* Scene change: promote the P-frame to an I-frame and mark all MBs intra. */
3776 if (s->me.scene_change_score > s->scenechange_threshold &&
3777 s->pict_type == AV_PICTURE_TYPE_P) {
3778 s->pict_type= AV_PICTURE_TYPE_I;
3779 for(i=0; i<s->mb_stride*s->mb_height; i++)
3780 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3781 if(s->msmpeg4_version >= 3)
3783 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3784 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* P/S frames: pick the forward f_code and clip over-long motion vectors. */
3788 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3789 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3791 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3793 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3794 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3795 s->f_code= FFMAX3(s->f_code, a, b);
3798 ff_fix_long_p_mvs(s, s->intra_penalty ? CANDIDATE_MB_TYPE_INTER : CANDIDATE_MB_TYPE_INTRA);
3799 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3800 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3804 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3805 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
/* B-frames: pick f_code (forward) and b_code (backward) and clip MVs for
 * every candidate prediction direction. */
3810 if(s->pict_type==AV_PICTURE_TYPE_B){
3813 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3814 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3815 s->f_code = FFMAX(a, b);
3817 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3818 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3819 s->b_code = FFMAX(a, b);
3821 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3822 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3823 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3824 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3825 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3827 for(dir=0; dir<2; dir++){
3830 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3831 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3832 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3833 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* Final qp estimation now that the picture type is settled. */
3841 if (estimate_qp(s, 0) < 0)
3844 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3845 s->pict_type == AV_PICTURE_TYPE_I &&
3846 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3847 s->qscale= 3; //reduce clipping problems
/* MJPEG bakes qscale directly into the quant matrices. */
3849 if (s->out_format == FMT_MJPEG) {
3850 const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3851 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3853 if (s->avctx->intra_matrix) {
3855 luma_matrix = s->avctx->intra_matrix;
3857 if (s->avctx->chroma_intra_matrix)
3858 chroma_matrix = s->avctx->chroma_intra_matrix;
3860 /* for mjpeg, we do include qscale in the matrix */
3862 int j = s->idsp.idct_permutation[i];
3864 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3865 s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3867 s->y_dc_scale_table=
3868 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3869 s->chroma_intra_matrix[0] =
3870 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3871 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3872 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3873 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3874 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV uses fixed sp5x quant tables and fixed DC scale tables. */
3877 if(s->codec_id == AV_CODEC_ID_AMV){
3878 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3879 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3881 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3883 s->intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3884 s->chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3886 s->y_dc_scale_table= y;
3887 s->c_dc_scale_table= c;
3888 s->intra_matrix[0] = 13;
3889 s->chroma_intra_matrix[0] = 14;
3890 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3891 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3892 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3893 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3897 if (s->out_format == FMT_SPEEDHQ) {
3898 s->y_dc_scale_table=
3899 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[3];
3902 //FIXME var duplication
3903 s->current_picture_ptr->f->key_frame =
3904 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3905 s->current_picture_ptr->f->pict_type =
3906 s->current_picture.f->pict_type = s->pict_type;
3908 if (s->current_picture.f->key_frame)
3909 s->picture_in_gop_number=0;
/* Write the picture header for the active output format / codec. */
3911 s->mb_x = s->mb_y = 0;
3912 s->last_bits= put_bits_count(&s->pb);
3913 switch(s->out_format) {
3915 if (CONFIG_MJPEG_ENCODER && s->huffman != HUFFMAN_TABLE_OPTIMAL)
3916 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3917 s->pred, s->intra_matrix, s->chroma_intra_matrix);
3920 if (CONFIG_SPEEDHQ_ENCODER)
3921 ff_speedhq_encode_picture_header(s);
3924 if (CONFIG_H261_ENCODER)
3925 ff_h261_encode_picture_header(s, picture_number);
3928 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3929 ff_wmv2_encode_picture_header(s, picture_number);
3930 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3931 ff_msmpeg4_encode_picture_header(s, picture_number);
3932 else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3933 ret = ff_mpeg4_encode_picture_header(s, picture_number);
3936 } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3937 ret = ff_rv10_encode_picture_header(s, picture_number);
3941 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3942 ff_rv20_encode_picture_header(s, picture_number);
3943 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3944 ff_flv_encode_picture_header(s, picture_number);
3945 else if (CONFIG_H263_ENCODER)
3946 ff_h263_encode_picture_header(s, picture_number);
3949 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3950 ff_mpeg1_encode_picture_header(s, picture_number);
3955 bits= put_bits_count(&s->pb);
3956 s->header_bits= bits - s->last_bits;
/* Encode all slices in parallel, then merge statistics and bitstreams. */
3958 for(i=1; i<context_count; i++){
3959 update_duplicate_context_after_me(s->thread_context[i], s);
3961 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3962 for(i=1; i<context_count; i++){
3963 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3964 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3965 merge_context_after_encode(s, s->thread_context[i]);
/* Noise-reduction pass on a DCT block: accumulate the per-coefficient error
 * statistics (dct_error_sum, keyed by intra/inter) and shrink each
 * coefficient towards zero by the running dct_offset, clamping at zero so a
 * coefficient never changes sign. */
3971 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3972 const int intra= s->mb_intra;
3975 s->dct_count[intra]++;
3977 for(i=0; i<64; i++){
3978 int level= block[i];
/* Positive branch: subtract the offset, clamp at 0. */
3982 s->dct_error_sum[intra][i] += level;
3983 level -= s->dct_offset[intra][i];
3984 if(level<0) level=0;
/* Negative branch: add the offset, clamp at 0 (from below). */
3986 s->dct_error_sum[intra][i] -= level;
3987 level += s->dct_offset[intra][i];
3988 if(level>0) level=0;
/* Rate-distortion-optimal (trellis) quantization of one 8x8 block.
 * Runs the forward DCT, builds up to two candidate quantized levels per
 * coefficient, then searches run/level combinations with a Viterbi-style
 * survivor list, scoring distortion + lambda * bits.
 * n selects the block (n<4 luma, else chroma); *overflow is set when a
 * level exceeded max_qcoeff. Returns the index of the last nonzero
 * coefficient in scan order (or <start_i when the block is empty). */
3995 static int dct_quantize_trellis_c(MpegEncContext *s,
3996 int16_t *block, int n,
3997 int qscale, int *overflow){
3999 const uint16_t *matrix;
4000 const uint8_t *scantable;
4001 const uint8_t *perm_scantable;
4003 unsigned int threshold1, threshold2;
4015 int coeff_count[64];
4016 int qmul, qadd, start_i, last_non_zero, i, dc;
4017 const int esc_length= s->ac_esc_length;
4019 uint8_t * last_length;
4020 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
4023 s->fdsp.fdct(block);
4025 if(s->dct_error_sum)
4026 s->denoise_dct(s, block);
4028 qadd= ((qscale-1)|1)*8;
/* MPEG-2 may use the nonlinear quantizer scale table. */
4030 if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
4031 else mpeg2_qscale = qscale << 1;
/* Intra path: intra scantables/matrices, DC quantized separately. */
4035 scantable= s->intra_scantable.scantable;
4036 perm_scantable= s->intra_scantable.permutated;
4044 /* For AIC we skip quant/dequant of INTRADC */
4049 /* note: block[0] is assumed to be positive */
4050 block[0] = (block[0] + (q >> 1)) / q;
4053 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4054 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4055 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4056 bias= 1<<(QMAT_SHIFT-1);
4058 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4059 length = s->intra_chroma_ac_vlc_length;
4060 last_length= s->intra_chroma_ac_vlc_last_length;
4062 length = s->intra_ac_vlc_length;
4063 last_length= s->intra_ac_vlc_last_length;
/* Inter path: inter scantables/matrices/VLC length tables. */
4066 scantable= s->inter_scantable.scantable;
4067 perm_scantable= s->inter_scantable.permutated;
4070 qmat = s->q_inter_matrix[qscale];
4071 matrix = s->inter_matrix;
4072 length = s->inter_ac_vlc_length;
4073 last_length= s->inter_ac_vlc_last_length;
4077 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4078 threshold2= (threshold1<<1);
/* Find the last coefficient that survives the dead-zone threshold. */
4080 for(i=63; i>=start_i; i--) {
4081 const int j = scantable[i];
4082 int level = block[j] * qmat[j];
4084 if(((unsigned)(level+threshold1))>threshold2){
/* Build up to two candidate levels (level, level-1) per coefficient. */
4090 for(i=start_i; i<=last_non_zero; i++) {
4091 const int j = scantable[i];
4092 int level = block[j] * qmat[j];
4094 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4095 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4096 if(((unsigned)(level+threshold1))>threshold2){
4098 level= (bias + level)>>QMAT_SHIFT;
4100 coeff[1][i]= level-1;
4101 // coeff[2][k]= level-2;
4103 level= (bias - level)>>QMAT_SHIFT;
4104 coeff[0][i]= -level;
4105 coeff[1][i]= -level+1;
4106 // coeff[2][k]= -level+2;
4108 coeff_count[i]= FFMIN(level, 2);
4109 av_assert2(coeff_count[i]);
4112 coeff[0][i]= (level>>31)|1;
4117 *overflow= s->max_qcoeff < max; //overflow might have happened
4119 if(last_non_zero < start_i){
4120 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4121 return last_non_zero;
/* Trellis search: score_tab[i] is the best cost up to coefficient i,
 * survivor[] the still-viable predecessor positions. */
4124 score_tab[start_i]= 0;
4125 survivor[0]= start_i;
4128 for(i=start_i; i<=last_non_zero; i++){
4129 int level_index, j, zero_distortion;
4130 int dct_coeff= FFABS(block[ scantable[i] ]);
4131 int best_score=256*256*256*120;
/* ifast DCT output is AAN-prescaled; undo the scaling for distortion. */
4133 if (s->fdsp.fdct == ff_fdct_ifast)
4134 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4135 zero_distortion= dct_coeff*dct_coeff;
4137 for(level_index=0; level_index < coeff_count[i]; level_index++){
4139 int level= coeff[level_index][i];
4140 const int alevel= FFABS(level);
/* Reconstruct the dequantized value per output format to measure
 * the true distortion against the DCT coefficient. */
4145 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4146 unquant_coeff= alevel*qmul + qadd;
4147 } else if(s->out_format == FMT_MJPEG) {
4148 j = s->idsp.idct_permutation[scantable[i]];
4149 unquant_coeff = alevel * matrix[j] * 8;
4151 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4153 unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4154 unquant_coeff = (unquant_coeff - 1) | 1;
4156 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4157 unquant_coeff = (unquant_coeff - 1) | 1;
4162 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* Non-escape levels: cost from the per-(run,level) VLC length tables. */
4164 if((level&(~127)) == 0){
4165 for(j=survivor_count-1; j>=0; j--){
4166 int run= i - survivor[j];
4167 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4168 score += score_tab[i-run];
4170 if(score < best_score){
4173 level_tab[i+1]= level-64;
4177 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4178 for(j=survivor_count-1; j>=0; j--){
4179 int run= i - survivor[j];
4180 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4181 score += score_tab[i-run];
4182 if(score < last_score){
4185 last_level= level-64;
/* Escape-coded levels: fixed escape length instead of VLC lookup. */
4191 distortion += esc_length*lambda;
4192 for(j=survivor_count-1; j>=0; j--){
4193 int run= i - survivor[j];
4194 int score= distortion + score_tab[i-run];
4196 if(score < best_score){
4199 level_tab[i+1]= level-64;
4203 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4204 for(j=survivor_count-1; j>=0; j--){
4205 int run= i - survivor[j];
4206 int score= distortion + score_tab[i-run];
4207 if(score < last_score){
4210 last_level= level-64;
4218 score_tab[i+1]= best_score;
/* Prune survivors that can no longer be on an optimal path. */
4220 // Note: there is a vlc code in MPEG-4 which is 1 bit shorter than another one with a shorter run and the same level
4221 if(last_non_zero <= 27){
4222 for(; survivor_count; survivor_count--){
4223 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4227 for(; survivor_count; survivor_count--){
4228 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4233 survivor[ survivor_count++ ]= i+1;
/* For non-H.263/H.261 formats pick the best termination point now. */
4236 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4237 last_score= 256*256*256*120;
4238 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4239 int score= score_tab[i];
4241 score += lambda * 2; // FIXME more exact?
4243 if(score < last_score){
4246 last_level= level_tab[i];
4247 last_run= run_tab[i];
4252 s->coded_score[n] = last_score;
4254 dc= FFABS(block[0]);
4255 last_non_zero= last_i - 1;
4256 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4258 if(last_non_zero < start_i)
4259 return last_non_zero;
/* Special case: only the DC-position coefficient survives — decide whether
 * coding it at all beats dropping the block entirely. */
4261 if(last_non_zero == 0 && start_i == 0){
4263 int best_score= dc * dc;
4265 for(i=0; i<coeff_count[0]; i++){
4266 int level= coeff[i][0];
4267 int alevel= FFABS(level);
4268 int unquant_coeff, score, distortion;
4270 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4271 unquant_coeff= (alevel*qmul + qadd)>>3;
4273 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4274 unquant_coeff = (unquant_coeff - 1) | 1;
4276 unquant_coeff = (unquant_coeff + 4) >> 3;
4277 unquant_coeff<<= 3 + 3;
4279 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4281 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4282 else score= distortion + esc_length*lambda;
4284 if(score < best_score){
4286 best_level= level - 64;
4289 block[0]= best_level;
4290 s->coded_score[n] = best_score - dc*dc;
4291 if(best_level == 0) return -1;
4292 else return last_non_zero;
/* Backtrack the winning path and write the chosen levels into block[]. */
4296 av_assert2(last_level);
4298 block[ perm_scantable[last_non_zero] ]= last_level;
4301 for(; i>start_i; i -= run_tab[i] + 1){
4302 block[ perm_scantable[i-1] ]= level_tab[i];
4305 return last_non_zero;
/* 2D IDCT basis functions in BASIS_SHIFT fixed point, indexed by the
 * (permuted) coefficient position; used by dct_quantize_refine() to evaluate
 * coefficient changes in the pixel domain. Built lazily on first use. */
4308 static int16_t basis[64][64];
/* Fill basis[] with the separable 8x8 DCT-III basis, applying the IDCT
 * coefficient permutation so it matches the encoder's block layout. */
4310 static void build_basis(uint8_t *perm){
4317 double s= 0.25*(1<<BASIS_SHIFT);
4319 int perm_index= perm[index];
/* DC rows/columns carry the 1/sqrt(2) normalization factor. */
4320 if(i==0) s*= sqrt(0.5);
4321 if(j==0) s*= sqrt(0.5);
4322 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Second-pass refinement of an already-quantized block (noise shaping):
 * repeatedly tries +-1 changes on each coefficient, scoring the pixel-domain
 * distortion change via the precomputed basis[] functions plus the VLC bit
 * cost change, and applies the best improvement until none remains.
 * weight holds per-pixel weights, orig the original pixels; returns the new
 * last-nonzero index. */
4329 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4330 int16_t *block, int16_t *weight, int16_t *orig,
4333 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4334 const uint8_t *scantable;
4335 const uint8_t *perm_scantable;
4336 // unsigned int threshold1, threshold2;
4341 int qmul, qadd, start_i, last_non_zero, i, dc;
4343 uint8_t * last_length;
4345 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* Lazily build the IDCT basis table on first call. */
4347 if(basis[0][0] == 0)
4348 build_basis(s->idsp.idct_permutation);
4353 scantable= s->intra_scantable.scantable;
4354 perm_scantable= s->intra_scantable.permutated;
4361 /* For AIC we skip quant/dequant of INTRADC */
4365 q <<= RECON_SHIFT-3;
4366 /* note: block[0] is assumed to be positive */
4368 // block[0] = (block[0] + (q >> 1)) / q;
4370 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4371 // bias= 1<<(QMAT_SHIFT-1);
4372 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4373 length = s->intra_chroma_ac_vlc_length;
4374 last_length= s->intra_chroma_ac_vlc_last_length;
4376 length = s->intra_ac_vlc_length;
4377 last_length= s->intra_ac_vlc_last_length;
4380 scantable= s->inter_scantable.scantable;
4381 perm_scantable= s->inter_scantable.permutated;
4384 length = s->inter_ac_vlc_length;
4385 last_length= s->inter_ac_vlc_last_length;
4387 last_non_zero = s->block_last_index[n];
/* rem[] = residual between reconstruction and original, in RECON_SHIFT
 * fixed point; the refinement tries to minimize its weighted energy. */
4389 dc += (1<<(RECON_SHIFT-1));
4390 for(i=0; i<64; i++){
4391 rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
4395 for(i=0; i<64; i++){
/* Map weights into the 16..63 range used by the basis scoring. */
4400 w= FFABS(weight[i]) + qns*one;
4401 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4404 // w=weight[i] = (63*qns + (w/2)) / w;
4407 av_assert2(w<(1<<6));
4410 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* Record the current run-length structure and subtract each coded
 * coefficient's contribution from the residual. */
4414 for(i=start_i; i<=last_non_zero; i++){
4415 int j= perm_scantable[i];
4416 const int level= block[j];
4420 if(level<0) coeff= qmul*level - qadd;
4421 else coeff= qmul*level + qadd;
4422 run_tab[rle_index++]=run;
4425 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
/* Iterative improvement loop: find the single +-1 coefficient change with
 * the best (distortion + lambda*bits) gain and apply it; repeat. */
4432 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4435 int run2, best_unquant_change=0, analyze_gradient;
4436 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
/* Gradient of the residual in DCT domain, used to skip hopeless changes. */
4438 if(analyze_gradient){
4439 for(i=0; i<64; i++){
4442 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
/* Intra DC gets its own +-1 trial (no VLC run/level interaction). */
4448 const int level= block[0];
4449 int change, old_coeff;
4451 av_assert2(s->mb_intra);
4455 for(change=-1; change<=1; change+=2){
4456 int new_level= level + change;
4457 int score, new_coeff;
4459 new_coeff= q*new_level;
4460 if(new_coeff >= 2048 || new_coeff < 0)
4463 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4464 new_coeff - old_coeff);
4465 if(score<best_score){
4468 best_change= change;
4469 best_unquant_change= new_coeff - old_coeff;
4476 run2= run_tab[rle_index++];
/* AC coefficients: try +-1 on each position, accounting for how the
 * change alters the run/level VLC costs of neighbouring coefficients. */
4480 for(i=start_i; i<64; i++){
4481 int j= perm_scantable[i];
4482 const int level= block[j];
4483 int change, old_coeff;
4485 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4489 if(level<0) old_coeff= qmul*level - qadd;
4490 else old_coeff= qmul*level + qadd;
4491 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4495 av_assert2(run2>=0 || i >= last_non_zero );
4498 for(change=-1; change<=1; change+=2){
4499 int new_level= level + change;
4500 int score, new_coeff, unquant_change;
4503 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4507 if(new_level<0) new_coeff= qmul*new_level - qadd;
4508 else new_coeff= qmul*new_level + qadd;
4509 if(new_coeff >= 2048 || new_coeff <= -2048)
4511 //FIXME check for overflow
/* Nonzero -> nonzero: only the VLC length of this (run,level) changes. */
4514 if(level < 63 && level > -63){
4515 if(i < last_non_zero)
4516 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4517 - length[UNI_AC_ENC_INDEX(run, level+64)];
4519 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4520 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* Zero -> +-1: a new coefficient appears, splitting a run. */
4523 av_assert2(FFABS(new_level)==1);
4525 if(analyze_gradient){
4526 int g= d1[ scantable[i] ];
4527 if(g && (g^new_level) >= 0)
4531 if(i < last_non_zero){
4532 int next_i= i + run2 + 1;
4533 int next_level= block[ perm_scantable[next_i] ] + 64;
4535 if(next_level&(~127))
4538 if(next_i < last_non_zero)
4539 score += length[UNI_AC_ENC_INDEX(run, 65)]
4540 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4541 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4543 score += length[UNI_AC_ENC_INDEX(run, 65)]
4544 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4545 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4547 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4549 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4550 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* +-1 -> zero: the coefficient disappears, merging two runs. */
4556 av_assert2(FFABS(level)==1);
4558 if(i < last_non_zero){
4559 int next_i= i + run2 + 1;
4560 int next_level= block[ perm_scantable[next_i] ] + 64;
4562 if(next_level&(~127))
4565 if(next_i < last_non_zero)
4566 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4567 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4568 - length[UNI_AC_ENC_INDEX(run, 65)];
4570 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4571 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4572 - length[UNI_AC_ENC_INDEX(run, 65)];
4574 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4576 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4577 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4584 unquant_change= new_coeff - old_coeff;
4585 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4587 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4589 if(score<best_score){
4592 best_change= change;
4593 best_unquant_change= unquant_change;
4597 prev_level= level + 64;
4598 if(prev_level&(~127))
/* Apply the winning change, update last_non_zero and the run table, and
 * fold the change back into the residual before the next iteration. */
4608 int j= perm_scantable[ best_coeff ];
4610 block[j] += best_change;
4612 if(best_coeff > last_non_zero){
4613 last_non_zero= best_coeff;
4614 av_assert2(block[j]);
4616 for(; last_non_zero>=start_i; last_non_zero--){
4617 if(block[perm_scantable[last_non_zero]])
4624 for(i=start_i; i<=last_non_zero; i++){
4625 int j= perm_scantable[i];
4626 const int level= block[j];
4629 run_tab[rle_index++]=run;
4636 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4642 return last_non_zero;
4646 * Permute an 8x8 block according to permutation.
4647 * @param block the block which will be permuted according to
4648 * the given permutation vector
4649 * @param permutation the permutation vector
4650 * @param last the last non-zero coefficient in scantable order, used to
4651 * speed the permutation up
4652 * @param scantable the used scantable, this is only used to speed the
4653 * permutation up, the block is not (inverse) permuted
4654 * to scantable order!
/* Apply an IDCT coefficient permutation in place to an 8x8 block, visiting
 * only the first `last`+1 positions in scan order (the rest are known zero). */
4656 void ff_block_permute(int16_t *block, uint8_t *permutation,
4657 const uint8_t *scantable, int last)
4664 //FIXME it is ok but not clean and might fail for some permutations
4665 // if (permutation[1] == 1)
/* Copy the touched coefficients out, then scatter them back through the
 * permutation so overlapping moves cannot clobber unread values. */
4668 for (i = 0; i <= last; i++) {
4669 const int j = scantable[i];
4674 for (i = 0; i <= last; i++) {
4675 const int j = scantable[i];
4676 const int perm_j = permutation[j];
4677 block[perm_j] = temp[j];
/* Plain (non-trellis) quantization of one 8x8 block: forward DCT, optional
 * denoising, then dead-zone quantization with the intra/inter bias.
 * Sets *overflow when a quantized level exceeded max_qcoeff and finally
 * permutes the nonzero coefficients for the IDCT in use.
 * Returns the index of the last nonzero coefficient in scan order. */
4681 int ff_dct_quantize_c(MpegEncContext *s,
4682 int16_t *block, int n,
4683 int qscale, int *overflow)
4685 int i, j, level, last_non_zero, q, start_i;
4687 const uint8_t *scantable;
4690 unsigned int threshold1, threshold2;
4692 s->fdsp.fdct(block);
4694 if(s->dct_error_sum)
4695 s->denoise_dct(s, block);
/* Intra: DC is quantized separately with its own divisor q. */
4698 scantable= s->intra_scantable.scantable;
4706 /* For AIC we skip quant/dequant of INTRADC */
4709 /* note: block[0] is assumed to be positive */
4710 block[0] = (block[0] + (q >> 1)) / q;
4713 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4714 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4716 scantable= s->inter_scantable.scantable;
4719 qmat = s->q_inter_matrix[qscale];
4720 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4722 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4723 threshold2= (threshold1<<1);
/* Scan backwards to find the last coefficient above the dead zone. */
4724 for(i=63;i>=start_i;i--) {
4726 level = block[j] * qmat[j];
4728 if(((unsigned)(level+threshold1))>threshold2){
/* Quantize everything up to last_non_zero; small values collapse to 0. */
4735 for(i=start_i; i<=last_non_zero; i++) {
4737 level = block[j] * qmat[j];
4739 // if( bias+level >= (1<<QMAT_SHIFT)
4740 // || bias-level >= (1<<QMAT_SHIFT)){
4741 if(((unsigned)(level+threshold1))>threshold2){
4743 level= (bias + level)>>QMAT_SHIFT;
4746 level= (bias - level)>>QMAT_SHIFT;
4754 *overflow= s->max_qcoeff < max; //overflow might have happened
4756 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4757 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4758 ff_block_permute(block, s->idsp.idct_permutation,
4759 scantable, last_non_zero);
4761 return last_non_zero;
/* Shorthand for the option-table entries below. */
4764 #define OFFSET(x) offsetof(MpegEncContext, x)
4765 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* H.263 private options: overlapped block motion compensation and
 * RFC 2190 macroblock-info emission (max payload size in bytes). */
4766 static const AVOption h263_options[] = {
4767 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4768 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass exposing h263_options through the generic AVOptions API. */
4773 static const AVClass h263_class = {
4774 .class_name = "H.263 encoder",
4775 .item_name = av_default_item_name,
4776 .option = h263_options,
4777 .version = LIBAVUTIL_VERSION_INT,
/* H.263 / H.263-1996 encoder registration: a thin wrapper around the
 * shared mpegvideo encode entry points (init/encode2/close). */
4780 AVCodec ff_h263_encoder = {
4782 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4783 .type = AVMEDIA_TYPE_VIDEO,
4784 .id = AV_CODEC_ID_H263,
4785 .priv_data_size = sizeof(MpegEncContext),
4786 .init = ff_mpv_encode_init,
4787 .encode2 = ff_mpv_encode_picture,
4788 .close = ff_mpv_encode_end,
4789 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4790 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4791 .priv_class = &h263_class,
/* H.263+ private options: unlimited MVs, alternative inter VLC, OBMC
 * and structured slices (slice position written at each GOB header). */
4794 static const AVOption h263p_options[] = {
4795 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4796 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4797 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4798 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
/* AVClass exposing h263p_options through the generic AVOptions API. */
4802 static const AVClass h263p_class = {
4803 .class_name = "H.263p encoder",
4804 .item_name = av_default_item_name,
4805 .option = h263p_options,
4806 .version = LIBAVUTIL_VERSION_INT,
/* H.263+ / H.263-1998 encoder registration; unlike baseline H.263 it
 * advertises slice-threading capability. */
4809 AVCodec ff_h263p_encoder = {
4811 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4812 .type = AVMEDIA_TYPE_VIDEO,
4813 .id = AV_CODEC_ID_H263P,
4814 .priv_data_size = sizeof(MpegEncContext),
4815 .init = ff_mpv_encode_init,
4816 .encode2 = ff_mpv_encode_picture,
4817 .close = ff_mpv_encode_end,
4818 .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4819 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4820 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4821 .priv_class = &h263p_class,
/* MSMPEG-4 v2 has no private options; only the generic mpegvideo
 * option table is exposed. */
4824 static const AVClass msmpeg4v2_class = {
4825 .class_name = "msmpeg4v2 encoder",
4826 .item_name = av_default_item_name,
4827 .option = ff_mpv_generic_options,
4828 .version = LIBAVUTIL_VERSION_INT,
/* MSMPEG-4 v2 encoder registration, sharing the mpegvideo entry points. */
4831 AVCodec ff_msmpeg4v2_encoder = {
4832 .name = "msmpeg4v2",
4833 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4834 .type = AVMEDIA_TYPE_VIDEO,
4835 .id = AV_CODEC_ID_MSMPEG4V2,
4836 .priv_data_size = sizeof(MpegEncContext),
4837 .init = ff_mpv_encode_init,
4838 .encode2 = ff_mpv_encode_picture,
4839 .close = ff_mpv_encode_end,
4840 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4841 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4842 .priv_class = &msmpeg4v2_class,
/* MSMPEG-4 v3, generic options only (same pattern as v2 above). */
4845 static const AVClass msmpeg4v3_class = {
4846 .class_name = "msmpeg4v3 encoder",
4847 .item_name = av_default_item_name,
4848 .option = ff_mpv_generic_options,
4849 .version = LIBAVUTIL_VERSION_INT,
/* MSMPEG-4 v3 encoder registration, sharing the mpegvideo entry points. */
4852 AVCodec ff_msmpeg4v3_encoder = {
4854 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4855 .type = AVMEDIA_TYPE_VIDEO,
4856 .id = AV_CODEC_ID_MSMPEG4V3,
4857 .priv_data_size = sizeof(MpegEncContext),
4858 .init = ff_mpv_encode_init,
4859 .encode2 = ff_mpv_encode_picture,
4860 .close = ff_mpv_encode_end,
4861 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4862 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4863 .priv_class = &msmpeg4v3_class,
/* WMV1 has no private options; generic mpegvideo options only. */
4866 static const AVClass wmv1_class = {
4867 .class_name = "wmv1 encoder",
4868 .item_name = av_default_item_name,
4869 .option = ff_mpv_generic_options,
4870 .version = LIBAVUTIL_VERSION_INT,
4873 AVCodec ff_wmv1_encoder = {
4875 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4876 .type = AVMEDIA_TYPE_VIDEO,
4877 .id = AV_CODEC_ID_WMV1,
4878 .priv_data_size = sizeof(MpegEncContext),
4879 .init = ff_mpv_encode_init,
4880 .encode2 = ff_mpv_encode_picture,
4881 .close = ff_mpv_encode_end,
4882 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4883 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4884 .priv_class = &wmv1_class,