2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/mem_internal.h"
40 #include "libavutil/pixdesc.h"
41 #include "libavutil/opt.h"
42 #include "libavutil/thread.h"
47 #include "mpegvideo.h"
48 #include "mpegvideodata.h"
52 #include "mjpegenc_common.h"
54 #include "mpegutils.h"
56 #include "speedhqenc.h"
58 #include "pixblockdsp.h"
62 #include "aandcttab.h"
64 #include "mpeg4video.h"
66 #include "bytestream.h"
69 #include "packet_internal.h"
73 #define QUANT_BIAS_SHIFT 8
75 #define QMAT_SHIFT_MMX 16
78 static int encode_picture(MpegEncContext *s, int picture_number);
79 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
80 static int sse_mb(MpegEncContext *s);
81 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
82 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
84 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
85 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
87 const AVOption ff_mpv_generic_options[] = {
/**
 * Precompute reciprocal quantization tables for every qscale in [qmin, qmax].
 * qmat gets one 32-bit multiplier table per qscale; qmat16 gets the 16-bit
 * multiplier/bias pair used by the MMX-style quantizer. The table layout is
 * chosen per fDCT implementation (islow/faan vs. ifast vs. default), since
 * ifast bakes the AAN scale factors into the divisor.
 * NOTE(review): several original lines are elided in this view; comments
 * describe only the code that is visible here.
 */
92 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
93 uint16_t (*qmat16)[2][64],
94 const uint16_t *quant_matrix,
95 int bias, int qmin, int qmax, int intra)
97 FDCTDSPContext *fdsp = &s->fdsp;
/* One table row per requested qscale value. */
101 for (qscale = qmin; qscale <= qmax; qscale++) {
/* MPEG-2 non-linear qscale maps through a lookup table; linear is qscale*2. */
105 if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
106 else qscale2 = qscale << 1;
/* Branch 1: accurate fDCTs (jpeg islow 8/10-bit, faan) — plain reciprocal. */
108 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
110 fdsp->fdct == ff_faandct ||
111 #endif /* CONFIG_FAANDCT */
112 fdsp->fdct == ff_jpeg_fdct_islow_10) {
113 for (i = 0; i < 64; i++) {
/* idct_permutation maps natural order to the IDCT's coefficient order. */
114 const int j = s->idsp.idct_permutation[i];
115 int64_t den = (int64_t) qscale2 * quant_matrix[j];
116 /* 16 <= qscale * quant_matrix[i] <= 7905
117 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
118 * 19952 <= x <= 249205026
119 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
120 * 3444240 >= (1 << 36) / (x) >= 275 */
122 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
/* Branch 2: ifast fDCT — divisor also carries the AAN scale factor. */
124 } else if (fdsp->fdct == ff_fdct_ifast) {
125 for (i = 0; i < 64; i++) {
126 const int j = s->idsp.idct_permutation[i];
127 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
128 /* 16 <= qscale * quant_matrix[i] <= 7905
129 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
130 * 19952 <= x <= 249205026
131 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
132 * 3444240 >= (1 << 36) / (x) >= 275 */
/* Extra +14 compensates the AAN scale embedded in den above. */
134 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
/* Branch 3 (default): fill both the 32-bit and the 16-bit MMX tables. */
137 for (i = 0; i < 64; i++) {
138 const int j = s->idsp.idct_permutation[i];
139 int64_t den = (int64_t) qscale2 * quant_matrix[j];
140 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
141 * Assume x = qscale * quant_matrix[i]
143 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
144 * so 32768 >= (1 << 19) / (x) >= 67 */
145 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
146 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
147 // (qscale * quant_matrix[i]);
148 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
/* Clamp degenerate multipliers (0 or exactly 2^15) into the valid range. */
150 if (qmat16[qscale][0][i] == 0 ||
151 qmat16[qscale][0][i] == 128 * 256)
152 qmat16[qscale][0][i] = 128 * 256 - 1;
/* Second plane holds the rounding bias, rescaled against the multiplier. */
153 qmat16[qscale][1][i] =
154 ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
155 qmat16[qscale][0][i]);
/* Overflow guard: shrink the effective shift while products could exceed
 * INT_MAX, warning once QMAT_SHIFT proves too large for this matrix. */
159 for (i = intra; i < 64; i++) {
161 if (fdsp->fdct == ff_fdct_ifast) {
162 max = (8191LL * ff_aanscales[i]) >> 14;
164 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
170 av_log(s->avctx, AV_LOG_INFO,
171 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/**
 * Derive s->qscale (and lambda2) from the current rate-control lambda.
 * NOTE(review): the non-linear branch condition contains "&& 0", i.e. it is
 * deliberately disabled dead code kept for reference — the table search below
 * never runs; verify against upstream history before re-enabling.
 */
176 static inline void update_qscale(MpegEncContext *s)
178 if (s->q_scale_type == 1 && 0) {
180 int bestdiff=INT_MAX;
/* Search the MPEG-2 non-linear qscale table for the entry closest to
 * lambda, skipping values outside [qmin, qmax] (qmax may be ignored
 * while in VBV-forced overshoot, per vbv_ignore_qmax). */
183 for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
184 int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
185 if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
186 (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
188 if (diff < bestdiff) {
/* Linear mapping: qscale ≈ lambda * 139 / 2^(FF_LAMBDA_SHIFT+7), rounded. */
195 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
196 (FF_LAMBDA_SHIFT + 7);
/* When VBV forces qmax to be ignored, allow up to the codec ceiling of 31. */
197 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
/* Keep lambda2 (squared lambda, rounded) in sync for RD decisions. */
200 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/**
 * Write a 64-entry quantization matrix to the bitstream, 8 bits per entry,
 * in zigzag scan order (as required by the MPEG bitstream syntax).
 */
204 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
210 for (i = 0; i < 64; i++) {
211 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
218 * init s->current_picture.qscale_table from s->lambda_table
220 void ff_init_qscale_tab(MpegEncContext *s)
222 int8_t * const qscale_table = s->current_picture.qscale_table;
/* Convert each macroblock's lambda to a qp using the same linear
 * lambda->qscale mapping as update_qscale(), clamped to the user range.
 * mb_index2xy translates sequential MB index to the stride-based index. */
225 for (i = 0; i < s->mb_num; i++) {
226 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
227 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
228 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/**
 * Copy the per-frame fields that motion estimation may have changed from the
 * master context into a duplicate (slice-thread) context, so all slice
 * contexts agree before entropy coding.
 */
233 static void update_duplicate_context_after_me(MpegEncContext *dst,
/* Field-wise shallow copy helper; only the listed members are synced. */
236 #define COPY(a) dst->a= src->a
238 COPY(current_picture);
244 COPY(picture_in_gop_number);
245 COPY(gop_picture_number);
246 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
247 COPY(progressive_frame); // FIXME don't set in encode_header
248 COPY(partitioned_frame); // FIXME don't set in encode_header
/**
 * One-time initialization of the static encoder tables (run via
 * ff_thread_once from mpv_encode_defaults). The visible loop marks the
 * fcode table entries for small motion vectors (-16..15) with fcode 1.
 */
252 static void mpv_encode_init_static(void)
254 for (int i = -16; i < 16; i++)
255 default_fcode_tab[i + MAX_MV] = 1;
259 * Set the given MpegEncContext to defaults for encoding.
260 * the changed fields will not depend upon the prior state of the MpegEncContext.
262 static void mpv_encode_defaults(MpegEncContext *s)
/* Guard so the static tables are initialized exactly once across threads. */
264 static AVOnce init_static_once = AV_ONCE_INIT;
/* Shared decoder/encoder defaults first, then encoder-only state. */
266 ff_mpv_common_defaults(s);
268 ff_thread_once(&init_static_once, mpv_encode_init_static);
/* Point at the process-wide default ME penalty / fcode tables. */
270 s->me.mv_penalty = default_mv_penalty;
271 s->fcode_tab = default_fcode_tab;
/* Reset picture counters for a fresh encoding session. */
273 s->input_picture_number = 0;
274 s->picture_in_gop_number = 0;
/**
 * Select the DCT quantization implementations for this context: install the
 * C fallbacks where no arch-specific routine was set, keep the plain
 * quantizer available as fast_dct_quantize, and switch the main quantizer
 * to the trellis version when the user requested trellis quantization.
 */
277 av_cold int ff_dct_encode_init(MpegEncContext *s)
/* Architecture-specific overrides (x86) are applied first... */
280 ff_dct_encode_init_x86(s);
282 if (CONFIG_H263_ENCODER)
283 ff_h263dsp_init(&s->h263dsp);
/* ...then fill any slot the arch init left empty with the C version. */
284 if (!s->dct_quantize)
285 s->dct_quantize = ff_dct_quantize_c;
287 s->denoise_dct = denoise_dct_c;
/* Remember the non-trellis quantizer before possibly replacing it below. */
288 s->fast_dct_quantize = s->dct_quantize;
289 if (s->avctx->trellis)
290 s->dct_quantize = dct_quantize_trellis_c;
295 /* init video encoder */
/**
 * Initialize an MPEG-family video encoder: validate the user's options
 * against the selected codec's constraints, set per-codec modes, allocate
 * quantization/picture tables, and initialize the DSP, rate-control and
 * bitstream subsystems. Returns 0 on success or a negative AVERROR code.
 * NOTE(review): many original lines are elided in this view; comments below
 * only describe the code that is visible.
 */
296 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
298 MpegEncContext *s = avctx->priv_data;
299 AVCPBProperties *cpb_props;
302 mpv_encode_defaults(s);
/* --- Pixel format -> chroma subsampling mode --------------------------- */
304 switch (avctx->pix_fmt) {
305 case AV_PIX_FMT_YUVJ444P:
306 case AV_PIX_FMT_YUV444P:
307 s->chroma_format = CHROMA_444;
309 case AV_PIX_FMT_YUVJ422P:
310 case AV_PIX_FMT_YUV422P:
311 s->chroma_format = CHROMA_422;
313 case AV_PIX_FMT_YUVJ420P:
314 case AV_PIX_FMT_YUV420P:
316 s->chroma_format = CHROMA_420;
320 avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
/* --- Legacy AVCodecContext options folded into private options --------- */
322 #if FF_API_PRIVATE_OPT
323 FF_DISABLE_DEPRECATION_WARNINGS
324 if (avctx->rtp_payload_size)
325 s->rtp_payload_size = avctx->rtp_payload_size;
326 if (avctx->me_penalty_compensation)
327 s->me_penalty_compensation = avctx->me_penalty_compensation;
329 s->me_pre = avctx->pre_me;
330 FF_ENABLE_DEPRECATION_WARNINGS
/* --- Basic stream parameters ------------------------------------------- */
333 s->bit_rate = avctx->bit_rate;
334 s->width = avctx->width;
335 s->height = avctx->height;
/* GOP length is capped at 600 unless the user opted into experimental mode. */
336 if (avctx->gop_size > 600 &&
337 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
338 av_log(avctx, AV_LOG_WARNING,
339 "keyframe interval too large!, reducing it from %d to %d\n",
340 avctx->gop_size, 600);
341 avctx->gop_size = 600;
343 s->gop_size = avctx->gop_size;
345 if (avctx->max_b_frames > MAX_B_FRAMES) {
346 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
347 "is %d.\n", MAX_B_FRAMES);
348 avctx->max_b_frames = MAX_B_FRAMES;
350 s->max_b_frames = avctx->max_b_frames;
351 s->codec_id = avctx->codec->id;
352 s->strict_std_compliance = avctx->strict_std_compliance;
353 s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
354 s->rtp_mode = !!s->rtp_payload_size;
355 s->intra_dc_precision = avctx->intra_dc_precision;
357 // workaround some differences between how applications specify dc precision
/* Some apps use base 0, others base 8, both meaning "8-bit"; normalize. */
358 if (s->intra_dc_precision < 0) {
359 s->intra_dc_precision += 8;
360 } else if (s->intra_dc_precision >= 8)
361 s->intra_dc_precision -= 8;
363 if (s->intra_dc_precision < 0) {
364 av_log(avctx, AV_LOG_ERROR,
365 "intra dc precision must be positive, note some applications use"
366 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
367 return AVERROR(EINVAL);
370 if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))
/* Only MPEG-2 supports non-zero intra DC precision (up to 3). */
373 if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
374 av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
375 return AVERROR(EINVAL);
377 s->user_specified_pts = AV_NOPTS_VALUE;
379 if (s->gop_size <= 1) {
387 s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
/* Adaptive quantization is enabled when any masking/RD option needs it. */
389 s->adaptive_quant = (avctx->lumi_masking ||
390 avctx->dark_masking ||
391 avctx->temporal_cplx_masking ||
392 avctx->spatial_cplx_masking ||
395 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
398 s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
/* --- Rate control / VBV validation ------------------------------------- */
/* If only max rate was given, pick a codec-appropriate VBV buffer size. */
400 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
401 switch(avctx->codec_id) {
402 case AV_CODEC_ID_MPEG1VIDEO:
403 case AV_CODEC_ID_MPEG2VIDEO:
404 avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
406 case AV_CODEC_ID_MPEG4:
407 case AV_CODEC_ID_MSMPEG4V1:
408 case AV_CODEC_ID_MSMPEG4V2:
409 case AV_CODEC_ID_MSMPEG4V3:
/* Piecewise-linear interpolation of the standard VBV sizes by rate. */
410 if (avctx->rc_max_rate >= 15000000) {
411 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
412 } else if(avctx->rc_max_rate >= 2000000) {
413 avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
414 } else if(avctx->rc_max_rate >= 384000) {
415 avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
417 avctx->rc_buffer_size = 40;
418 avctx->rc_buffer_size *= 16384;
421 if (avctx->rc_buffer_size) {
422 av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
/* max rate and buffer size must be specified together (or neither). */
426 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
427 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
428 return AVERROR(EINVAL);
431 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
432 av_log(avctx, AV_LOG_INFO,
433 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
436 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
437 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
438 return AVERROR(EINVAL);
441 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
442 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
443 return AVERROR(EINVAL);
/* CBR target equal to max but not min is unachievable; warn the user. */
446 if (avctx->rc_max_rate &&
447 avctx->rc_max_rate == avctx->bit_rate &&
448 avctx->rc_max_rate != avctx->rc_min_rate) {
449 av_log(avctx, AV_LOG_INFO,
450 "impossible bitrate constraints, this will fail\n");
/* VBV buffer must hold at least one frame's worth of bits. */
453 if (avctx->rc_buffer_size &&
454 avctx->bit_rate * (int64_t)avctx->time_base.num >
455 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
456 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
457 return AVERROR(EINVAL);
/* Tolerance below one frame of bits makes rate control unstable; widen it. */
460 if (!s->fixed_qscale &&
461 avctx->bit_rate * av_q2d(avctx->time_base) >
462 avctx->bit_rate_tolerance) {
463 av_log(avctx, AV_LOG_WARNING,
464 "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
465 avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
/* CBR MPEG-1/2: vbv_delay is a 16-bit 90 kHz field; warn if it can't fit. */
468 if (avctx->rc_max_rate &&
469 avctx->rc_min_rate == avctx->rc_max_rate &&
470 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
471 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
472 90000LL * (avctx->rc_buffer_size - 1) >
473 avctx->rc_max_rate * 0xFFFFLL) {
474 av_log(avctx, AV_LOG_INFO,
475 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
476 "specified vbv buffer is too large for the given bitrate!\n");
/* --- Feature/codec compatibility checks -------------------------------- */
479 if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
480 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
481 s->codec_id != AV_CODEC_ID_FLV1) {
482 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
483 return AVERROR(EINVAL);
486 if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
487 av_log(avctx, AV_LOG_ERROR,
488 "OBMC is only supported with simple mb decision\n");
489 return AVERROR(EINVAL);
492 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
493 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
494 return AVERROR(EINVAL);
497 if (s->max_b_frames &&
498 s->codec_id != AV_CODEC_ID_MPEG4 &&
499 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
500 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
501 av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
502 return AVERROR(EINVAL);
504 if (s->max_b_frames < 0) {
505 av_log(avctx, AV_LOG_ERROR,
506 "max b frames must be 0 or positive for mpegvideo based encoders\n");
507 return AVERROR(EINVAL);
/* Pixel aspect ratio is stored in 8 bits per component for these codecs. */
510 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
511 s->codec_id == AV_CODEC_ID_H263 ||
512 s->codec_id == AV_CODEC_ID_H263P) &&
513 (avctx->sample_aspect_ratio.num > 255 ||
514 avctx->sample_aspect_ratio.den > 255)) {
515 av_log(avctx, AV_LOG_WARNING,
516 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
517 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
518 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
519 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
/* --- Resolution limits per codec --------------------------------------- */
522 if ((s->codec_id == AV_CODEC_ID_H263 ||
523 s->codec_id == AV_CODEC_ID_H263P) &&
524 (avctx->width > 2048 ||
525 avctx->height > 1152 )) {
526 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
527 return AVERROR(EINVAL);
529 if ((s->codec_id == AV_CODEC_ID_H263 ||
530 s->codec_id == AV_CODEC_ID_H263P) &&
531 ((avctx->width &3) ||
532 (avctx->height&3) )) {
533 av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
534 return AVERROR(EINVAL);
537 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
538 (avctx->width > 4095 ||
539 avctx->height > 4095 )) {
540 av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
541 return AVERROR(EINVAL);
544 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
545 (avctx->width > 16383 ||
546 avctx->height > 16383 )) {
547 av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
548 return AVERROR(EINVAL);
551 if (s->codec_id == AV_CODEC_ID_RV10 &&
553 avctx->height&15 )) {
554 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
555 return AVERROR(EINVAL);
558 if (s->codec_id == AV_CODEC_ID_RV20 &&
561 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
562 return AVERROR(EINVAL);
565 if ((s->codec_id == AV_CODEC_ID_WMV1 ||
566 s->codec_id == AV_CODEC_ID_WMV2) &&
568 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
569 return AVERROR(EINVAL);
572 if ((avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
573 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
574 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
575 return AVERROR(EINVAL);
/* --- Quantizer style / RD option validation ---------------------------- */
578 #if FF_API_PRIVATE_OPT
579 FF_DISABLE_DEPRECATION_WARNINGS
580 if (avctx->mpeg_quant)
582 FF_ENABLE_DEPRECATION_WARNINGS
585 // FIXME mpeg2 uses that too
586 if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
587 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
588 av_log(avctx, AV_LOG_ERROR,
589 "mpeg2 style quantization not supported by codec\n");
590 return AVERROR(EINVAL);
593 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
594 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
595 return AVERROR(EINVAL);
598 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
599 avctx->mb_decision != FF_MB_DECISION_RD) {
600 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
601 return AVERROR(EINVAL);
604 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
605 (s->codec_id == AV_CODEC_ID_AMV ||
606 s->codec_id == AV_CODEC_ID_MJPEG)) {
607 // Used to produce garbage with MJPEG.
608 av_log(avctx, AV_LOG_ERROR,
609 "QP RD is no longer compatible with MJPEG or AMV\n");
610 return AVERROR(EINVAL);
613 #if FF_API_PRIVATE_OPT
614 FF_DISABLE_DEPRECATION_WARNINGS
615 if (avctx->scenechange_threshold)
616 s->scenechange_threshold = avctx->scenechange_threshold;
617 FF_ENABLE_DEPRECATION_WARNINGS
/* Closed GOP requires scene-change detection to be effectively disabled. */
620 if (s->scenechange_threshold < 1000000000 &&
621 (avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
622 av_log(avctx, AV_LOG_ERROR,
623 "closed gop with scene change detection are not supported yet, "
624 "set threshold to 1000000000\n");
625 return AVERROR_PATCHWELCOME;
628 if (avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
629 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
630 s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
631 av_log(avctx, AV_LOG_ERROR,
632 "low delay forcing is only available for mpeg2, "
633 "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
634 return AVERROR(EINVAL);
636 if (s->max_b_frames != 0) {
637 av_log(avctx, AV_LOG_ERROR,
638 "B-frames cannot be used with low delay\n");
639 return AVERROR(EINVAL);
643 if (s->q_scale_type == 1) {
644 if (avctx->qmax > 28) {
645 av_log(avctx, AV_LOG_ERROR,
646 "non linear quant only supports qmax <= 28 currently\n");
647 return AVERROR_PATCHWELCOME;
/* --- Slicing / threading constraints ----------------------------------- */
651 if (avctx->slices > 1 &&
652 (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
653 av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
654 return AVERROR(EINVAL);
657 if (avctx->thread_count > 1 &&
658 s->codec_id != AV_CODEC_ID_MPEG4 &&
659 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
660 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
661 s->codec_id != AV_CODEC_ID_MJPEG &&
662 (s->codec_id != AV_CODEC_ID_H263P)) {
663 av_log(avctx, AV_LOG_ERROR,
664 "multi threaded encoding not supported by codec\n");
665 return AVERROR_PATCHWELCOME;
668 if (avctx->thread_count < 1) {
669 av_log(avctx, AV_LOG_ERROR,
670 "automatic thread number detection not supported by codec, "
672 return AVERROR_PATCHWELCOME;
/* --- B-frame strategy / framerate normalization ------------------------ */
675 #if FF_API_PRIVATE_OPT
676 FF_DISABLE_DEPRECATION_WARNINGS
677 if (avctx->b_frame_strategy)
678 s->b_frame_strategy = avctx->b_frame_strategy;
679 if (avctx->b_sensitivity != 40)
680 s->b_sensitivity = avctx->b_sensitivity;
681 FF_ENABLE_DEPRECATION_WARNINGS
684 if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
685 av_log(avctx, AV_LOG_INFO,
686 "notice: b_frame_strategy only affects the first pass\n");
687 s->b_frame_strategy = 0;
/* Reduce the time base by its GCD so the bitstream framerate is canonical. */
690 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
692 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
693 avctx->time_base.den /= i;
694 avctx->time_base.num /= i;
/* --- Quantizer rounding biases ----------------------------------------- */
698 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
699 // (a + x * 3 / 8) / x
700 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
701 s->inter_quant_bias = 0;
703 s->intra_quant_bias = 0;
705 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
708 if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
709 av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
710 return AVERROR(EINVAL);
713 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
/* MPEG-4 stores the time base denominator in a 16-bit field. */
715 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
716 avctx->time_base.den > (1 << 16) - 1) {
717 av_log(avctx, AV_LOG_ERROR,
718 "timebase %d/%d not supported by MPEG 4 standard, "
719 "the maximum admitted value for the timebase denominator "
720 "is %d\n", avctx->time_base.num, avctx->time_base.den,
722 return AVERROR(EINVAL);
724 s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
/* --- Per-codec output format and mode selection ------------------------ */
726 switch (avctx->codec->id) {
727 case AV_CODEC_ID_MPEG1VIDEO:
728 s->out_format = FMT_MPEG1;
729 s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
730 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
732 case AV_CODEC_ID_MPEG2VIDEO:
733 s->out_format = FMT_MPEG1;
734 s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
735 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
738 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
739 case AV_CODEC_ID_MJPEG:
740 case AV_CODEC_ID_AMV:
741 s->out_format = FMT_MJPEG;
742 s->intra_only = 1; /* force intra only for jpeg */
743 if ((ret = ff_mjpeg_encode_init(s)) < 0)
749 case AV_CODEC_ID_SPEEDHQ:
750 s->out_format = FMT_SPEEDHQ;
751 s->intra_only = 1; /* force intra only for SHQ */
752 if (!CONFIG_SPEEDHQ_ENCODER)
753 return AVERROR_ENCODER_NOT_FOUND;
754 if ((ret = ff_speedhq_encode_init(s)) < 0)
759 case AV_CODEC_ID_H261:
760 if (!CONFIG_H261_ENCODER)
761 return AVERROR_ENCODER_NOT_FOUND;
762 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
763 av_log(avctx, AV_LOG_ERROR,
764 "The specified picture size of %dx%d is not valid for the "
765 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
766 s->width, s->height);
767 return AVERROR(EINVAL);
769 s->out_format = FMT_H261;
772 s->rtp_mode = 0; /* Sliced encoding not supported */
774 case AV_CODEC_ID_H263:
775 if (!CONFIG_H263_ENCODER)
776 return AVERROR_ENCODER_NOT_FOUND;
/* Index 8 in ff_h263_format marks "no matching standard picture size". */
777 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
778 s->width, s->height) == 8) {
779 av_log(avctx, AV_LOG_ERROR,
780 "The specified picture size of %dx%d is not valid for "
781 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
782 "352x288, 704x576, and 1408x1152. "
783 "Try H.263+.\n", s->width, s->height);
784 return AVERROR(EINVAL);
786 s->out_format = FMT_H263;
790 case AV_CODEC_ID_H263P:
791 s->out_format = FMT_H263;
794 s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
795 s->modified_quant = s->h263_aic;
796 s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
797 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
800 /* These are just to be sure */
804 case AV_CODEC_ID_FLV1:
805 s->out_format = FMT_H263;
806 s->h263_flv = 2; /* format = 1; 11-bit codes */
807 s->unrestricted_mv = 1;
808 s->rtp_mode = 0; /* don't allow GOB */
812 case AV_CODEC_ID_RV10:
813 s->out_format = FMT_H263;
817 case AV_CODEC_ID_RV20:
818 s->out_format = FMT_H263;
821 s->modified_quant = 1;
825 s->unrestricted_mv = 0;
827 case AV_CODEC_ID_MPEG4:
828 s->out_format = FMT_H263;
830 s->unrestricted_mv = 1;
831 s->low_delay = s->max_b_frames ? 0 : 1;
832 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
834 case AV_CODEC_ID_MSMPEG4V2:
835 s->out_format = FMT_H263;
837 s->unrestricted_mv = 1;
838 s->msmpeg4_version = 2;
842 case AV_CODEC_ID_MSMPEG4V3:
843 s->out_format = FMT_H263;
845 s->unrestricted_mv = 1;
846 s->msmpeg4_version = 3;
847 s->flipflop_rounding = 1;
851 case AV_CODEC_ID_WMV1:
852 s->out_format = FMT_H263;
854 s->unrestricted_mv = 1;
855 s->msmpeg4_version = 4;
856 s->flipflop_rounding = 1;
860 case AV_CODEC_ID_WMV2:
861 s->out_format = FMT_H263;
863 s->unrestricted_mv = 1;
864 s->msmpeg4_version = 5;
865 s->flipflop_rounding = 1;
870 return AVERROR(EINVAL);
873 #if FF_API_PRIVATE_OPT
874 FF_DISABLE_DEPRECATION_WARNINGS
875 if (avctx->noise_reduction)
876 s->noise_reduction = avctx->noise_reduction;
877 FF_ENABLE_DEPRECATION_WARNINGS
880 avctx->has_b_frames = !s->low_delay;
884 s->progressive_frame =
885 s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
886 AV_CODEC_FLAG_INTERLACED_ME) ||
/* --- Subsystem initialization and allocations -------------------------- */
891 if ((ret = ff_mpv_common_init(s)) < 0)
894 ff_fdctdsp_init(&s->fdsp, avctx);
895 ff_me_cmp_init(&s->mecc, avctx);
896 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
897 ff_pixblockdsp_init(&s->pdsp, avctx);
898 ff_qpeldsp_init(&s->qdsp);
/* MSMPEG4-family encoders collect AC statistics; allocate that buffer. */
900 if (s->msmpeg4_version) {
901 int ac_stats_size = 2 * 2 * (MAX_LEVEL + 1) * (MAX_RUN + 1) * 2 * sizeof(int);
902 if (!(s->ac_stats = av_mallocz(ac_stats_size)))
903 return AVERROR(ENOMEM);
/* All-or-nothing allocation of stats, quant tables and picture queues;
 * cleanup on failure is presumably handled by the caller via encode_end. */
906 if (!(avctx->stats_out = av_mallocz(256)) ||
907 !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix, 32) ||
908 !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix, 32) ||
909 !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix, 32) ||
910 !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16, 32) ||
911 !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
912 !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16, 32) ||
913 !FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_PICTURE_COUNT) ||
914 !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_PICTURE_COUNT))
915 return AVERROR(ENOMEM);
917 if (s->noise_reduction) {
918 if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
919 return AVERROR(ENOMEM);
922 ff_dct_encode_init(s);
924 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
925 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
927 if (s->slice_context_count > 1) {
930 if (avctx->codec_id == AV_CODEC_ID_H263P)
931 s->h263_slice_structured = 1;
934 s->quant_precision = 5;
936 #if FF_API_PRIVATE_OPT
937 FF_DISABLE_DEPRECATION_WARNINGS
938 if (avctx->frame_skip_threshold)
939 s->frame_skip_threshold = avctx->frame_skip_threshold;
940 if (avctx->frame_skip_factor)
941 s->frame_skip_factor = avctx->frame_skip_factor;
942 if (avctx->frame_skip_exp)
943 s->frame_skip_exp = avctx->frame_skip_exp;
944 if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
945 s->frame_skip_cmp = avctx->frame_skip_cmp;
946 FF_ENABLE_DEPRECATION_WARNINGS
949 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, avctx->ildct_cmp);
950 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
/* Format-specific bitstream encoder initialization. */
952 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
953 ff_h261_encode_init(s);
954 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
955 ff_h263_encode_init(s);
956 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
957 ff_msmpeg4_encode_init(s);
958 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
959 && s->out_format == FMT_MPEG1)
960 ff_mpeg1_encode_init(s);
/* --- Default quantization matrices, permuted for the selected IDCT ----- */
963 for (i = 0; i < 64; i++) {
964 int j = s->idsp.idct_permutation[i];
965 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
967 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
968 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
969 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
971 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
972 } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
974 s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
977 s->chroma_intra_matrix[j] =
978 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
979 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
/* User-supplied matrices override the codec defaults. */
981 if (avctx->intra_matrix)
982 s->intra_matrix[j] = avctx->intra_matrix[i];
983 if (avctx->inter_matrix)
984 s->inter_matrix[j] = avctx->inter_matrix[i];
987 /* precompute matrix */
988 /* for mjpeg, we do include qscale in the matrix */
989 if (s->out_format != FMT_MJPEG) {
990 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
991 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
993 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
994 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
998 if ((ret = ff_rate_control_init(s)) < 0)
1001 #if FF_API_PRIVATE_OPT
1002 FF_DISABLE_DEPRECATION_WARNINGS
1003 if (avctx->brd_scale)
1004 s->brd_scale = avctx->brd_scale;
1006 if (avctx->prediction_method)
1007 s->pred = avctx->prediction_method + 1;
1008 FF_ENABLE_DEPRECATION_WARNINGS
/* b_frame_strategy 2 needs downscaled scratch frames for lookahead. */
1011 if (s->b_frame_strategy == 2) {
1012 for (i = 0; i < s->max_b_frames + 2; i++) {
1013 s->tmp_frames[i] = av_frame_alloc();
1014 if (!s->tmp_frames[i])
1015 return AVERROR(ENOMEM);
1017 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1018 s->tmp_frames[i]->width = s->width >> s->brd_scale;
1019 s->tmp_frames[i]->height = s->height >> s->brd_scale;
1021 ret = av_frame_get_buffer(s->tmp_frames[i], 0);
/* Export coded picture buffer properties as stream side data. */
1027 cpb_props = ff_add_cpb_side_data(avctx);
1029 return AVERROR(ENOMEM);
1030 cpb_props->max_bitrate = avctx->rc_max_rate;
1031 cpb_props->min_bitrate = avctx->rc_min_rate;
1032 cpb_props->avg_bitrate = avctx->bit_rate;
1033 cpb_props->buffer_size = avctx->rc_buffer_size;
/**
 * Free all encoder-side resources allocated by ff_mpv_encode_init():
 * rate control state, common mpegvideo state, codec-specific state (MJPEG),
 * scratch frames, statistics buffers and the precomputed quant tables.
 */
1038 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1040 MpegEncContext *s = avctx->priv_data;
1043 ff_rate_control_uninit(s);
1045 ff_mpv_common_end(s);
1046 if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
1047 s->out_format == FMT_MJPEG)
1048 ff_mjpeg_encode_close(s);
1050 av_freep(&avctx->extradata);
1052 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1053 av_frame_free(&s->tmp_frames[i]);
1055 ff_free_picture_tables(&s->new_picture);
1056 ff_mpeg_unref_picture(avctx, &s->new_picture);
1058 av_freep(&avctx->stats_out);
1059 av_freep(&s->ac_stats);
/* The chroma tables may alias the luma tables (shared allocation);
 * only free them separately when they point at distinct memory. */
1061 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1062 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1063 s->q_chroma_intra_matrix= NULL;
1064 s->q_chroma_intra_matrix16= NULL;
1065 av_freep(&s->q_intra_matrix);
1066 av_freep(&s->q_inter_matrix);
1067 av_freep(&s->q_intra_matrix16);
1068 av_freep(&s->q_inter_matrix16);
1069 av_freep(&s->input_picture);
1070 av_freep(&s->reordered_input_picture);
1071 av_freep(&s->dct_offset);
/**
 * Sum of absolute errors of a 16x16 block against the constant value ref
 * (used as a flatness measure relative to the block mean by the caller).
 */
1076 static int get_sae(uint8_t *src, int ref, int stride)
1081 for (y = 0; y < 16; y++) {
1082 for (x = 0; x < 16; x++) {
1083 acc += FFABS(src[x + y * stride] - ref);
/**
 * Count 16x16 blocks that look better coded as intra: for each macroblock
 * compare the inter SAD against the previous frame with the block's
 * deviation from its own mean (SAE) plus a fixed margin of 500.
 * Used as a cheap scene-change heuristic over the frame interior
 * (dimensions rounded down to multiples of 16).
 */
1090 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1091 uint8_t *ref, int stride)
1097 h = s->height & ~15;
1099 for (y = 0; y < h; y += 16) {
1100 for (x = 0; x < w; x += 16) {
1101 int offset = x + y * stride;
/* Inter cost: SAD versus the co-located block in the reference frame. */
1102 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
/* Intra cost proxy: deviation from the block's own mean (+128 rounds). */
1104 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1105 int sae = get_sae(src + offset, mean, stride);
1107 acc += sae + 500 < sad;
/**
 * Thin wrapper around ff_alloc_picture() that forwards this encoder
 * context's geometry and stride state; `shared` selects referencing the
 * caller's buffers instead of allocating new ones. Returns ff_alloc_picture's
 * error code.
 */
1113 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1115 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1116 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1117 s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1118 &s->linesize, &s->uvlinesize);
/* Take one user-supplied input frame into the encoder: validate/derive its
 * pts, either reference the frame directly ("direct" path, when geometry
 * and alignment permit) or copy it into an internal Picture with edge
 * padding, then append it to the s->input_picture reordering FIFO.
 * NOTE(review): several lines of the original body (declarations, some
 * else-branches) are not visible here; comments only describe what is. */
1121 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1123 Picture *pic = NULL;
1125 int i, display_picture_number = 0, ret;
/* Number of frames the encoder must buffer before output: max_b_frames
 * when B-frames are used, otherwise 1 unless low_delay. */
1126 int encoding_delay = s->max_b_frames ? s->max_b_frames
1127 : (s->low_delay ? 0 : 1);
1128 int flush_offset = 1;
1133 display_picture_number = s->input_picture_number++;
/* --- pts validation / guessing --- */
1135 if (pts != AV_NOPTS_VALUE) {
1136 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1137 int64_t last = s->user_specified_pts;
/* Non-increasing pts is a hard error. */
1140 av_log(s->avctx, AV_LOG_ERROR,
1141 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1143 return AVERROR(EINVAL);
/* Remember the pts step of the first interval to later derive dts. */
1146 if (!s->low_delay && display_picture_number == 1)
1147 s->dts_delta = pts - last;
1149 s->user_specified_pts = pts;
/* No pts supplied: continue the previous sequence, or fall back to the
 * display picture number. */
1151 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1152 s->user_specified_pts =
1153 pts = s->user_specified_pts + 1;
1154 av_log(s->avctx, AV_LOG_INFO,
1155 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1158 pts = display_picture_number;
/* --- decide whether the frame can be used in-place ("direct") --- */
/* All of these conditions disqualify the zero-copy path. */
1162 if (!pic_arg->buf[0] ||
1163 pic_arg->linesize[0] != s->linesize ||
1164 pic_arg->linesize[1] != s->uvlinesize ||
1165 pic_arg->linesize[2] != s->uvlinesize)
1167 if ((s->width & 15) || (s->height & 15))
1169 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1171 if (s->linesize & (STRIDE_ALIGN-1))
1174 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1175 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1177 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1181 pic = &s->picture[i];
/* Direct path: just reference the caller's frame. */
1185 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1188 ret = alloc_picture(s, pic, direct);
1193 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1194 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1195 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
/* --- copy path: per-plane copy with chroma subsampling --- */
1198 int h_chroma_shift, v_chroma_shift;
1199 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1203 for (i = 0; i < 3; i++) {
1204 int src_stride = pic_arg->linesize[i];
1205 int dst_stride = i ? s->uvlinesize : s->linesize;
1206 int h_shift = i ? h_chroma_shift : 0;
1207 int v_shift = i ? v_chroma_shift : 0;
1208 int w = s->width >> h_shift;
1209 int h = s->height >> v_shift;
1210 uint8_t *src = pic_arg->data[i];
1211 uint8_t *dst = pic->f->data[i];
/* Interlaced MPEG-2 with tall alignment needs extra bottom padding
 * (see FFALIGN(height, 32) check). */
1214 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1215 && !s->progressive_sequence
1216 && FFALIGN(s->height, 32) - s->height > 16)
1219 if (!s->avctx->rc_buffer_size)
1220 dst += INPLACE_OFFSET;
/* Fast path when strides match; otherwise row-by-row copy. */
1222 if (src_stride == dst_stride)
1223 memcpy(dst, src, src_stride * h);
1226 uint8_t *dst2 = dst;
1228 memcpy(dst2, src, w);
/* Replicate edge pixels when dimensions are not macroblock-aligned. */
1233 if ((s->width & 15) || (s->height & (vpad-1))) {
1234 s->mpvencdsp.draw_edges(dst, dst_stride,
1244 ret = av_frame_copy_props(pic->f, pic_arg);
1248 pic->f->display_picture_number = display_picture_number;
1249 pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1251 /* Flushing: When we have not received enough input frames,
1252 * ensure s->input_picture[0] contains the first picture */
1253 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1254 if (s->input_picture[flush_offset])
1257 if (flush_offset <= 1)
1260 encoding_delay = encoding_delay - flush_offset + 1;
1263 /* shift buffer entries */
1264 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1265 s->input_picture[i - flush_offset] = s->input_picture[i];
1267 s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether picture `p` is similar enough to reference `ref` to be
 * skipped entirely. Accumulates a per-8x8-block comparison score over all
 * three planes using frame_skip_cmp, with the metric selected by
 * |frame_skip_exp| (0=max, 1=sum of abs, 2..4=higher powers); a negative
 * frame_skip_exp normalizes the score back via pow(). The frame is
 * skippable when the score is below frame_skip_threshold or below a
 * lambda-scaled frame_skip_factor. */
1272 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1276 int64_t score64 = 0;
1278 for (plane = 0; plane < 3; plane++) {
1279 const int stride = p->f->linesize[plane];
/* Luma covers 2x2 8-pixel blocks per MB dimension, chroma 1x1. */
1280 const int bw = plane ? 1 : 2;
1281 for (y = 0; y < s->mb_height * bw; y++) {
1282 for (x = 0; x < s->mb_width * bw; x++) {
/* Non-shared pictures carry an INPLACE-style 16-byte offset. */
1283 int off = p->shared ? 0 : 16;
1284 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1285 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1286 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1288 switch (FFABS(s->frame_skip_exp)) {
1289 case 0: score = FFMAX(score, v); break;
1290 case 1: score += FFABS(v); break;
1291 case 2: score64 += v * (int64_t)v; break;
1292 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1293 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
/* Negative exponent: undo the power by taking the (1/-exp)-th root of
 * the per-MB average. */
1302 if (s->frame_skip_exp < 0)
1303 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1304 -1.0/s->frame_skip_exp);
1306 if (score64 < s->frame_skip_threshold)
1308 if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
/* Push one frame through encoder context `c` and drain one packet via the
 * send/receive API; used by estimate_best_b_count() on a scratch encoder.
 * EAGAIN/EOF from receive are tolerated; other errors propagate.
 * NOTE(review): the return value computation is outside the visible
 * lines — presumably the accumulated packet size. */
1313 static int encode_frame(AVCodecContext *c, AVFrame *frame, AVPacket *pkt)
1318 ret = avcodec_send_frame(c, frame);
1323 ret = avcodec_receive_packet(c, pkt);
1326 av_packet_unref(pkt);
1327 } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
/* B-frame strategy 2: brute-force search for the best number of B-frames.
 * Downscales the buffered input pictures by 1<<brd_scale, then for every
 * candidate B-count j re-encodes the mini-sequence with a scratch encoder
 * and picks the j with the lowest rate-distortion cost (bits*lambda2 plus
 * the encoder's reported SSE error). Returns the best count, or negative
 * on error. */
1334 static int estimate_best_b_count(MpegEncContext *s)
1336 const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1338 const int scale = s->brd_scale;
1339 int width = s->width >> scale;
1340 int height = s->height >> scale;
1341 int i, j, out_size, p_lambda, b_lambda, lambda2;
1342 int64_t best_rd = INT64_MAX;
1343 int best_b_count = -1;
1346 av_assert0(scale >= 0 && scale <= 3);
1348 pkt = av_packet_alloc();
1350 return AVERROR(ENOMEM);
/* Use the last observed per-type lambdas; fall back to P for B. */
1353 //s->next_picture_ptr->quality;
1354 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1355 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1356 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1357 if (!b_lambda) // FIXME we should do this somewhere else
1358 b_lambda = p_lambda;
1359 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* --- build downscaled copies of next_picture + buffered inputs --- */
1362 for (i = 0; i < s->max_b_frames + 2; i++) {
1363 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1364 s->next_picture_ptr;
1367 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1368 pre_input = *pre_input_ptr;
1369 memcpy(data, pre_input_ptr->f->data, sizeof(data));
/* Skip the padding offset for internally-allocated (non-shared) pics. */
1371 if (!pre_input.shared && i) {
1372 data[0] += INPLACE_OFFSET;
1373 data[1] += INPLACE_OFFSET;
1374 data[2] += INPLACE_OFFSET;
1377 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1378 s->tmp_frames[i]->linesize[0],
1380 pre_input.f->linesize[0],
1382 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1383 s->tmp_frames[i]->linesize[1],
1385 pre_input.f->linesize[1],
1386 width >> 1, height >> 1);
1387 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1388 s->tmp_frames[i]->linesize[2],
1390 pre_input.f->linesize[2],
1391 width >> 1, height >> 1);
/* --- try each candidate B-frame count j --- */
1395 for (j = 0; j < s->max_b_frames + 1; j++) {
1399 if (!s->input_picture[j])
/* Fresh scratch encoder per candidate, mirroring key options. */
1402 c = avcodec_alloc_context3(NULL);
1404 ret = AVERROR(ENOMEM);
1410 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1411 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1412 c->mb_decision = s->avctx->mb_decision;
1413 c->me_cmp = s->avctx->me_cmp;
1414 c->mb_cmp = s->avctx->mb_cmp;
1415 c->me_sub_cmp = s->avctx->me_sub_cmp;
1416 c->pix_fmt = AV_PIX_FMT_YUV420P;
1417 c->time_base = s->avctx->time_base;
1418 c->max_b_frames = s->max_b_frames;
1420 ret = avcodec_open2(c, codec, NULL);
/* First frame is a cheap keyframe (qp 1); its cost is not counted. */
1425 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1426 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1428 out_size = encode_frame(c, s->tmp_frames[0], pkt);
1434 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
/* Every (j+1)-th frame is a P, the rest are Bs. */
1436 for (i = 0; i < s->max_b_frames + 1; i++) {
1437 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1439 s->tmp_frames[i + 1]->pict_type = is_p ?
1440 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1441 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1443 out_size = encode_frame(c, s->tmp_frames[i + 1], pkt);
1449 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1452 /* get the delayed frames */
1453 out_size = encode_frame(c, NULL, pkt);
1458 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* Add the scratch encoder's accumulated SSE (PSNR flag above). */
1460 rd += c->error[0] + c->error[1] + c->error[2];
1468 avcodec_free_context(&c);
1469 av_packet_unref(pkt);
1476 av_packet_free(&pkt);
1478 return best_b_count;
/* Reorder buffered input pictures into coding order and pick the next
 * picture to encode: applies frame skipping, chooses I/P/B types (forced
 * I at GOP start, B-run length by b_frame_strategy 0/1/2), fills
 * s->reordered_input_picture, and finally references the selected picture
 * into s->new_picture / s->current_picture. */
1481 static int select_input_picture(MpegEncContext *s)
/* Shift the reorder FIFO down by one. */
1485 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1486 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1487 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1489 /* set next picture type & ordering */
1490 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
/* Optional whole-frame skipping against the last reference. */
1491 if (s->frame_skip_threshold || s->frame_skip_factor) {
1492 if (s->picture_in_gop_number < s->gop_size &&
1493 s->next_picture_ptr &&
1494 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1495 // FIXME check that the gop check above is +-1 correct
1496 av_frame_unref(s->input_picture[0]->f);
1498 ff_vbv_update(s, 0);
/* No reference yet (or intra-only): force an I picture. */
1504 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1505 !s->next_picture_ptr || s->intra_only) {
1506 s->reordered_input_picture[0] = s->input_picture[0];
1507 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1508 s->reordered_input_picture[0]->f->coded_picture_number =
1509 s->coded_picture_number++;
/* Two-pass: picture types come from the first-pass stats. */
1513 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1514 for (i = 0; i < s->max_b_frames + 1; i++) {
1515 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1517 if (pict_num >= s->rc_context.num_entries)
1519 if (!s->input_picture[i]) {
1520 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1524 s->input_picture[i]->f->pict_type =
1525 s->rc_context.entry[pict_num].new_pict_type;
/* --- choose the B-run length per strategy --- */
1529 if (s->b_frame_strategy == 0) {
/* Fixed: as many Bs as buffered, up to max_b_frames. */
1530 b_frames = s->max_b_frames;
1531 while (b_frames && !s->input_picture[b_frames])
1533 } else if (s->b_frame_strategy == 1) {
/* Heuristic: stop the B-run where the intra-block count gets high. */
1534 for (i = 1; i < s->max_b_frames + 1; i++) {
1535 if (s->input_picture[i] &&
1536 s->input_picture[i]->b_frame_score == 0) {
1537 s->input_picture[i]->b_frame_score =
1539 s->input_picture[i ]->f->data[0],
1540 s->input_picture[i - 1]->f->data[0],
1544 for (i = 0; i < s->max_b_frames + 1; i++) {
1545 if (!s->input_picture[i] ||
1546 s->input_picture[i]->b_frame_score - 1 >
1547 s->mb_num / s->b_sensitivity)
1551 b_frames = FFMAX(0, i - 1);
/* Reset cached scores for reuse. */
1554 for (i = 0; i < b_frames + 1; i++) {
1555 s->input_picture[i]->b_frame_score = 0;
1557 } else if (s->b_frame_strategy == 2) {
/* Exhaustive RD search on downscaled re-encodes. */
1558 b_frames = estimate_best_b_count(s);
/* User-forced non-B types shorten the run. */
1565 for (i = b_frames - 1; i >= 0; i--) {
1566 int type = s->input_picture[i]->f->pict_type;
1567 if (type && type != AV_PICTURE_TYPE_B)
1570 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1571 b_frames == s->max_b_frames) {
1572 av_log(s->avctx, AV_LOG_ERROR,
1573 "warning, too many B-frames in a row\n");
/* GOP boundary handling; closed GOP forces an I anchor. */
1576 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1577 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1578 s->gop_size > s->picture_in_gop_number) {
1579 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1581 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1583 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1587 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1588 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
/* Emit the anchor (I/P) first, then the preceding Bs. */
1591 s->reordered_input_picture[0] = s->input_picture[b_frames];
1592 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1593 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1594 s->reordered_input_picture[0]->f->coded_picture_number =
1595 s->coded_picture_number++;
1596 for (i = 0; i < b_frames; i++) {
1597 s->reordered_input_picture[i + 1] = s->input_picture[i];
1598 s->reordered_input_picture[i + 1]->f->pict_type =
1600 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1601 s->coded_picture_number++;
/* --- publish the chosen picture as new_picture/current_picture --- */
1606 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1608 if (s->reordered_input_picture[0]) {
/* Non-B pictures become references (3 = both-field reference). */
1609 s->reordered_input_picture[0]->reference =
1610 s->reordered_input_picture[0]->f->pict_type !=
1611 AV_PICTURE_TYPE_B ? 3 : 0;
1613 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1616 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1617 // input is a shared pix, so we can't modify it -> allocate a new
1618 // one & ensure that the shared one is reuseable
1621 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1624 pic = &s->picture[i];
1626 pic->reference = s->reordered_input_picture[0]->reference;
1627 if (alloc_picture(s, pic, 0) < 0) {
1631 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1635 /* mark us unused / free shared pic */
1636 av_frame_unref(s->reordered_input_picture[0]->f);
1637 s->reordered_input_picture[0]->shared = 0;
1639 s->current_picture_ptr = pic;
1641 // input is not a shared pix -> reuse buffer for current_pix
1642 s->current_picture_ptr = s->reordered_input_picture[0];
1643 for (i = 0; i < 4; i++) {
1644 if (s->new_picture.f->data[i])
1645 s->new_picture.f->data[i] += INPLACE_OFFSET;
1648 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1649 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1650 s->current_picture_ptr)) < 0)
1653 s->picture_number = s->new_picture.f->display_picture_number;
/* Post-encode bookkeeping for the just-coded frame: replicate edge pixels
 * around reference pictures (needed for unrestricted motion vectors),
 * remember the last picture type and its lambda, and mirror data into the
 * deprecated coded_frame / error[] fields while those APIs still exist. */
1658 static void frame_end(MpegEncContext *s)
1660 if (s->unrestricted_mv &&
1661 s->current_picture.reference &&
1663 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1664 int hshift = desc->log2_chroma_w;
1665 int vshift = desc->log2_chroma_h;
/* Pad luma, then both chroma planes scaled by the subsampling shifts. */
1666 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1667 s->current_picture.f->linesize[0],
1668 s->h_edge_pos, s->v_edge_pos,
1669 EDGE_WIDTH, EDGE_WIDTH,
1670 EDGE_TOP | EDGE_BOTTOM);
1671 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1672 s->current_picture.f->linesize[1],
1673 s->h_edge_pos >> hshift,
1674 s->v_edge_pos >> vshift,
1675 EDGE_WIDTH >> hshift,
1676 EDGE_WIDTH >> vshift,
1677 EDGE_TOP | EDGE_BOTTOM);
1678 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1679 s->current_picture.f->linesize[2],
1680 s->h_edge_pos >> hshift,
1681 s->v_edge_pos >> vshift,
1682 EDGE_WIDTH >> hshift,
1683 EDGE_WIDTH >> vshift,
1684 EDGE_TOP | EDGE_BOTTOM);
1689 s->last_pict_type = s->pict_type;
/* Remembered per-type lambda feeds estimate_best_b_count() later. */
1690 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1691 if (s->pict_type!= AV_PICTURE_TYPE_B)
1692 s->last_non_b_pict_type = s->pict_type;
/* Deprecated-API mirrors, compiled out once the API versions drop. */
1694 #if FF_API_CODED_FRAME
1695 FF_DISABLE_DEPRECATION_WARNINGS
1696 av_frame_unref(s->avctx->coded_frame);
1697 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1698 FF_ENABLE_DEPRECATION_WARNINGS
1700 #if FF_API_ERROR_FRAME
1701 FF_DISABLE_DEPRECATION_WARNINGS
1702 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1703 sizeof(s->current_picture.encoding_error));
1704 FF_ENABLE_DEPRECATION_WARNINGS
/* Refresh the per-coefficient DCT noise-reduction offsets from the running
 * error statistics, separately for intra and inter blocks. Counters are
 * halved once dct_count exceeds 2^16 so the averages track recent frames
 * and the sums cannot overflow. */
1708 static void update_noise_reduction(MpegEncContext *s)
1712 for (intra = 0; intra < 2; intra++) {
1713 if (s->dct_count[intra] > (1 << 16)) {
1714 for (i = 0; i < 64; i++) {
1715 s->dct_error_sum[intra][i] >>= 1;
1717 s->dct_count[intra] >>= 1;
/* offset = noise_reduction * count / avg_error, with rounding;
 * +1 in the denominator guards against division by zero. */
1720 for (i = 0; i < 64; i++) {
1721 s->dct_offset[intra][i] = (s->noise_reduction *
1722 s->dct_count[intra] +
1723 s->dct_error_sum[intra][i] / 2) /
1724 (s->dct_error_sum[intra][i] + 1);
/* Per-frame setup before encoding: rotate last/next/current reference
 * pictures, re-reference them into the working Picture structs, adjust
 * plane pointers/strides for field pictures, select the matching
 * dct_unquantize functions, and refresh noise-reduction tables. */
1729 static int frame_start(MpegEncContext *s)
1733 /* mark & release old frames */
1734 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1735 s->last_picture_ptr != s->next_picture_ptr &&
1736 s->last_picture_ptr->f->buf[0]) {
1737 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1740 s->current_picture_ptr->f->pict_type = s->pict_type;
1741 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1743 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1744 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1745 s->current_picture_ptr)) < 0)
/* Non-B pictures advance the reference chain. */
1748 if (s->pict_type != AV_PICTURE_TYPE_B) {
1749 s->last_picture_ptr = s->next_picture_ptr;
1751 s->next_picture_ptr = s->current_picture_ptr;
1754 if (s->last_picture_ptr) {
1755 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1756 if (s->last_picture_ptr->f->buf[0] &&
1757 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1758 s->last_picture_ptr)) < 0)
1761 if (s->next_picture_ptr) {
1762 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1763 if (s->next_picture_ptr->f->buf[0] &&
1764 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1765 s->next_picture_ptr)) < 0)
/* Field pictures: point at the right field and double the strides. */
1769 if (s->picture_structure!= PICT_FRAME) {
1771 for (i = 0; i < 4; i++) {
1772 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1773 s->current_picture.f->data[i] +=
1774 s->current_picture.f->linesize[i];
1776 s->current_picture.f->linesize[i] *= 2;
1777 s->last_picture.f->linesize[i] *= 2;
1778 s->next_picture.f->linesize[i] *= 2;
/* Pick the dequantizer matching the output bitstream syntax. */
1782 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1783 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1784 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1785 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1786 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1787 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1789 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1790 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1793 if (s->dct_error_sum) {
1794 av_assert2(s->noise_reduction && s->encoding);
1795 update_noise_reduction(s);
/* Public per-frame encode entry point: loads and reorders the input frame,
 * allocates the output packet, splits it across slice threads, encodes the
 * picture, then handles VBV-driven re-encoding, stuffing bits, MPEG-1/2
 * vbv_delay patching for CBR, pts/dts assignment and packet finalization.
 * Returns 0 (or a negative AVERROR); *got_packet signals packet output. */
1801 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1802 const AVFrame *pic_arg, int *got_packet)
1804 MpegEncContext *s = avctx->priv_data;
1805 int i, stuffing_count, ret;
1806 int context_count = s->slice_context_count;
1808 s->vbv_ignore_qmax = 0;
1810 s->picture_in_gop_number++;
1812 if (load_input_picture(s, pic_arg) < 0)
1815 if (select_input_picture(s) < 0) {
/* output? */
1820 if (s->new_picture.f->data[0]) {
/* A single-slice encode into the internal byte buffer may grow;
 * otherwise size the packet for the worst case. */
1821 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1822 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1824 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1825 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1828 s->mb_info_ptr = av_packet_new_side_data(pkt,
1829 AV_PKT_DATA_H263_MB_INFO,
1830 s->mb_width*s->mb_height*12);
1831 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* Give each slice thread a proportional span of the packet buffer. */
1834 for (i = 0; i < context_count; i++) {
1835 int start_y = s->thread_context[i]->start_mb_y;
1836 int end_y = s->thread_context[i]-> end_mb_y;
1837 int h = s->mb_height;
1838 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1839 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1841 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1844 s->pict_type = s->new_picture.f->pict_type;
1846 ret = frame_start(s);
1850 ret = encode_picture(s, s->picture_number);
1851 if (growing_buffer) {
1852 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1853 pkt->data = s->pb.buf;
1854 pkt->size = avctx->internal->byte_buffer_size;
/* Deprecated per-frame statistics mirrors. */
1859 #if FF_API_STAT_BITS
1860 FF_DISABLE_DEPRECATION_WARNINGS
1861 avctx->header_bits = s->header_bits;
1862 avctx->mv_bits = s->mv_bits;
1863 avctx->misc_bits = s->misc_bits;
1864 avctx->i_tex_bits = s->i_tex_bits;
1865 avctx->p_tex_bits = s->p_tex_bits;
1866 avctx->i_count = s->i_count;
1867 // FIXME f/b_count in avctx
1868 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1869 avctx->skip_count = s->skip_count;
1870 FF_ENABLE_DEPRECATION_WARNINGS
1875 if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->out_format == FMT_MJPEG)
1876 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
/* --- VBV overflow check: raise lambda and re-encode if too big --- */
1878 if (avctx->rc_buffer_size) {
1879 RateControlContext *rcc = &s->rc_context;
1880 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1881 int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
1882 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1884 if (put_bits_count(&s->pb) > max_size &&
1885 s->lambda < s->lmax) {
1886 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1887 (s->qscale + 1) / s->qscale);
1888 if (s->adaptive_quant) {
1890 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1891 s->lambda_table[i] =
1892 FFMAX(s->lambda_table[i] + min_step,
1893 s->lambda_table[i] * (s->qscale + 1) /
1896 s->mb_skipped = 0; // done in frame_start()
1897 // done in encode_picture() so we must undo it
1898 if (s->pict_type == AV_PICTURE_TYPE_P) {
1899 if (s->flipflop_rounding ||
1900 s->codec_id == AV_CODEC_ID_H263P ||
1901 s->codec_id == AV_CODEC_ID_MPEG4)
1902 s->no_rounding ^= 1;
1904 if (s->pict_type != AV_PICTURE_TYPE_B) {
1905 s->time_base = s->last_time_base;
1906 s->last_non_b_time = s->time - s->pp_time;
/* Reset all per-thread bit writers before retrying. */
1908 for (i = 0; i < context_count; i++) {
1909 PutBitContext *pb = &s->thread_context[i]->pb;
1910 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1912 s->vbv_ignore_qmax = 1;
1913 av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1917 av_assert0(avctx->rc_max_rate);
1920 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1921 ff_write_pass1_stats(s);
1923 for (i = 0; i < 4; i++) {
1924 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1925 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1927 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1928 s->current_picture_ptr->encoding_error,
1929 (avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
/* Pass-1 sanity check: counted bits must match what was written. */
1932 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1933 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1934 s->misc_bits + s->i_tex_bits +
1936 flush_put_bits(&s->pb);
1937 s->frame_bits = put_bits_count(&s->pb);
/* --- bit stuffing to satisfy the rate controller --- */
1939 stuffing_count = ff_vbv_update(s, s->frame_bits);
1940 s->stuffing_bits = 8*stuffing_count;
1941 if (stuffing_count) {
1942 if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
1943 av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
1947 switch (s->codec_id) {
1948 case AV_CODEC_ID_MPEG1VIDEO:
1949 case AV_CODEC_ID_MPEG2VIDEO:
/* MPEG-1/2: zero bytes are legal stuffing. */
1950 while (stuffing_count--) {
1951 put_bits(&s->pb, 8, 0);
1954 case AV_CODEC_ID_MPEG4:
/* MPEG-4: stuffing via a filler start code then 0xFF bytes. */
1955 put_bits(&s->pb, 16, 0);
1956 put_bits(&s->pb, 16, 0x1C3);
1957 stuffing_count -= 4;
1958 while (stuffing_count--) {
1959 put_bits(&s->pb, 8, 0xFF);
1963 av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1965 flush_put_bits(&s->pb);
1966 s->frame_bits = put_bits_count(&s->pb);
1969 /* update MPEG-1/2 vbv_delay for CBR */
1970 if (avctx->rc_max_rate &&
1971 avctx->rc_min_rate == avctx->rc_max_rate &&
1972 s->out_format == FMT_MPEG1 &&
1973 90000LL * (avctx->rc_buffer_size - 1) <=
1974 avctx->rc_max_rate * 0xFFFFLL) {
1975 AVCPBProperties *props;
1978 int vbv_delay, min_delay;
1979 double inbits = avctx->rc_max_rate *
1980 av_q2d(avctx->time_base);
1981 int minbits = s->frame_bits - 8 *
1982 (s->vbv_delay_ptr - s->pb.buf - 1);
1983 double bits = s->rc_context.buffer_index + minbits - inbits;
1986 av_log(avctx, AV_LOG_ERROR,
1987 "Internal error, negative bits\n");
1989 av_assert1(s->repeat_first_field == 0);
/* vbv_delay is expressed in 90 kHz clock ticks. */
1991 vbv_delay = bits * 90000 / avctx->rc_max_rate;
1992 min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
1995 vbv_delay = FFMAX(vbv_delay, min_delay);
1997 av_assert0(vbv_delay < 0xFFFF);
/* Patch the 16-bit vbv_delay field in-place in the picture header
 * (it straddles byte boundaries, hence the masking). */
1999 s->vbv_delay_ptr[0] &= 0xF8;
2000 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2001 s->vbv_delay_ptr[1] = vbv_delay >> 5;
2002 s->vbv_delay_ptr[2] &= 0x07;
2003 s->vbv_delay_ptr[2] |= vbv_delay << 3;
2005 props = av_cpb_properties_alloc(&props_size);
2007 return AVERROR(ENOMEM);
/* 27 MHz units for CPB side data (90 kHz * 300). */
2008 props->vbv_delay = vbv_delay * 300;
2010 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
2011 (uint8_t*)props, props_size);
2017 #if FF_API_VBV_DELAY
2018 FF_DISABLE_DEPRECATION_WARNINGS
2019 avctx->vbv_delay = vbv_delay * 300;
2020 FF_ENABLE_DEPRECATION_WARNINGS
2023 s->total_bits += s->frame_bits;
2024 #if FF_API_STAT_BITS
2025 FF_DISABLE_DEPRECATION_WARNINGS
2026 avctx->frame_bits = s->frame_bits;
2027 FF_ENABLE_DEPRECATION_WARNINGS
/* --- timestamps: dts lags pts by one frame when B-frames delay out --- */
2031 pkt->pts = s->current_picture.f->pts;
2032 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2033 if (!s->current_picture.f->coded_picture_number)
2034 pkt->dts = pkt->pts - s->dts_delta;
2036 pkt->dts = s->reordered_pts;
2037 s->reordered_pts = pkt->pts;
2039 pkt->dts = pkt->pts;
2040 if (s->current_picture.f->key_frame)
2041 pkt->flags |= AV_PKT_FLAG_KEY;
2043 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2048 /* release non-reference frames */
2049 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2050 if (!s->picture[i].reference)
2051 ff_mpeg_unref_picture(avctx, &s->picture[i]);
2054 av_assert1((s->frame_bits & 7) == 0);
2056 pkt->size = s->frame_bits / 8;
2057 *got_packet = !!pkt->size;
/* Zero out block `n` entirely when it contains only a few small
 * coefficients whose weighted "score" (per-position weights from `tab`,
 * larger near DC) stays below `threshold`. A negative threshold means the
 * DC coefficient is kept (skip_dc mode). block_last_index is set to 0
 * (DC kept) or -1 (block fully cleared). */
2061 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2062 int n, int threshold)
/* Per-scan-position weight: positions nearer DC count more. */
2064 static const char tab[64] = {
2065 3, 2, 2, 1, 1, 1, 1, 1,
2066 1, 1, 1, 1, 1, 1, 1, 1,
2067 1, 1, 1, 1, 1, 1, 1, 1,
2068 0, 0, 0, 0, 0, 0, 0, 0,
2069 0, 0, 0, 0, 0, 0, 0, 0,
2070 0, 0, 0, 0, 0, 0, 0, 0,
2071 0, 0, 0, 0, 0, 0, 0, 0,
2072 0, 0, 0, 0, 0, 0, 0, 0
2077 int16_t *block = s->block[n];
2078 const int last_index = s->block_last_index[n];
2081 if (threshold < 0) {
2083 threshold = -threshold;
2087 /* Are all we could set to zero already zero? */
2088 if (last_index <= skip_dc - 1)
/* Score the block; any coefficient with |level| > 1 aborts elimination. */
2091 for (i = 0; i <= last_index; i++) {
2092 const int j = s->intra_scantable.permutated[i];
2093 const int level = FFABS(block[j]);
2095 if (skip_dc && i == 0)
2099 } else if (level > 1) {
2105 if (score >= threshold)
/* Clear all (non-DC, if skip_dc) coefficients. */
2107 for (i = skip_dc; i <= last_index; i++) {
2108 const int j = s->intra_scantable.permutated[i];
2112 s->block_last_index[n] = 0;
2114 s->block_last_index[n] = -1;
/* Clamp quantized coefficients to the codec's representable range
 * [min_qcoeff, max_qcoeff], counting clipped values and warning once per
 * block in simple macroblock-decision mode. The intra DC coefficient is
 * never clipped. */
2117 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2121 const int maxlevel = s->max_qcoeff;
2122 const int minlevel = s->min_qcoeff;
2126 i = 1; // skip clipping of intra dc
2130 for (; i <= last_index; i++) {
2131 const int j = s->intra_scantable.permutated[i];
2132 int level = block[j];
2134 if (level > maxlevel) {
2137 } else if (level < minlevel) {
2145 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2146 av_log(s->avctx, AV_LOG_INFO,
2147 "warning, clipping %d dct coefficients to %d..%d\n",
2148 overflow, minlevel, maxlevel);
/* Fill an 8x8 table of visual weights for noise shaping: for every pixel,
 * measure local activity (variance over the up-to-3x3 neighbourhood,
 * clipped at the block border) and derive a weight proportional to the
 * local standard deviation — flat areas get low weights, busy areas high. */
2151 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2155 for (y = 0; y < 8; y++) {
2156 for (x = 0; x < 8; x++) {
/* Neighbourhood bounds clipped to the 8x8 block. */
2162 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2163 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2164 int v = ptr[x2 + y2 * stride];
/* weight = 36 * sqrt(count*sum(v^2) - sum(v)^2) / count
 * i.e. proportional to the neighbourhood's std deviation. */
2170 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2175 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2176 int motion_x, int motion_y,
2177 int mb_block_height,
2181 int16_t weight[12][64];
2182 int16_t orig[12][64];
2183 const int mb_x = s->mb_x;
2184 const int mb_y = s->mb_y;
2187 int dct_offset = s->linesize * 8; // default for progressive frames
2188 int uv_dct_offset = s->uvlinesize * 8;
2189 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2190 ptrdiff_t wrap_y, wrap_c;
2192 for (i = 0; i < mb_block_count; i++)
2193 skip_dct[i] = s->skipdct;
2195 if (s->adaptive_quant) {
2196 const int last_qp = s->qscale;
2197 const int mb_xy = mb_x + mb_y * s->mb_stride;
2199 s->lambda = s->lambda_table[mb_xy];
2202 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2203 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2204 s->dquant = s->qscale - last_qp;
2206 if (s->out_format == FMT_H263) {
2207 s->dquant = av_clip(s->dquant, -2, 2);
2209 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2211 if (s->pict_type == AV_PICTURE_TYPE_B) {
2212 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2215 if (s->mv_type == MV_TYPE_8X8)
2221 ff_set_qscale(s, last_qp + s->dquant);
2222 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2223 ff_set_qscale(s, s->qscale + s->dquant);
2225 wrap_y = s->linesize;
2226 wrap_c = s->uvlinesize;
2227 ptr_y = s->new_picture.f->data[0] +
2228 (mb_y * 16 * wrap_y) + mb_x * 16;
2229 ptr_cb = s->new_picture.f->data[1] +
2230 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2231 ptr_cr = s->new_picture.f->data[2] +
2232 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2234 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2235 uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2236 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2237 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2238 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2240 16, 16, mb_x * 16, mb_y * 16,
2241 s->width, s->height);
2243 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2245 mb_block_width, mb_block_height,
2246 mb_x * mb_block_width, mb_y * mb_block_height,
2248 ptr_cb = ebuf + 16 * wrap_y;
2249 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2251 mb_block_width, mb_block_height,
2252 mb_x * mb_block_width, mb_y * mb_block_height,
2254 ptr_cr = ebuf + 16 * wrap_y + 16;
2258 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2259 int progressive_score, interlaced_score;
2261 s->interlaced_dct = 0;
2262 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2263 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2264 NULL, wrap_y, 8) - 400;
2266 if (progressive_score > 0) {
2267 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2268 NULL, wrap_y * 2, 8) +
2269 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2270 NULL, wrap_y * 2, 8);
2271 if (progressive_score > interlaced_score) {
2272 s->interlaced_dct = 1;
2274 dct_offset = wrap_y;
2275 uv_dct_offset = wrap_c;
2277 if (s->chroma_format == CHROMA_422 ||
2278 s->chroma_format == CHROMA_444)
2284 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2285 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2286 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2287 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2289 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2293 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2294 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2295 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2296 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2297 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2298 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2299 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2300 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2301 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2302 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2303 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2304 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2308 op_pixels_func (*op_pix)[4];
2309 qpel_mc_func (*op_qpix)[16];
2310 uint8_t *dest_y, *dest_cb, *dest_cr;
2312 dest_y = s->dest[0];
2313 dest_cb = s->dest[1];
2314 dest_cr = s->dest[2];
2316 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2317 op_pix = s->hdsp.put_pixels_tab;
2318 op_qpix = s->qdsp.put_qpel_pixels_tab;
2320 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2321 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2324 if (s->mv_dir & MV_DIR_FORWARD) {
2325 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2326 s->last_picture.f->data,
2328 op_pix = s->hdsp.avg_pixels_tab;
2329 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2331 if (s->mv_dir & MV_DIR_BACKWARD) {
2332 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2333 s->next_picture.f->data,
2337 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2338 int progressive_score, interlaced_score;
2340 s->interlaced_dct = 0;
2341 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2342 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2346 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2347 progressive_score -= 400;
2349 if (progressive_score > 0) {
2350 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2352 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2356 if (progressive_score > interlaced_score) {
2357 s->interlaced_dct = 1;
2359 dct_offset = wrap_y;
2360 uv_dct_offset = wrap_c;
2362 if (s->chroma_format == CHROMA_422)
2368 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2369 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2370 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2371 dest_y + dct_offset, wrap_y);
2372 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2373 dest_y + dct_offset + 8, wrap_y);
2375 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2379 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2380 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2381 if (!s->chroma_y_shift) { /* 422 */
2382 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2383 dest_cb + uv_dct_offset, wrap_c);
2384 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2385 dest_cr + uv_dct_offset, wrap_c);
2388 /* pre quantization */
2389 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2390 2 * s->qscale * s->qscale) {
2392 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2394 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2396 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2397 wrap_y, 8) < 20 * s->qscale)
2399 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2400 wrap_y, 8) < 20 * s->qscale)
2402 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2404 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2406 if (!s->chroma_y_shift) { /* 422 */
2407 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2408 dest_cb + uv_dct_offset,
2409 wrap_c, 8) < 20 * s->qscale)
2411 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2412 dest_cr + uv_dct_offset,
2413 wrap_c, 8) < 20 * s->qscale)
2419 if (s->quantizer_noise_shaping) {
2421 get_visual_weight(weight[0], ptr_y , wrap_y);
2423 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2425 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2427 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2429 get_visual_weight(weight[4], ptr_cb , wrap_c);
2431 get_visual_weight(weight[5], ptr_cr , wrap_c);
2432 if (!s->chroma_y_shift) { /* 422 */
2434 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2437 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2440 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2443 /* DCT & quantize */
2444 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2446 for (i = 0; i < mb_block_count; i++) {
2449 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2450 // FIXME we could decide to change to quantizer instead of
2452 // JS: I don't think that would be a good idea it could lower
2453 // quality instead of improve it. Just INTRADC clipping
2454 // deserves changes in quantizer
2456 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2458 s->block_last_index[i] = -1;
2460 if (s->quantizer_noise_shaping) {
2461 for (i = 0; i < mb_block_count; i++) {
2463 s->block_last_index[i] =
2464 dct_quantize_refine(s, s->block[i], weight[i],
2465 orig[i], i, s->qscale);
2470 if (s->luma_elim_threshold && !s->mb_intra)
2471 for (i = 0; i < 4; i++)
2472 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2473 if (s->chroma_elim_threshold && !s->mb_intra)
2474 for (i = 4; i < mb_block_count; i++)
2475 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2477 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2478 for (i = 0; i < mb_block_count; i++) {
2479 if (s->block_last_index[i] == -1)
2480 s->coded_score[i] = INT_MAX / 256;
2485 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2486 s->block_last_index[4] =
2487 s->block_last_index[5] = 0;
2489 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2490 if (!s->chroma_y_shift) { /* 422 / 444 */
2491 for (i=6; i<12; i++) {
2492 s->block_last_index[i] = 0;
2493 s->block[i][0] = s->block[4][0];
2498 // non c quantize code returns incorrect block_last_index FIXME
2499 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2500 for (i = 0; i < mb_block_count; i++) {
2502 if (s->block_last_index[i] > 0) {
2503 for (j = 63; j > 0; j--) {
2504 if (s->block[i][s->intra_scantable.permutated[j]])
2507 s->block_last_index[i] = j;
2512 /* huffman encode */
2513 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2514 case AV_CODEC_ID_MPEG1VIDEO:
2515 case AV_CODEC_ID_MPEG2VIDEO:
2516 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2517 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2519 case AV_CODEC_ID_MPEG4:
2520 if (CONFIG_MPEG4_ENCODER)
2521 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2523 case AV_CODEC_ID_MSMPEG4V2:
2524 case AV_CODEC_ID_MSMPEG4V3:
2525 case AV_CODEC_ID_WMV1:
2526 if (CONFIG_MSMPEG4_ENCODER)
2527 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2529 case AV_CODEC_ID_WMV2:
2530 if (CONFIG_WMV2_ENCODER)
2531 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2533 case AV_CODEC_ID_H261:
2534 if (CONFIG_H261_ENCODER)
2535 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2537 case AV_CODEC_ID_H263:
2538 case AV_CODEC_ID_H263P:
2539 case AV_CODEC_ID_FLV1:
2540 case AV_CODEC_ID_RV10:
2541 case AV_CODEC_ID_RV20:
2542 if (CONFIG_H263_ENCODER)
2543 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2545 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
2546 case AV_CODEC_ID_MJPEG:
2547 case AV_CODEC_ID_AMV:
2548 ff_mjpeg_encode_mb(s, s->block);
2551 case AV_CODEC_ID_SPEEDHQ:
2552 if (CONFIG_SPEEDHQ_ENCODER)
2553 ff_speedhq_encode_mb(s, s->block);
2560 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2562 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2563 else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2564 else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/* Snapshot the mutable encoder state from *s into *d before a trial
 * encoding of one macroblock, so that the state can later be restored
 * (or committed) when comparing candidate MB codings.
 * NOTE(review): interior lines of this chunk appear to be elided
 * (the embedded numbering has gaps); comments cover only visible code. */
2567 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* motion-vector prediction history */
2570 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2573 d->mb_skip_run= s->mb_skip_run;
/* DC predictors (presumably per component — loop header not visible) */
2575 d->last_dc[i] = s->last_dc[i];
/* bit-usage statistics */
2578 d->mv_bits= s->mv_bits;
2579 d->i_tex_bits= s->i_tex_bits;
2580 d->p_tex_bits= s->p_tex_bits;
2581 d->i_count= s->i_count;
2582 d->f_count= s->f_count;
2583 d->b_count= s->b_count;
2584 d->skip_count= s->skip_count;
2585 d->misc_bits= s->misc_bits;
/* quantizer state */
2589 d->qscale= s->qscale;
2590 d->dquant= s->dquant;
2592 d->esc3_level_length= s->esc3_level_length;
/* Counterpart of copy_context_before_encode(): copy the state mutated by
 * a trial MB encoding from *s into *d (used to commit the best candidate).
 * Copies more than the "before" variant: MVs, MB flags, bitstream writers
 * and block_last_index as well.
 * NOTE(review): interior lines of this chunk appear to be elided;
 * comments cover only visible code. */
2595 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* motion vectors and prediction history */
2598 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2599 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2602 d->mb_skip_run= s->mb_skip_run;
2604 d->last_dc[i] = s->last_dc[i];
/* bit-usage statistics */
2607 d->mv_bits= s->mv_bits;
2608 d->i_tex_bits= s->i_tex_bits;
2609 d->p_tex_bits= s->p_tex_bits;
2610 d->i_count= s->i_count;
2611 d->f_count= s->f_count;
2612 d->b_count= s->b_count;
2613 d->skip_count= s->skip_count;
2614 d->misc_bits= s->misc_bits;
/* macroblock coding decisions */
2616 d->mb_intra= s->mb_intra;
2617 d->mb_skipped= s->mb_skipped;
2618 d->mv_type= s->mv_type;
2619 d->mv_dir= s->mv_dir;
/* extra bitstream writers used with data partitioning */
2621 if(s->data_partitioning){
2623 d->tex_pb= s->tex_pb;
/* per-block state (loop header not visible) */
2627 d->block_last_index[i]= s->block_last_index[i];
2628 d->interlaced_dct= s->interlaced_dct;
2629 d->qscale= s->qscale;
2631 d->esc3_level_length= s->esc3_level_length;
/* Trial-encode one macroblock with a candidate coding `type` and keep it
 * in *best if it beats the current best score (*dmin). Writes into one of
 * two alternating scratch bitstreams selected by *next_block, and redirects
 * s->dest into a scratchpad so reconstruction does not touch the real frame.
 * NOTE(review): the score-comparison / commit lines of this chunk appear
 * to be elided; comments cover only visible code. */
2634 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2635 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2636 int *dmin, int *next_block, int motion_x, int motion_y)
2639 uint8_t *dest_backup[3];
/* restore pre-trial state, then point block/bitstream at the scratch set */
2641 copy_context_before_encode(s, backup, type);
2643 s->block= s->blocks[*next_block];
2644 s->pb= pb[*next_block];
2645 if(s->data_partitioning){
2646 s->pb2 = pb2 [*next_block];
2647 s->tex_pb= tex_pb[*next_block];
/* redirect reconstruction output into the RD scratchpad */
2651 memcpy(dest_backup, s->dest, sizeof(s->dest));
2652 s->dest[0] = s->sc.rd_scratchpad;
2653 s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2654 s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2655 av_assert0(s->linesize >= 32); //FIXME
2658 encode_mb(s, motion_x, motion_y);
/* rate term: bits produced by this trial */
2660 score= put_bits_count(&s->pb);
2661 if(s->data_partitioning){
2662 score+= put_bits_count(&s->pb2);
2663 score+= put_bits_count(&s->tex_pb);
/* in full-RD mode add the distortion term: lambda2*bits + SSE */
2666 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2667 ff_mpv_reconstruct_mb(s, s->block);
2669 score *= s->lambda2;
2670 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2674 memcpy(s->dest, dest_backup, sizeof(s->dest));
/* commit the trial's resulting state as the new best (guarding
 * comparison against *dmin is in elided lines above) */
2681 copy_context_after_encode(best, s, type);
/* Sum of squared errors between two w x h pixel blocks with row stride
 * `stride`. Uses the optimized mecc.sse DSP routines for the common
 * 16x16 and 8x8 sizes; otherwise falls back to a scalar loop using the
 * precomputed square table (offset by 256 so negative differences index
 * correctly). NOTE(review): the 16x16 guard and the fallback loop headers
 * are in elided lines; comments cover only visible code. */
2685 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2686 const uint32_t *sq = ff_square_tab + 256;
2691 return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2692 else if(w==8 && h==8)
2693 return s->mecc.sse[1](NULL, src1, src2, stride, 8);
/* generic path: accumulate squared pixel differences */
2697 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/* Distortion of the current macroblock: compares the reconstructed MB
 * (s->dest) against the source frame (s->new_picture). For full interior
 * MBs it uses the optimized (N)SSE DSP routines; edge MBs (w/h clipped
 * below 16) fall through to the generic sse() helper.
 * NOTE(review): some guard/brace lines of this chunk are elided. */
2706 static int sse_mb(MpegEncContext *s){
/* clip block size at the right/bottom picture border */
2710 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2711 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
/* full 16x16 MB: noise-shaped SSE if requested, else plain SSE */
2714 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2715 return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2716 s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2717 s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2719 return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2720 s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2721 s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
/* partial (border) MB: generic per-pixel SSE on luma + half-size chroma */
2724 return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2725 +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2726 +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/* Slice-thread worker: coarse pre-pass motion estimation for P frames.
 * Iterates the slice's macroblocks in reverse raster order (bottom-right
 * to top-left) using the pre-pass diamond size (pre_dia_size).
 * NOTE(review): some lines (e.g. return) are elided from this chunk. */
2729 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2730 MpegEncContext *s= *(void**)arg;
2734 s->me.dia_size= s->avctx->pre_dia_size;
2735 s->first_slice_line=1;
/* reverse scan over all MBs of this thread's slice */
2736 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2737 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2738 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2740 s->first_slice_line=0;
/* Slice-thread worker: full motion estimation over this thread's MB rows.
 * Dispatches to B-frame or P-frame estimation per picture type and keeps
 * the block_index[] pointers advanced in lockstep with mb_x.
 * NOTE(review): some lines (e.g. return) are elided from this chunk. */
2748 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2749 MpegEncContext *s= *(void**)arg;
2751 s->me.dia_size= s->avctx->dia_size;
2752 s->first_slice_line=1;
2753 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2754 s->mb_x=0; //for block init below
2755 ff_init_block_index(s);
2756 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
/* advance the four luma block indices by one MB (2 blocks wide) */
2757 s->block_index[0]+=2;
2758 s->block_index[1]+=2;
2759 s->block_index[2]+=2;
2760 s->block_index[3]+=2;
2762 /* compute motion vector & mb_type and store in context */
2763 if(s->pict_type==AV_PICTURE_TYPE_B)
2764 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2766 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2768 s->first_slice_line=0;
/* Slice-thread worker: compute per-macroblock luma variance and mean for
 * this thread's rows. varc = E[x^2] - E[x]^2 over the 16x16 luma block
 * (scaled by 256, with rounding constants); results feed rate control.
 * NOTE(review): xx/yy computation and return are in elided lines. */
2773 static int mb_var_thread(AVCodecContext *c, void *arg){
2774 MpegEncContext *s= *(void**)arg;
2777 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2778 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2781 uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2783 int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
/* variance: sum of squares minus (mean^2), rescaled to 8-bit range */
2785 varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2786 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2788 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2789 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2790 s->me.mb_var_sum_temp += varc;
/* Finish the current slice: merge/flush codec-specific partition data,
 * emit stuffing where the codec requires byte alignment, flush the main
 * PutBitContext, and account the stuffing bits for 2-pass rate control. */
2796 static void write_slice_end(MpegEncContext *s){
2797 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2798 if(s->partitioned_frame){
2799 ff_mpeg4_merge_partitions(s);
/* byte-align the MPEG-4 bitstream with stuffing bits */
2802 ff_mpeg4_stuffing(&s->pb);
2803 } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
2804 s->out_format == FMT_MJPEG) {
2805 ff_mjpeg_encode_stuffing(s);
2806 } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
2807 ff_speedhq_end_slice(s);
2810 flush_put_bits(&s->pb);
/* charge stuffing/alignment bits to misc_bits for pass-1 statistics */
2812 if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2813 s->misc_bits+= get_bits_diff(s);
/* Append one 12-byte H.263 macroblock-info record (for the mb_info side
 * data): bit offset of the MB, quantizer, GOB number, MB address, and the
 * predicted motion vector (4MV variant not implemented, written as 0). */
2816 static void write_mb_info(MpegEncContext *s)
/* ptr points at the last (just-reserved) 12-byte slot */
2818 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2819 int offset = put_bits_count(&s->pb);
2820 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2821 int gobn = s->mb_y / s->gob_index;
2823 if (CONFIG_H263_ENCODER)
2824 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2825 bytestream_put_le32(&ptr, offset);
2826 bytestream_put_byte(&ptr, s->qscale);
2827 bytestream_put_byte(&ptr, gobn);
2828 bytestream_put_le16(&ptr, mba);
2829 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2830 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2831 /* 4MV not implemented */
2832 bytestream_put_byte(&ptr, 0); /* hmv2 */
2833 bytestream_put_byte(&ptr, 0); /* vmv2 */
/* Track mb_info record boundaries: reserve a new 12-byte slot once enough
 * payload bytes have been written since the previous record, and remember
 * positions so the record can be filled in after the next startcode.
 * NOTE(review): early-return guards of this function are in elided lines. */
2836 static void update_mb_info(MpegEncContext *s, int startcode)
2840 if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
/* reserve the next 12-byte info slot */
2841 s->mb_info_size += 12;
2842 s->prev_mb_info = s->last_mb_info;
2845 s->prev_mb_info = put_bytes_count(&s->pb, 0);
2846 /* This might have incremented mb_info_size above, and we return without
2847 * actually writing any info into that slot yet. But in that case,
2848 * this will be called again at the start of the after writing the
2849 * start code, actually writing the mb info. */
2853 s->last_mb_info = put_bytes_count(&s->pb, 0);
2854 if (!s->mb_info_size)
2855 s->mb_info_size += 12;
/* Grow the shared output bitstream buffer when fewer than `threshold`
 * bytes remain. Only possible with a single slice context writing into
 * avctx->internal->byte_buffer: the buffer is enlarged by size_increase,
 * the PutBitContext is rebased onto the new memory, and the two raw
 * pointers into the old buffer (ptr_lastgob, vbv_delay_ptr) are fixed up.
 * Returns 0 on success, AVERROR(ENOMEM)/AVERROR(EINVAL) on failure.
 * NOTE(review): some lines (e.g. the final return 0) are elided. */
2859 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2861 if (put_bytes_left(&s->pb, 0) < threshold
2862 && s->slice_context_count == 1
2863 && s->pb.buf == s->avctx->internal->byte_buffer) {
/* save offsets of raw pointers into the old buffer */
2864 int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2865 int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2867 uint8_t *new_buffer = NULL;
2868 int new_buffer_size = 0;
/* refuse growth that would overflow the int-sized bit counters */
2870 if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2871 av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2872 return AVERROR(ENOMEM);
2877 av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2878 s->avctx->internal->byte_buffer_size + size_increase);
2880 return AVERROR(ENOMEM);
/* copy written data and swap in the new buffer */
2882 memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2883 av_free(s->avctx->internal->byte_buffer);
2884 s->avctx->internal->byte_buffer = new_buffer;
2885 s->avctx->internal->byte_buffer_size = new_buffer_size;
2886 rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2887 s->ptr_lastgob = s->pb.buf + lastgob_pos;
2888 s->vbv_delay_ptr = s->pb.buf + vbv_pos;
/* still not enough room (e.g. multi-slice or foreign buffer): give up */
2890 if (put_bytes_left(&s->pb, 0) < threshold)
2891 return AVERROR(EINVAL);
/* Slice-thread worker that actually encodes this thread's macroblock rows:
 * initializes DC predictors and per-codec slice state, walks the MBs,
 * writes resync/GOB/slice headers where needed, and for each MB either
 * performs rate-distortion mode decision (trial-encoding every candidate
 * MB type via encode_mb_hq into alternating scratch bitstreams) or encodes
 * the single possible MB type directly.
 * NOTE(review): this chunk is heavily elided (the embedded numbering has
 * many gaps — loop headers, braces, break statements and the final return
 * are missing); comments below annotate only the visible code. */
2895 static int encode_thread(AVCodecContext *c, void *arg){
2896 MpegEncContext *s= *(void**)arg;
2897 int mb_x, mb_y, mb_y_order;
2898 int chr_h= 16>>s->chroma_y_shift;
2900 MpegEncContext best_s = { 0 }, backup_s;
/* double-buffered scratch bitstreams for trial MB encodings */
2901 uint8_t bit_buf[2][MAX_MB_BYTES];
2902 uint8_t bit_buf2[2][MAX_MB_BYTES];
2903 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2904 PutBitContext pb[2], pb2[2], tex_pb[2];
2907 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2908 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2909 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2912 s->last_bits= put_bits_count(&s->pb);
2923 /* init last dc values */
2924 /* note: quant matrix value (8) is implied here */
2925 s->last_dc[i] = 128 << s->intra_dc_precision;
2927 s->current_picture.encoding_error[i] = 0;
/* AMV uses biased DC predictors */
2929 if(s->codec_id==AV_CODEC_ID_AMV){
2930 s->last_dc[0] = 128*8/13;
2931 s->last_dc[1] = 128*8/14;
2932 s->last_dc[2] = 128*8/14;
2935 memset(s->last_mv, 0, sizeof(s->last_mv));
/* per-codec slice-level setup */
2939 switch(s->codec_id){
2940 case AV_CODEC_ID_H263:
2941 case AV_CODEC_ID_H263P:
2942 case AV_CODEC_ID_FLV1:
2943 if (CONFIG_H263_ENCODER)
2944 s->gob_index = H263_GOB_HEIGHT(s->height);
2946 case AV_CODEC_ID_MPEG4:
2947 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2948 ff_mpeg4_init_partitions(s);
2954 s->first_slice_line = 1;
2955 s->ptr_lastgob = s->pb.buf;
/* SpeedHQ encodes MB rows in a codec-specific order */
2956 for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
2957 if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
2959 mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
2960 if (first_in_slice && mb_y_order != s->start_mb_y)
2961 ff_speedhq_end_slice(s);
2962 s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
2969 ff_set_qscale(s, s->qscale);
2970 ff_init_block_index(s);
2972 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2973 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2974 int mb_type= s->mb_type[xy];
/* make sure the output buffer can hold a worst-case macroblock */
2978 int size_increase = s->avctx->internal->byte_buffer_size/4
2979 + s->mb_width*MAX_MB_BYTES;
2981 ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
2982 if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
2983 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2986 if(s->data_partitioning){
2987 if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
2988 put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
2989 av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2995 s->mb_y = mb_y; // moved into loop, can get changed by H.261
2996 ff_update_block_index(s);
/* H.261 re-orders macroblocks within a GOB */
2998 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2999 ff_h261_reorder_mb_index(s);
3000 xy= s->mb_y*s->mb_stride + s->mb_x;
3001 mb_type= s->mb_type[xy];
3004 /* write gob / video packet header */
3006 int current_packet_size, is_gob_start;
3008 current_packet_size = put_bytes_count(&s->pb, 1)
3009 - (s->ptr_lastgob - s->pb.buf);
/* start a new resync unit once the RTP payload budget is used up */
3011 is_gob_start = s->rtp_payload_size &&
3012 current_packet_size >= s->rtp_payload_size &&
3015 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
/* per-codec constraints on where a resync point may start */
3017 switch(s->codec_id){
3018 case AV_CODEC_ID_H263:
3019 case AV_CODEC_ID_H263P:
3020 if(!s->h263_slice_structured)
3021 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3023 case AV_CODEC_ID_MPEG2VIDEO:
3024 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3025 case AV_CODEC_ID_MPEG1VIDEO:
3026 if(s->mb_skip_run) is_gob_start=0;
3028 case AV_CODEC_ID_MJPEG:
3029 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3034 if(s->start_mb_y != mb_y || mb_x!=0){
3037 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3038 ff_mpeg4_init_partitions(s);
3042 av_assert2((put_bits_count(&s->pb)&7) == 0);
3043 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
/* error_rate debug option: deliberately drop some packets */
3045 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3046 int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->mb_x + s->mb_y;
3047 int d = 100 / s->error_rate;
3049 current_packet_size=0;
3050 s->pb.buf_ptr= s->ptr_lastgob;
3051 av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3055 #if FF_API_RTP_CALLBACK
3056 FF_DISABLE_DEPRECATION_WARNINGS
3057 if (s->avctx->rtp_callback){
3058 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3059 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3061 FF_ENABLE_DEPRECATION_WARNINGS
3063 update_mb_info(s, 1);
/* emit the codec-specific resync header */
3065 switch(s->codec_id){
3066 case AV_CODEC_ID_MPEG4:
3067 if (CONFIG_MPEG4_ENCODER) {
3068 ff_mpeg4_encode_video_packet_header(s);
3069 ff_mpeg4_clean_buffers(s);
3072 case AV_CODEC_ID_MPEG1VIDEO:
3073 case AV_CODEC_ID_MPEG2VIDEO:
3074 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3075 ff_mpeg1_encode_slice_header(s);
3076 ff_mpeg1_clean_buffers(s);
3079 case AV_CODEC_ID_H263:
3080 case AV_CODEC_ID_H263P:
3081 if (CONFIG_H263_ENCODER)
3082 ff_h263_encode_gob_header(s, mb_y);
3086 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3087 int bits= put_bits_count(&s->pb);
3088 s->misc_bits+= bits - s->last_bits;
3092 s->ptr_lastgob += current_packet_size;
3093 s->first_slice_line=1;
3094 s->resync_mb_x=mb_x;
3095 s->resync_mb_y=mb_y;
3099 if( (s->resync_mb_x == s->mb_x)
3100 && s->resync_mb_y+1 == s->mb_y){
3101 s->first_slice_line=0;
3105 s->dquant=0; //only for QP_RD
3107 update_mb_info(s, 0);
3109 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
/* --- RD mode decision: trial-encode every candidate MB type --- */
3111 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3113 copy_context_before_encode(&backup_s, s, -1);
3115 best_s.data_partitioning= s->data_partitioning;
3116 best_s.partitioned_frame= s->partitioned_frame;
3117 if(s->data_partitioning){
3118 backup_s.pb2= s->pb2;
3119 backup_s.tex_pb= s->tex_pb;
3122 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3123 s->mv_dir = MV_DIR_FORWARD;
3124 s->mv_type = MV_TYPE_16X16;
3126 s->mv[0][0][0] = s->p_mv_table[xy][0];
3127 s->mv[0][0][1] = s->p_mv_table[xy][1];
3128 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3129 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3131 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3132 s->mv_dir = MV_DIR_FORWARD;
3133 s->mv_type = MV_TYPE_FIELD;
3136 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3137 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3138 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3140 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3141 &dmin, &next_block, 0, 0);
3143 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3144 s->mv_dir = MV_DIR_FORWARD;
3145 s->mv_type = MV_TYPE_16X16;
3149 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3150 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3152 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3153 s->mv_dir = MV_DIR_FORWARD;
3154 s->mv_type = MV_TYPE_8X8;
3157 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3158 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3160 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3161 &dmin, &next_block, 0, 0);
3163 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3164 s->mv_dir = MV_DIR_FORWARD;
3165 s->mv_type = MV_TYPE_16X16;
3167 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3168 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3169 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3170 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3172 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3173 s->mv_dir = MV_DIR_BACKWARD;
3174 s->mv_type = MV_TYPE_16X16;
3176 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3177 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3178 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3179 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3181 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3182 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3183 s->mv_type = MV_TYPE_16X16;
3185 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3186 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3187 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3188 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3189 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3190 &dmin, &next_block, 0, 0);
3192 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3193 s->mv_dir = MV_DIR_FORWARD;
3194 s->mv_type = MV_TYPE_FIELD;
3197 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3198 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3199 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3201 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3202 &dmin, &next_block, 0, 0);
3204 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3205 s->mv_dir = MV_DIR_BACKWARD;
3206 s->mv_type = MV_TYPE_FIELD;
3209 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3210 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3211 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3213 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3214 &dmin, &next_block, 0, 0);
3216 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3217 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3218 s->mv_type = MV_TYPE_FIELD;
3220 for(dir=0; dir<2; dir++){
3222 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3223 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3224 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3227 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3228 &dmin, &next_block, 0, 0);
3230 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3232 s->mv_type = MV_TYPE_16X16;
3236 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3237 &dmin, &next_block, 0, 0);
3238 if(s->h263_pred || s->h263_aic){
3240 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3242 ff_clean_intra_table_entries(s); //old mode?
/* optional QP_RD: also search +-1/+-2 around the chosen quantizer */
3246 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3247 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3248 const int last_qp= backup_s.qscale;
3251 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3252 static const int dquant_tab[4]={-1,1,-2,2};
3253 int storecoefs = s->mb_intra && s->dc_val[0];
3255 av_assert2(backup_s.dquant == 0);
3258 s->mv_dir= best_s.mv_dir;
3259 s->mv_type = MV_TYPE_16X16;
3260 s->mb_intra= best_s.mb_intra;
3261 s->mv[0][0][0] = best_s.mv[0][0][0];
3262 s->mv[0][0][1] = best_s.mv[0][0][1];
3263 s->mv[1][0][0] = best_s.mv[1][0][0];
3264 s->mv[1][0][1] = best_s.mv[1][0][1];
3266 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3267 for(; qpi<4; qpi++){
3268 int dquant= dquant_tab[qpi];
3269 qp= last_qp + dquant;
3270 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3272 backup_s.dquant= dquant;
/* save AC/DC predictors so a rejected qp trial can be undone */
3275 dc[i]= s->dc_val[0][ s->block_index[i] ];
3276 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3280 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3281 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3282 if(best_s.qscale != qp){
3285 s->dc_val[0][ s->block_index[i] ]= dc[i];
3286 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3293 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3294 int mx= s->b_direct_mv_table[xy][0];
3295 int my= s->b_direct_mv_table[xy][1];
3297 backup_s.dquant = 0;
3298 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3300 ff_mpeg4_set_direct_mv(s, mx, my);
3301 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3302 &dmin, &next_block, mx, my);
3304 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3305 backup_s.dquant = 0;
3306 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3308 ff_mpeg4_set_direct_mv(s, 0, 0);
3309 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3310 &dmin, &next_block, 0, 0);
/* optional SKIP_RD: retry the best inter mode with all-zero coefficients */
3312 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3315 coded |= s->block_last_index[i];
3318 memcpy(s->mv, best_s.mv, sizeof(s->mv));
3319 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3320 mx=my=0; //FIXME find the one we actually used
3321 ff_mpeg4_set_direct_mv(s, mx, my);
3322 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3330 s->mv_dir= best_s.mv_dir;
3331 s->mv_type = best_s.mv_type;
3333 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3334 s->mv[0][0][1] = best_s.mv[0][0][1];
3335 s->mv[1][0][0] = best_s.mv[1][0][0];
3336 s->mv[1][0][1] = best_s.mv[1][0][1];*/
3339 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3340 &dmin, &next_block, mx, my);
/* commit the winning candidate: state, then bits from scratch buffers */
3345 s->current_picture.qscale_table[xy] = best_s.qscale;
3347 copy_context_after_encode(s, &best_s, -1);
3349 pb_bits_count= put_bits_count(&s->pb);
3350 flush_put_bits(&s->pb);
3351 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3354 if(s->data_partitioning){
3355 pb2_bits_count= put_bits_count(&s->pb2);
3356 flush_put_bits(&s->pb2);
3357 ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3358 s->pb2= backup_s.pb2;
3360 tex_pb_bits_count= put_bits_count(&s->tex_pb);
3361 flush_put_bits(&s->tex_pb);
3362 ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3363 s->tex_pb= backup_s.tex_pb;
3365 s->last_bits= put_bits_count(&s->pb);
3367 if (CONFIG_H263_ENCODER &&
3368 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3369 ff_h263_update_motion_val(s);
/* copy reconstruction of the winner from the scratchpad to the frame */
3371 if(next_block==0){ //FIXME 16 vs linesize16
3372 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3373 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3374 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3377 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3378 ff_mpv_reconstruct_mb(s, s->block);
/* --- single-candidate path: only one MB type possible --- */
3380 int motion_x = 0, motion_y = 0;
3381 s->mv_type=MV_TYPE_16X16;
3382 // only one MB-Type possible
3385 case CANDIDATE_MB_TYPE_INTRA:
3388 motion_x= s->mv[0][0][0] = 0;
3389 motion_y= s->mv[0][0][1] = 0;
3391 case CANDIDATE_MB_TYPE_INTER:
3392 s->mv_dir = MV_DIR_FORWARD;
3394 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3395 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3397 case CANDIDATE_MB_TYPE_INTER_I:
3398 s->mv_dir = MV_DIR_FORWARD;
3399 s->mv_type = MV_TYPE_FIELD;
3402 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3403 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3404 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3407 case CANDIDATE_MB_TYPE_INTER4V:
3408 s->mv_dir = MV_DIR_FORWARD;
3409 s->mv_type = MV_TYPE_8X8;
3412 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3413 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3416 case CANDIDATE_MB_TYPE_DIRECT:
3417 if (CONFIG_MPEG4_ENCODER) {
3418 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3420 motion_x=s->b_direct_mv_table[xy][0];
3421 motion_y=s->b_direct_mv_table[xy][1];
3422 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3425 case CANDIDATE_MB_TYPE_DIRECT0:
3426 if (CONFIG_MPEG4_ENCODER) {
3427 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3429 ff_mpeg4_set_direct_mv(s, 0, 0);
3432 case CANDIDATE_MB_TYPE_BIDIR:
3433 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3435 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3436 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3437 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3438 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3440 case CANDIDATE_MB_TYPE_BACKWARD:
3441 s->mv_dir = MV_DIR_BACKWARD;
3443 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3444 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3446 case CANDIDATE_MB_TYPE_FORWARD:
3447 s->mv_dir = MV_DIR_FORWARD;
3449 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3450 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3452 case CANDIDATE_MB_TYPE_FORWARD_I:
3453 s->mv_dir = MV_DIR_FORWARD;
3454 s->mv_type = MV_TYPE_FIELD;
3457 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3458 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3459 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3462 case CANDIDATE_MB_TYPE_BACKWARD_I:
3463 s->mv_dir = MV_DIR_BACKWARD;
3464 s->mv_type = MV_TYPE_FIELD;
3467 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3468 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3469 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3472 case CANDIDATE_MB_TYPE_BIDIR_I:
3473 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3474 s->mv_type = MV_TYPE_FIELD;
3476 for(dir=0; dir<2; dir++){
3478 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3479 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3480 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3485 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3488 encode_mb(s, motion_x, motion_y);
3490 // RAL: Update last macroblock type
3491 s->last_mv_dir = s->mv_dir;
3493 if (CONFIG_H263_ENCODER &&
3494 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3495 ff_h263_update_motion_val(s);
3497 ff_mpv_reconstruct_mb(s, s->block);
3500 /* clean the MV table in IPS frames for direct mode in B-frames */
3501 if(s->mb_intra /* && I,P,S_TYPE */){
3502 s->p_mv_table[xy][0]=0;
3503 s->p_mv_table[xy][1]=0;
/* accumulate per-plane SSE for PSNR reporting */
3506 if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3510 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3511 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3513 s->current_picture.encoding_error[0] += sse(
3514 s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3515 s->dest[0], w, h, s->linesize);
3516 s->current_picture.encoding_error[1] += sse(
3517 s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3518 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3519 s->current_picture.encoding_error[2] += sse(
3520 s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3521 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3524 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3525 ff_h263_loop_filter(s);
3527 ff_dlog(s->avctx, "MB %d %d bits\n",
3528 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3532 //not beautiful here but we must write it before flushing so it has to be here
3533 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3534 ff_msmpeg4_encode_ext_header(s);
3538 #if FF_API_RTP_CALLBACK
3539 FF_DISABLE_DEPRECATION_WARNINGS
3540 /* Send the last GOB if RTP */
3541 if (s->avctx->rtp_callback) {
3542 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3543 int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3544 /* Call the RTP callback to send the last GOB */
3546 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3548 FF_ENABLE_DEPRECATION_WARNINGS
/* MERGE(field): add src->field into dst->field and zero the source copy,
 * so per-slice-thread statistics are folded into the main context exactly once. */
3554 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold motion-estimation statistics gathered by a slice-thread context (src)
 * back into the main context (dst) after the ME pass.
 * NOTE(review): this listing omits some original lines (line numbers jump);
 * the function's closing brace is not visible here. */
3555 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3556 MERGE(me.scene_change_score);
3557 MERGE(me.mc_mb_var_sum_temp);
3558 MERGE(me.mb_var_sum_temp);
/* Fold per-slice-thread encoding results (statistics, error counters and the
 * bitstream itself) from src into the main context dst after encode_thread().
 * NOTE(review): several original lines are missing from this listing
 * (the numbers jump 3565->3574, 3583->3587); additional MERGE()d fields
 * and closing braces are not visible here. */
3561 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3564 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3565 MERGE(dct_count[1]);
3574 MERGE(er.error_count);
3575 MERGE(padding_bug_score);
3576 MERGE(current_picture.encoding_error[0]);
3577 MERGE(current_picture.encoding_error[1]);
3578 MERGE(current_picture.encoding_error[2]);
/* When the DCT noise-reduction filter is active, also merge its
 * per-coefficient error accumulators (intra [0] and inter [1]). */
3580 if (dst->noise_reduction){
3581 for(i=0; i<64; i++){
3582 MERGE(dct_error_sum[0][i]);
3583 MERGE(dct_error_sum[1][i]);
/* Both bit buffers must be byte-aligned before the slice thread's output
 * is appended to the main bitstream. */
3587 av_assert1(put_bits_count(&src->pb) % 8 ==0);
3588 av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3589 ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3590 flush_put_bits(&dst->pb);
/* Choose the quality (lambda/qscale) for the current picture:
 * - if a pre-planned next_lambda is pending, use it (and clear it unless
 *   this is a dry run);
 * - otherwise, unless qscale is fixed, ask the rate controller via
 *   ff_rate_estimate_qscale(); a negative quality is an error
 *   (the return statement for that case is among the lines this listing omits).
 * With adaptive quantization, per-MB qscales are then smoothed per codec and
 * the qscale table initialized.
 * NOTE(review): line numbers jump (3602->3606, 3617->3619 etc.); returns,
 * break statements and closing braces are not visible in this excerpt. */
3593 static int estimate_qp(MpegEncContext *s, int dry_run){
3594 if (s->next_lambda){
3595 s->current_picture_ptr->f->quality =
3596 s->current_picture.f->quality = s->next_lambda;
3597 if(!dry_run) s->next_lambda= 0;
3598 } else if (!s->fixed_qscale) {
3599 int quality = ff_rate_estimate_qscale(s, dry_run);
3600 s->current_picture_ptr->f->quality =
3601 s->current_picture.f->quality = quality;
3602 if (s->current_picture.f->quality < 0)
/* Adaptive quantization: smooth the per-MB qscale choices so codec-specific
 * restrictions on qscale deltas between neighbouring MBs are respected. */
3606 if(s->adaptive_quant){
3607 switch(s->codec_id){
3608 case AV_CODEC_ID_MPEG4:
3609 if (CONFIG_MPEG4_ENCODER)
3610 ff_clean_mpeg4_qscales(s);
3612 case AV_CODEC_ID_H263:
3613 case AV_CODEC_ID_H263P:
3614 case AV_CODEC_ID_FLV1:
3615 if (CONFIG_H263_ENCODER)
3616 ff_clean_h263_qscales(s);
3619 ff_init_qscale_tab(s);
/* With adaptive quant, lambda comes from the per-MB table; otherwise from
 * the picture-level quality chosen above. */
3622 s->lambda= s->lambda_table[0];
3625 s->lambda = s->current_picture.f->quality;
3630 /* must be called before writing the header */
/* Update the temporal distance bookkeeping for the current picture:
 * s->time from the frame pts, and for B-frames the B-to-previous distance
 * (pb_time) derived from the P-to-P distance (pp_time); for non-B frames the
 * P-to-P distance and last_non_b_time are refreshed instead.
 * NOTE(review): the else/closing-brace lines are among those omitted by
 * this listing (numbers jump 3637->3639). */
3631 static void set_frame_distances(MpegEncContext * s){
3632 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3633 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3635 if(s->pict_type==AV_PICTURE_TYPE_B){
3636 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3637 av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
3639 s->pp_time= s->time - s->last_non_b_time;
3640 s->last_non_b_time= s->time;
3641 av_assert1(s->picture_number==0 || s->pp_time > 0);
/* Encode one picture: set up timing/lambda/quant matrices, run motion
 * estimation across slice threads, pick f_code/b_code, write the per-codec
 * picture header, then run encode_thread() on all slice contexts and merge
 * their results back into the main context.
 * NOTE(review): this listing omits many original lines (line numbers jump
 * throughout), including variable declarations, `break;`/`return` statements,
 * case labels and closing braces — the visible lines are NOT contiguous. */
3645 static int encode_picture(MpegEncContext *s, int picture_number)
3649 int context_count = s->slice_context_count;
3651 s->picture_number = picture_number;
3653 /* Reset the average MB variance */
3654 s->me.mb_var_sum_temp =
3655 s->me.mc_mb_var_sum_temp = 0;
3657 /* we need to initialize some time vars before we can encode B-frames */
3658 // RAL: Condition added for MPEG1VIDEO
3659 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3660 set_frame_distances(s);
3661 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3662 ff_set_mpeg4_time(s);
3664 s->me.scene_change_score=0;
3666 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* Rounding control: I frames reset it; P/S frames toggle it for codecs with
 * flip-flop rounding (H.263+/MPEG-4, MSMPEG4 v3+). */
3668 if(s->pict_type==AV_PICTURE_TYPE_I){
3669 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3670 else s->no_rounding=0;
3671 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3672 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3673 s->no_rounding ^= 1;
/* Initial lambda: from the 2-pass log, or from the last frame of the same
 * kind when the user did not force a fixed qscale. */
3676 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3677 if (estimate_qp(s,1) < 0)
3679 ff_get_2pass_fcode(s);
3680 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3681 if(s->pict_type==AV_PICTURE_TYPE_B)
3682 s->lambda= s->last_lambda_for[s->pict_type];
3684 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* For codecs without a separate chroma matrix, alias the chroma quant
 * tables to the luma ones (freeing any previously separate copies). */
3688 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3689 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3690 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3691 s->q_chroma_intra_matrix = s->q_intra_matrix;
3692 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3695 s->mb_intra=0; //for the rate distortion & bit compare functions
/* Sync the slice-thread contexts with the main context before ME. */
3696 for(i=1; i<context_count; i++){
3697 ret = ff_update_duplicate_context(s->thread_context[i], s);
3705 /* Estimate motion for every MB */
3706 if(s->pict_type != AV_PICTURE_TYPE_I){
3707 s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3708 s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3709 if (s->pict_type != AV_PICTURE_TYPE_B) {
3710 if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3712 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3716 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3717 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
/* I frames: all MBs are intra candidates; variance is still needed for rate control. */
3719 for(i=0; i<s->mb_stride*s->mb_height; i++)
3720 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3722 if(!s->fixed_qscale){
3723 /* finding spatial complexity for I-frame rate control */
3724 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3727 for(i=1; i<context_count; i++){
3728 merge_context_after_me(s, s->thread_context[i]);
3730 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3731 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* Scene change: promote a P frame to I if ME reported a high scene-change score. */
3734 if (s->me.scene_change_score > s->scenechange_threshold &&
3735 s->pict_type == AV_PICTURE_TYPE_P) {
3736 s->pict_type= AV_PICTURE_TYPE_I;
3737 for(i=0; i<s->mb_stride*s->mb_height; i++)
3738 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3739 if(s->msmpeg4_version >= 3)
3741 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3742 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* P/S frames: choose f_code from the MV statistics and clamp out-of-range MVs. */
3746 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3747 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3749 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3751 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3752 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3753 s->f_code= FFMAX3(s->f_code, a, b);
3756 ff_fix_long_p_mvs(s, s->intra_penalty ? CANDIDATE_MB_TYPE_INTER : CANDIDATE_MB_TYPE_INTRA);
3757 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3758 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3762 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3763 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
/* B frames: f_code from forward tables, b_code from backward tables. */
3768 if(s->pict_type==AV_PICTURE_TYPE_B){
3771 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3772 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3773 s->f_code = FFMAX(a, b);
3775 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3776 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3777 s->b_code = FFMAX(a, b);
3779 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3780 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3781 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3782 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3783 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3785 for(dir=0; dir<2; dir++){
3788 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3789 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3790 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3791 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* Final qscale decision (real run, not dry). */
3799 if (estimate_qp(s, 0) < 0)
3802 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3803 s->pict_type == AV_PICTURE_TYPE_I &&
3804 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3805 s->qscale= 3; //reduce clipping problems
/* MJPEG: bake qscale directly into the (permuted) quant matrices. */
3807 if (s->out_format == FMT_MJPEG) {
3808 const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3809 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3811 if (s->avctx->intra_matrix) {
3813 luma_matrix = s->avctx->intra_matrix;
3815 if (s->avctx->chroma_intra_matrix)
3816 chroma_matrix = s->avctx->chroma_intra_matrix;
3818 /* for mjpeg, we do include qscale in the matrix */
3820 int j = s->idsp.idct_permutation[i];
3822 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3823 s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3825 s->y_dc_scale_table=
3826 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3827 s->chroma_intra_matrix[0] =
3828 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3829 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3830 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3831 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3832 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV: fixed SP5X quant tables and DC scales. */
3835 if(s->codec_id == AV_CODEC_ID_AMV){
3836 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3837 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3839 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3841 s->intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3842 s->chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3844 s->y_dc_scale_table= y;
3845 s->c_dc_scale_table= c;
3846 s->intra_matrix[0] = 13;
3847 s->chroma_intra_matrix[0] = 14;
3848 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3849 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3850 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3851 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3855 if (s->out_format == FMT_SPEEDHQ) {
3856 s->y_dc_scale_table=
3857 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[3];
3860 //FIXME var duplication
3861 s->current_picture_ptr->f->key_frame =
3862 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3863 s->current_picture_ptr->f->pict_type =
3864 s->current_picture.f->pict_type = s->pict_type;
3866 if (s->current_picture.f->key_frame)
3867 s->picture_in_gop_number=0;
3869 s->mb_x = s->mb_y = 0;
3870 s->last_bits= put_bits_count(&s->pb);
/* Write the per-codec picture header (the case labels of this switch are
 * among the lines omitted by this listing). */
3871 switch(s->out_format) {
3872 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
3874 /* s->huffman == HUFFMAN_TABLE_OPTIMAL can only be true for MJPEG. */
3875 if (!CONFIG_MJPEG_ENCODER || s->huffman != HUFFMAN_TABLE_OPTIMAL)
3876 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3877 s->pred, s->intra_matrix, s->chroma_intra_matrix);
3881 if (CONFIG_SPEEDHQ_ENCODER)
3882 ff_speedhq_encode_picture_header(s);
3885 if (CONFIG_H261_ENCODER)
3886 ff_h261_encode_picture_header(s, picture_number);
3889 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3890 ff_wmv2_encode_picture_header(s, picture_number);
3891 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3892 ff_msmpeg4_encode_picture_header(s, picture_number);
3893 else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3894 ret = ff_mpeg4_encode_picture_header(s, picture_number);
3897 } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3898 ret = ff_rv10_encode_picture_header(s, picture_number);
3902 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3903 ff_rv20_encode_picture_header(s, picture_number);
3904 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3905 ff_flv_encode_picture_header(s, picture_number);
3906 else if (CONFIG_H263_ENCODER)
3907 ff_h263_encode_picture_header(s, picture_number);
3910 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3911 ff_mpeg1_encode_picture_header(s, picture_number);
3916 bits= put_bits_count(&s->pb);
3917 s->header_bits= bits - s->last_bits;
/* Run the actual MB encoding on all slice threads, then append each
 * thread's bitstream and merge its statistics into the main context. */
3919 for(i=1; i<context_count; i++){
3920 update_duplicate_context_after_me(s->thread_context[i], s);
3922 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3923 for(i=1; i<context_count; i++){
3924 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3925 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3926 merge_context_after_encode(s, s->thread_context[i]);
/* DCT-domain noise reduction (C reference for s->denoise_dct): accumulates
 * per-coefficient error statistics in dct_error_sum[intra][] and shrinks each
 * coefficient towards zero by dct_offset[intra][], clamping at zero so a
 * coefficient never changes sign.
 * NOTE(review): the branch conditionals (presumably on the sign of `level`)
 * and closing braces are among the lines omitted by this listing. */
3932 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3933 const int intra= s->mb_intra;
3936 s->dct_count[intra]++;
3938 for(i=0; i<64; i++){
3939 int level= block[i];
/* Positive-level path: accumulate, subtract offset, clamp at 0. */
3943 s->dct_error_sum[intra][i] += level;
3944 level -= s->dct_offset[intra][i];
3945 if(level<0) level=0;
/* Negative-level path: mirror of the above. */
3947 s->dct_error_sum[intra][i] -= level;
3948 level += s->dct_offset[intra][i];
3949 if(level>0) level=0;
/* Trellis (rate-distortion optimal) quantization of one 8x8 block:
 * forward DCT, per-coefficient candidate levels, then a Viterbi-style search
 * over (run, level) codings using the codec's VLC length tables, minimizing
 * distortion + lambda * bits. Returns the index of the last nonzero
 * coefficient (or -1 via the DC-only special case below).
 * NOTE(review): this listing omits many original lines (declarations of
 * coeff[][], survivor[], score_tab[], run_tab[], level_tab[], several
 * if/else and closing braces) — the visible lines are NOT contiguous. */
3956 static int dct_quantize_trellis_c(MpegEncContext *s,
3957 int16_t *block, int n,
3958 int qscale, int *overflow){
3960 const uint16_t *matrix;
3961 const uint8_t *scantable;
3962 const uint8_t *perm_scantable;
3964 unsigned int threshold1, threshold2;
3976 int coeff_count[64];
3977 int qmul, qadd, start_i, last_non_zero, i, dc;
3978 const int esc_length= s->ac_esc_length;
3980 uint8_t * last_length;
3981 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3984 s->fdsp.fdct(block);
3986 if(s->dct_error_sum)
3987 s->denoise_dct(s, block);
3989 qadd= ((qscale-1)|1)*8;
3991 if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3992 else mpeg2_qscale = qscale << 1;
/* Intra path: intra scantable, DC handled separately, intra matrices/VLCs. */
3996 scantable= s->intra_scantable.scantable;
3997 perm_scantable= s->intra_scantable.permutated;
4005 /* For AIC we skip quant/dequant of INTRADC */
4010 /* note: block[0] is assumed to be positive */
4011 block[0] = (block[0] + (q >> 1)) / q;
4014 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4015 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4016 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4017 bias= 1<<(QMAT_SHIFT-1);
4019 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4020 length = s->intra_chroma_ac_vlc_length;
4021 last_length= s->intra_chroma_ac_vlc_last_length;
4023 length = s->intra_ac_vlc_length;
4024 last_length= s->intra_ac_vlc_last_length;
/* Inter path: inter scantable, matrices and VLC length tables. */
4027 scantable= s->inter_scantable.scantable;
4028 perm_scantable= s->inter_scantable.permutated;
4031 qmat = s->q_inter_matrix[qscale];
4032 matrix = s->inter_matrix;
4033 length = s->inter_ac_vlc_length;
4034 last_length= s->inter_ac_vlc_last_length;
4038 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4039 threshold2= (threshold1<<1);
/* Scan backwards for the last coefficient that survives quantization. */
4041 for(i=63; i>=start_i; i--) {
4042 const int j = scantable[i];
4043 int level = block[j] * qmat[j];
4045 if(((unsigned)(level+threshold1))>threshold2){
/* Build up to two candidate levels (level, level-1) per coefficient. */
4051 for(i=start_i; i<=last_non_zero; i++) {
4052 const int j = scantable[i];
4053 int level = block[j] * qmat[j];
4055 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4056 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4057 if(((unsigned)(level+threshold1))>threshold2){
4059 level= (bias + level)>>QMAT_SHIFT;
4061 coeff[1][i]= level-1;
4062 // coeff[2][k]= level-2;
4064 level= (bias - level)>>QMAT_SHIFT;
4065 coeff[0][i]= -level;
4066 coeff[1][i]= -level+1;
4067 // coeff[2][k]= -level+2;
4069 coeff_count[i]= FFMIN(level, 2);
4070 av_assert2(coeff_count[i]);
4073 coeff[0][i]= (level>>31)|1;
4078 *overflow= s->max_qcoeff < max; //overflow might have happened
4080 if(last_non_zero < start_i){
4081 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4082 return last_non_zero;
4085 score_tab[start_i]= 0;
4086 survivor[0]= start_i;
/* Dynamic program over scan positions: for each candidate level, try all
 * surviving predecessors (runs) and keep the cheapest path. */
4089 for(i=start_i; i<=last_non_zero; i++){
4090 int level_index, j, zero_distortion;
4091 int dct_coeff= FFABS(block[ scantable[i] ]);
4092 int best_score=256*256*256*120;
4094 if (s->fdsp.fdct == ff_fdct_ifast)
4095 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4096 zero_distortion= dct_coeff*dct_coeff;
4098 for(level_index=0; level_index < coeff_count[i]; level_index++){
4100 int level= coeff[level_index][i];
4101 const int alevel= FFABS(level);
/* Reconstruct the dequantized value per output format to measure distortion. */
4106 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4107 unquant_coeff= alevel*qmul + qadd;
4108 } else if(s->out_format == FMT_MJPEG) {
4109 j = s->idsp.idct_permutation[scantable[i]];
4110 unquant_coeff = alevel * matrix[j] * 8;
4112 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4114 unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4115 unquant_coeff = (unquant_coeff - 1) | 1;
4117 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4118 unquant_coeff = (unquant_coeff - 1) | 1;
4123 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* Small levels use the VLC length tables... */
4125 if((level&(~127)) == 0){
4126 for(j=survivor_count-1; j>=0; j--){
4127 int run= i - survivor[j];
4128 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4129 score += score_tab[i-run];
4131 if(score < best_score){
4134 level_tab[i+1]= level-64;
4138 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4139 for(j=survivor_count-1; j>=0; j--){
4140 int run= i - survivor[j];
4141 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4142 score += score_tab[i-run];
4143 if(score < last_score){
4146 last_level= level-64;
/* ...large levels are costed as escape codes. */
4152 distortion += esc_length*lambda;
4153 for(j=survivor_count-1; j>=0; j--){
4154 int run= i - survivor[j];
4155 int score= distortion + score_tab[i-run];
4157 if(score < best_score){
4160 level_tab[i+1]= level-64;
4164 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4165 for(j=survivor_count-1; j>=0; j--){
4166 int run= i - survivor[j];
4167 int score= distortion + score_tab[i-run];
4168 if(score < last_score){
4171 last_level= level-64;
4179 score_tab[i+1]= best_score;
4181 // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
/* Prune the survivor list; the looser bound applies once runs exceed 27. */
4182 if(last_non_zero <= 27){
4183 for(; survivor_count; survivor_count--){
4184 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4188 for(; survivor_count; survivor_count--){
4189 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4194 survivor[ survivor_count++ ]= i+1;
/* For non-H.26x formats the "last" flag is free-standing: pick the best
 * truncation point after the fact (+2 bits approximates the EOB cost). */
4197 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4198 last_score= 256*256*256*120;
4199 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4200 int score= score_tab[i];
4202 score += lambda * 2; // FIXME more exact?
4204 if(score < last_score){
4207 last_level= level_tab[i];
4208 last_run= run_tab[i];
4213 s->coded_score[n] = last_score;
4215 dc= FFABS(block[0]);
4216 last_non_zero= last_i - 1;
4217 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4219 if(last_non_zero < start_i)
4220 return last_non_zero;
/* Special case: only the DC-position coefficient survives — re-decide it
 * against the cost of coding nothing at all. */
4222 if(last_non_zero == 0 && start_i == 0){
4224 int best_score= dc * dc;
4226 for(i=0; i<coeff_count[0]; i++){
4227 int level= coeff[i][0];
4228 int alevel= FFABS(level);
4229 int unquant_coeff, score, distortion;
4231 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4232 unquant_coeff= (alevel*qmul + qadd)>>3;
4234 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4235 unquant_coeff = (unquant_coeff - 1) | 1;
4237 unquant_coeff = (unquant_coeff + 4) >> 3;
4238 unquant_coeff<<= 3 + 3;
4240 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4242 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4243 else score= distortion + esc_length*lambda;
4245 if(score < best_score){
4247 best_level= level - 64;
4250 block[0]= best_level;
4251 s->coded_score[n] = best_score - dc*dc;
4252 if(best_level == 0) return -1;
4253 else return last_non_zero;
/* Backtrack the winning path into the (permuted) output block. */
4257 av_assert2(last_level);
4259 block[ perm_scantable[last_non_zero] ]= last_level;
4262 for(; i>start_i; i -= run_tab[i] + 1){
4263 block[ perm_scantable[i-1] ]= level_tab[i];
4266 return last_non_zero;
/* 64x64 table of (permuted) 8x8 DCT basis functions, scaled by BASIS_SHIFT;
 * lazily initialized by build_basis() and used by dct_quantize_refine(). */
4269 static int16_t basis[64][64];
/* Fill basis[] with the 2-D DCT basis vectors, indexed through the IDCT
 * coefficient permutation `perm`.
 * NOTE(review): the enclosing loop headers over i/j/x/y are among the lines
 * omitted by this listing. */
4271 static void build_basis(uint8_t *perm){
4278 double s= 0.25*(1<<BASIS_SHIFT);
4280 int perm_index= perm[index];
4281 if(i==0) s*= sqrt(0.5);
4282 if(j==0) s*= sqrt(0.5);
4283 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Quantizer noise-shaping refinement: starting from an already-quantized
 * block, greedily tries +/-1 changes to individual coefficients, scoring each
 * change by weighted reconstruction error (try_8x8basis) plus the VLC bit
 * cost delta, and applies changes while they improve the score.
 * Returns the (possibly updated) index of the last nonzero coefficient.
 * NOTE(review): this listing omits many original lines (declarations of rem[],
 * run/score bookkeeping, loop headers, else branches and closing braces, and
 * the outer retry loop) — the visible lines are NOT contiguous. */
4290 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4291 int16_t *block, int16_t *weight, int16_t *orig,
4294 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4295 const uint8_t *scantable;
4296 const uint8_t *perm_scantable;
4297 // unsigned int threshold1, threshold2;
4302 int qmul, qadd, start_i, last_non_zero, i, dc;
4304 uint8_t * last_length;
4306 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* Lazily build the DCT basis table on first use. */
4308 if(basis[0][0] == 0)
4309 build_basis(s->idsp.idct_permutation);
4314 scantable= s->intra_scantable.scantable;
4315 perm_scantable= s->intra_scantable.permutated;
4322 /* For AIC we skip quant/dequant of INTRADC */
4326 q <<= RECON_SHIFT-3;
4327 /* note: block[0] is assumed to be positive */
4329 // block[0] = (block[0] + (q >> 1)) / q;
4331 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4332 // bias= 1<<(QMAT_SHIFT-1);
4333 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4334 length = s->intra_chroma_ac_vlc_length;
4335 last_length= s->intra_chroma_ac_vlc_last_length;
4337 length = s->intra_ac_vlc_length;
4338 last_length= s->intra_ac_vlc_last_length;
4341 scantable= s->inter_scantable.scantable;
4342 perm_scantable= s->inter_scantable.permutated;
4345 length = s->inter_ac_vlc_length;
4346 last_length= s->inter_ac_vlc_last_length;
4348 last_non_zero = s->block_last_index[n];
/* rem[] starts as the negated original signal (scaled by RECON_SHIFT);
 * adding each coded coefficient's basis function turns it into the
 * reconstruction residual. */
4350 dc += (1<<(RECON_SHIFT-1));
4351 for(i=0; i<64; i++){
4352 rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
/* Map the perceptual weights into the 16..63 range used by try_8x8basis. */
4356 for(i=0; i<64; i++){
4361 w= FFABS(weight[i]) + qns*one;
4362 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4365 // w=weight[i] = (63*qns + (w/2)) / w;
4368 av_assert2(w<(1<<6));
4371 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* Add the basis contribution of every currently-coded coefficient and
 * record the run-length structure in run_tab[]. */
4375 for(i=start_i; i<=last_non_zero; i++){
4376 int j= perm_scantable[i];
4377 const int level= block[j];
4381 if(level<0) coeff= qmul*level - qadd;
4382 else coeff= qmul*level + qadd;
4383 run_tab[rle_index++]=run;
4386 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
/* Search loop: current residual score with no change as the baseline. */
4393 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4396 int run2, best_unquant_change=0, analyze_gradient;
4397 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
4399 if(analyze_gradient){
4400 for(i=0; i<64; i++){
4403 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
/* Try +/-1 on the intra DC coefficient first. */
4409 const int level= block[0];
4410 int change, old_coeff;
4412 av_assert2(s->mb_intra);
4416 for(change=-1; change<=1; change+=2){
4417 int new_level= level + change;
4418 int score, new_coeff;
4420 new_coeff= q*new_level;
4421 if(new_coeff >= 2048 || new_coeff < 0)
4424 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4425 new_coeff - old_coeff);
4426 if(score<best_score){
4429 best_change= change;
4430 best_unquant_change= new_coeff - old_coeff;
4437 run2= run_tab[rle_index++];
/* Try +/-1 on each AC coefficient, accounting for how the change alters the
 * (run, level) VLC structure around it. */
4441 for(i=start_i; i<64; i++){
4442 int j= perm_scantable[i];
4443 const int level= block[j];
4444 int change, old_coeff;
4446 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4450 if(level<0) old_coeff= qmul*level - qadd;
4451 else old_coeff= qmul*level + qadd;
4452 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4456 av_assert2(run2>=0 || i >= last_non_zero );
4459 for(change=-1; change<=1; change+=2){
4460 int new_level= level + change;
4461 int score, new_coeff, unquant_change;
4464 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4468 if(new_level<0) new_coeff= qmul*new_level - qadd;
4469 else new_coeff= qmul*new_level + qadd;
4470 if(new_coeff >= 2048 || new_coeff <= -2048)
4472 //FIXME check for overflow
/* Nonzero -> nonzero: simple VLC length delta for the changed level. */
4475 if(level < 63 && level > -63){
4476 if(i < last_non_zero)
4477 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4478 - length[UNI_AC_ENC_INDEX(run, level+64)];
4480 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4481 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* Zero -> +/-1: a new coefficient appears, splitting the following run. */
4484 av_assert2(FFABS(new_level)==1);
4486 if(analyze_gradient){
4487 int g= d1[ scantable[i] ];
4488 if(g && (g^new_level) >= 0)
4492 if(i < last_non_zero){
4493 int next_i= i + run2 + 1;
4494 int next_level= block[ perm_scantable[next_i] ] + 64;
4496 if(next_level&(~127))
4499 if(next_i < last_non_zero)
4500 score += length[UNI_AC_ENC_INDEX(run, 65)]
4501 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4502 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4504 score += length[UNI_AC_ENC_INDEX(run, 65)]
4505 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4506 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4508 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4510 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4511 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* +/-1 -> zero: the coefficient disappears, merging neighbouring runs. */
4517 av_assert2(FFABS(level)==1);
4519 if(i < last_non_zero){
4520 int next_i= i + run2 + 1;
4521 int next_level= block[ perm_scantable[next_i] ] + 64;
4523 if(next_level&(~127))
4526 if(next_i < last_non_zero)
4527 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4528 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4529 - length[UNI_AC_ENC_INDEX(run, 65)];
4531 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4532 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4533 - length[UNI_AC_ENC_INDEX(run, 65)];
4535 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4537 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4538 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4545 unquant_change= new_coeff - old_coeff;
4546 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4548 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4550 if(score<best_score){
4553 best_change= change;
4554 best_unquant_change= unquant_change;
4558 prev_level= level + 64;
4559 if(prev_level&(~127))
/* Apply the winning change, update last_non_zero and the run table, and
 * fold the change back into the residual. */
4569 int j= perm_scantable[ best_coeff ];
4571 block[j] += best_change;
4573 if(best_coeff > last_non_zero){
4574 last_non_zero= best_coeff;
4575 av_assert2(block[j]);
4577 for(; last_non_zero>=start_i; last_non_zero--){
4578 if(block[perm_scantable[last_non_zero]])
4585 for(i=start_i; i<=last_non_zero; i++){
4586 int j= perm_scantable[i];
4587 const int level= block[j];
4590 run_tab[rle_index++]=run;
4597 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4603 return last_non_zero;
4607 * Permute an 8x8 block according to permutation.
4608 * @param block the block which will be permuted according to
4609 * the given permutation vector
4610 * @param permutation the permutation vector
4611 * @param last the last non zero coefficient in scantable order, used to
4612 * speed the permutation up
4613 * @param scantable the used scantable, this is only used to speed the
4614 * permutation up, the block is not (inverse) permutated
4615 * to scantable order!
/* NOTE(review): the declarations (loop index, temp buffer), the early-return
 * for last <= 0, the first loop's body lines and the closing braces are among
 * the original lines omitted by this listing. Visible structure: copy the
 * coefficients out (first loop), then write each back at its permuted
 * position (second loop). */
4617 void ff_block_permute(int16_t *block, uint8_t *permutation,
4618 const uint8_t *scantable, int last)
4625 //FIXME it is ok but not clean and might fail for some permutations
4626 // if (permutation[1] == 1)
/* Pass 1: stash the nonzero region (and presumably clear it — the body lines
 * are not visible here). */
4629 for (i = 0; i <= last; i++) {
4630 const int j = scantable[i];
/* Pass 2: write each stashed coefficient to its permuted destination. */
4635 for (i = 0; i <= last; i++) {
4636 const int j = scantable[i];
4637 const int perm_j = permutation[j];
4638 block[perm_j] = temp[j];
/* Plain (non-trellis) C quantization of one 8x8 block: forward DCT, optional
 * denoising, per-coefficient multiply by the precomputed quant matrix with
 * rounding bias, then an IDCT-order permutation of the nonzero coefficients.
 * Returns the index of the last nonzero coefficient in scan order.
 * NOTE(review): several original lines are omitted by this listing
 * (intra/inter branch structure, `j = scantable[i];` assignments, the
 * negative-level branch and closing braces). */
4642 int ff_dct_quantize_c(MpegEncContext *s,
4643 int16_t *block, int n,
4644 int qscale, int *overflow)
4646 int i, j, level, last_non_zero, q, start_i;
4648 const uint8_t *scantable;
4651 unsigned int threshold1, threshold2;
4653 s->fdsp.fdct(block);
4655 if(s->dct_error_sum)
4656 s->denoise_dct(s, block);
/* Intra path: separate DC handling, intra matrix and bias. */
4659 scantable= s->intra_scantable.scantable;
4667 /* For AIC we skip quant/dequant of INTRADC */
4670 /* note: block[0] is assumed to be positive */
4671 block[0] = (block[0] + (q >> 1)) / q;
4674 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4675 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
/* Inter path. */
4677 scantable= s->inter_scantable.scantable;
4680 qmat = s->q_inter_matrix[qscale];
4681 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4683 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4684 threshold2= (threshold1<<1);
/* Backward scan to find the last coefficient surviving quantization. */
4685 for(i=63;i>=start_i;i--) {
4687 level = block[j] * qmat[j];
4689 if(((unsigned)(level+threshold1))>threshold2){
/* Forward pass: quantize each surviving coefficient with rounding bias. */
4696 for(i=start_i; i<=last_non_zero; i++) {
4698 level = block[j] * qmat[j];
4700 // if( bias+level >= (1<<QMAT_SHIFT)
4701 // || bias-level >= (1<<QMAT_SHIFT)){
4702 if(((unsigned)(level+threshold1))>threshold2){
4704 level= (bias + level)>>QMAT_SHIFT;
4707 level= (bias - level)>>QMAT_SHIFT;
4715 *overflow= s->max_qcoeff < max; //overflow might have happened
4717 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4718 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4719 ff_block_permute(block, s->idsp.idct_permutation,
4720 scantable, last_non_zero);
4722 return last_non_zero;
/* AVOption helper macros: OFFSET maps an option to its MpegEncContext field;
 * VE marks options as video+encoding parameters. */
4725 #define OFFSET(x) offsetof(MpegEncContext, x)
4726 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options of the H.263 encoder.
 * NOTE(review): the terminating { NULL } entry and closing brace are among
 * the lines omitted by this listing. */
4727 static const AVOption h263_options[] = {
4728 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4729 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass exposing h263_options through the AVOption API.
 * NOTE(review): the closing `};` is among the lines omitted by this listing. */
4734 static const AVClass h263_class = {
4735 .class_name = "H.263 encoder",
4736 .item_name = av_default_item_name,
4737 .option = h263_options,
4738 .version = LIBAVUTIL_VERSION_INT,
/* H.263 encoder registration, backed by the shared mpegvideo encode
 * entry points (ff_mpv_encode_init/picture/end).
 * NOTE(review): the .name initializer (line 4742) and the closing `};` are
 * among the lines omitted by this listing. */
4741 AVCodec ff_h263_encoder = {
4743 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4744 .type = AVMEDIA_TYPE_VIDEO,
4745 .id = AV_CODEC_ID_H263,
4746 .priv_data_size = sizeof(MpegEncContext),
4747 .init = ff_mpv_encode_init,
4748 .encode2 = ff_mpv_encode_picture,
4749 .close = ff_mpv_encode_end,
4750 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4751 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4752 .priv_class = &h263_class,
/* Private options of the H.263+ (H.263v2) encoder.
 * NOTE(review): the terminating { NULL } entry and closing brace are among
 * the lines omitted by this listing. */
4755 static const AVOption h263p_options[] = {
4756 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4757 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4758 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4759 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
4763 static const AVClass h263p_class = {
4764 .class_name = "H.263p encoder",
4765 .item_name = av_default_item_name,
4766 .option = h263p_options,
4767 .version = LIBAVUTIL_VERSION_INT,
4770 AVCodec ff_h263p_encoder = {
4772 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4773 .type = AVMEDIA_TYPE_VIDEO,
4774 .id = AV_CODEC_ID_H263P,
4775 .priv_data_size = sizeof(MpegEncContext),
4776 .init = ff_mpv_encode_init,
4777 .encode2 = ff_mpv_encode_picture,
4778 .close = ff_mpv_encode_end,
4779 .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4780 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4781 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4782 .priv_class = &h263p_class,
4785 static const AVClass msmpeg4v2_class = {
4786 .class_name = "msmpeg4v2 encoder",
4787 .item_name = av_default_item_name,
4788 .option = ff_mpv_generic_options,
4789 .version = LIBAVUTIL_VERSION_INT,
4792 AVCodec ff_msmpeg4v2_encoder = {
4793 .name = "msmpeg4v2",
4794 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4795 .type = AVMEDIA_TYPE_VIDEO,
4796 .id = AV_CODEC_ID_MSMPEG4V2,
4797 .priv_data_size = sizeof(MpegEncContext),
4798 .init = ff_mpv_encode_init,
4799 .encode2 = ff_mpv_encode_picture,
4800 .close = ff_mpv_encode_end,
4801 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4802 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4803 .priv_class = &msmpeg4v2_class,
4806 static const AVClass msmpeg4v3_class = {
4807 .class_name = "msmpeg4v3 encoder",
4808 .item_name = av_default_item_name,
4809 .option = ff_mpv_generic_options,
4810 .version = LIBAVUTIL_VERSION_INT,
4813 AVCodec ff_msmpeg4v3_encoder = {
4815 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4816 .type = AVMEDIA_TYPE_VIDEO,
4817 .id = AV_CODEC_ID_MSMPEG4V3,
4818 .priv_data_size = sizeof(MpegEncContext),
4819 .init = ff_mpv_encode_init,
4820 .encode2 = ff_mpv_encode_picture,
4821 .close = ff_mpv_encode_end,
4822 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4823 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4824 .priv_class = &msmpeg4v3_class,
4827 static const AVClass wmv1_class = {
4828 .class_name = "wmv1 encoder",
4829 .item_name = av_default_item_name,
4830 .option = ff_mpv_generic_options,
4831 .version = LIBAVUTIL_VERSION_INT,
4834 AVCodec ff_wmv1_encoder = {
4836 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4837 .type = AVMEDIA_TYPE_VIDEO,
4838 .id = AV_CODEC_ID_WMV1,
4839 .priv_data_size = sizeof(MpegEncContext),
4840 .init = ff_mpv_encode_init,
4841 .encode2 = ff_mpv_encode_picture,
4842 .close = ff_mpv_encode_end,
4843 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4844 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4845 .priv_class = &wmv1_class,