2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/mem_internal.h"
40 #include "libavutil/pixdesc.h"
41 #include "libavutil/opt.h"
42 #include "libavutil/thread.h"
47 #include "mpegvideo.h"
48 #include "mpegvideodata.h"
52 #include "mjpegenc_common.h"
54 #include "mpegutils.h"
56 #include "speedhqenc.h"
58 #include "pixblockdsp.h"
62 #include "aandcttab.h"
64 #include "mpeg4video.h"
66 #include "bytestream.h"
69 #include "packet_internal.h"
/* Fixed-point precision of the quantizer bias values (see the
 * intra/inter_quant_bias setup in ff_mpv_encode_init()). */
73 #define QUANT_BIAS_SHIFT 8
/* Shift for the 16-bit quantizer multiplier tables (qmat16) built in
 * ff_convert_matrix(). */
75 #define QMAT_SHIFT_MMX 16
/* Forward declarations of static helpers defined later in this file. */
78 static int encode_picture(MpegEncContext *s, int picture_number);
79 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
80 static int sse_mb(MpegEncContext *s);
81 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
82 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
/* Default motion-vector penalty / f_code tables shared by all encoder
 * instances. default_fcode_tab is filled in mpv_encode_init_static();
 * default_mv_penalty is presumably filled there too — confirm in the
 * full file. */
84 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
85 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
/* Generic AVOption table shared by the mpegvideo-based encoders;
 * referenced from the individual encoders' AVClass definitions. */
87 const AVOption ff_mpv_generic_options[] = {
/**
 * Precompute quantization multiplier tables for every qscale in
 * [qmin, qmax] from the given quantization matrix and rounding bias.
 * qmat receives 32-bit multipliers (per qscale, per coefficient) and
 * qmat16 receives 16-bit multiplier/bias pairs for the fast quantizer.
 * The table layout chosen depends on which forward-DCT implementation
 * is active, since ff_fdct_ifast produces AAN-prescaled coefficients.
 */
92 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
93 uint16_t (*qmat16)[2][64],
94 const uint16_t *quant_matrix,
95 int bias, int qmin, int qmax, int intra)
97 FDCTDSPContext *fdsp = &s->fdsp;
101 for (qscale = qmin; qscale <= qmax; qscale++) {
/* qscale2 is the effective scale: MPEG-2 non-linear mapping when
 * q_scale_type is set, otherwise the linear qscale * 2. */
105 if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
106 else qscale2 = qscale << 1;
/* Accurate integer FDCTs: plain reciprocal tables (no AAN scaling). */
108 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
110 fdsp->fdct == ff_faandct ||
111 #endif /* CONFIG_FAANDCT */
112 fdsp->fdct == ff_jpeg_fdct_islow_10) {
113 for (i = 0; i < 64; i++) {
/* j follows the IDCT's coefficient permutation. */
114 const int j = s->idsp.idct_permutation[i];
115 int64_t den = (int64_t) qscale2 * quant_matrix[j];
116 /* 16 <= qscale * quant_matrix[i] <= 7905
117 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
118 * 19952 <= x <= 249205026
119 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
120 * 3444240 >= (1 << 36) / (x) >= 275 */
122 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
/* AAN fast FDCT: fold the ff_aanscales prescaling into the table. */
124 } else if (fdsp->fdct == ff_fdct_ifast) {
125 for (i = 0; i < 64; i++) {
126 const int j = s->idsp.idct_permutation[i];
127 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
128 /* 16 <= qscale * quant_matrix[i] <= 7905
129 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
130 * 19952 <= x <= 249205026
131 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
132 * 3444240 >= (1 << 36) / (x) >= 275 */
134 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
/* Default path: also build the 16-bit multiplier/bias pair tables. */
137 for (i = 0; i < 64; i++) {
138 const int j = s->idsp.idct_permutation[i];
139 int64_t den = (int64_t) qscale2 * quant_matrix[j];
140 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
141 * Assume x = qscale * quant_matrix[i]
143 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
144 * so 32768 >= (1 << 19) / (x) >= 67 */
145 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
146 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
147 // (qscale * quant_matrix[i]);
148 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
/* Keep the 16-bit multiplier away from 0 and from the 0x8000
 * boundary so the fast quantizer's arithmetic stays valid. */
150 if (qmat16[qscale][0][i] == 0 ||
151 qmat16[qscale][0][i] == 128 * 256)
152 qmat16[qscale][0][i] = 128 * 256 - 1;
153 qmat16[qscale][1][i] =
154 ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
155 qmat16[qscale][0][i]);
/* Overflow guard: shrink the shift until max * qmat fits in an int
 * (for intra, coefficient 0 (DC) is skipped). */
159 for (i = intra; i < 64; i++) {
161 if (fdsp->fdct == ff_fdct_ifast) {
162 max = (8191LL * ff_aanscales[i]) >> 14;
164 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
170 av_log(s->avctx, AV_LOG_INFO,
171 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/* Derive s->qscale (and s->lambda2) from the current rate-control
 * lambda. The lambda -> qscale conversion uses the empirical factor
 * 139 / 2^7 applied in FF_LAMBDA fixed point. */
176 static inline void update_qscale(MpegEncContext *s)
/* NOTE(review): the "&& 0" makes this non-linear-qscale search branch
 * deliberately dead code — the linear mapping below is always used. */
178 if (s->q_scale_type == 1 && 0) {
180 int bestdiff=INT_MAX;
/* Search the MPEG-2 non-linear qscale table for the entry closest to
 * the target lambda, honoring qmin/qmax (qmax is waived while the VBV
 * emergency flag vbv_ignore_qmax is set). */
183 for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
184 int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
185 if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
186 (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
188 if (diff < bestdiff) {
/* Linear mapping: qscale ≈ lambda * 139 / 2^(FF_LAMBDA_SHIFT + 7),
 * rounded, then clipped to the configured range (qmax is relaxed to
 * 31 while VBV emergency mode is active). */
195 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
196 (FF_LAMBDA_SHIFT + 7);
197 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
/* Keep lambda2 = lambda^2 in FF_LAMBDA fixed point, rounded. */
200 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/* Write a 64-entry quantization matrix to the bitstream, 8 bits per
 * coefficient, in zigzag scan order. */
204 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
210 for (i = 0; i < 64; i++) {
211 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
218 * init s->current_picture.qscale_table from s->lambda_table
220 void ff_init_qscale_tab(MpegEncContext *s)
222 int8_t * const qscale_table = s->current_picture.qscale_table;
/* Convert each macroblock's lambda to a qscale value (same
 * lambda * 139 / 2^(FF_LAMBDA_SHIFT + 7) mapping as update_qscale())
 * and clip it into the configured qmin..qmax range. */
225 for (i = 0; i < s->mb_num; i++) {
226 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
227 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
228 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/* Copy the per-frame fields that motion estimation may have changed
 * from the master context into a slice-thread duplicate context. */
233 static void update_duplicate_context_after_me(MpegEncContext *dst,
236 #define COPY(a) dst->a= src->a
238 COPY(current_picture);
244 COPY(picture_in_gop_number);
245 COPY(gop_picture_number);
246 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
247 COPY(progressive_frame); // FIXME don't set in encode_header
248 COPY(partitioned_frame); // FIXME don't set in encode_header
/* One-time (ff_thread_once) initialization of the static default
 * tables shared by all encoder instances. */
252 static void mpv_encode_init_static(void)
/* Motion vectors in [-16, 15] get f_code 1. */
254 for (int i = -16; i < 16; i++)
255 default_fcode_tab[i + MAX_MV] = 1;
259 * Set the given MpegEncContext to defaults for encoding.
260 * the changed fields will not depend upon the prior state of the MpegEncContext.
262 static void mpv_encode_defaults(MpegEncContext *s)
264 static AVOnce init_static_once = AV_ONCE_INIT;
266 ff_mpv_common_defaults(s);
/* Initialize the shared static tables exactly once, thread-safely. */
268 ff_thread_once(&init_static_once, mpv_encode_init_static);
270 s->me.mv_penalty = default_mv_penalty;
271 s->fcode_tab = default_fcode_tab;
/* Reset per-stream counters. */
273 s->input_picture_number = 0;
274 s->picture_in_gop_number = 0;
/* Select the DCT quantizer implementations for this context:
 * architecture-specific versions first, then the C fallbacks, and the
 * trellis quantizer when requested via avctx->trellis. */
277 av_cold int ff_dct_encode_init(MpegEncContext *s)
280 ff_dct_encode_init_x86(s);
282 if (CONFIG_H263_ENCODER)
283 ff_h263dsp_init(&s->h263dsp);
284 if (!s->dct_quantize)
285 s->dct_quantize = ff_dct_quantize_c;
287 s->denoise_dct = denoise_dct_c;
/* Keep the non-trellis quantizer reachable as fast_dct_quantize even
 * when trellis replaces the default entry point below. */
288 s->fast_dct_quantize = s->dct_quantize;
289 if (s->avctx->trellis)
290 s->dct_quantize = dct_quantize_trellis_c;
295 /* init video encoder */
/**
 * One-time initialization of an mpegvideo-based encoder instance
 * (MPEG-1/2/4, H.261, H.263(+), MSMPEG4, WMV1/2, FLV1, RV10/20,
 * MJPEG/AMV, SpeedHQ): validates and reconciles user options, applies
 * per-codec restrictions, allocates quantization and picture tables,
 * and initializes the DSP and rate-control subsystems.
 * Returns 0 on success or a negative AVERROR code.
 */
296 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
298 MpegEncContext *s = avctx->priv_data;
299 AVCPBProperties *cpb_props;
302 mpv_encode_defaults(s);
/* Map the input pixel format to the internal chroma subsampling mode. */
304 switch (avctx->pix_fmt) {
305 case AV_PIX_FMT_YUVJ444P:
306 case AV_PIX_FMT_YUV444P:
307 s->chroma_format = CHROMA_444;
309 case AV_PIX_FMT_YUVJ422P:
310 case AV_PIX_FMT_YUV422P:
311 s->chroma_format = CHROMA_422;
313 case AV_PIX_FMT_YUVJ420P:
314 case AV_PIX_FMT_YUV420P:
316 s->chroma_format = CHROMA_420;
320 avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
/* Import deprecated public AVCodecContext options into the private
 * context (transitional FF_API_PRIVATE_OPT code). */
322 #if FF_API_PRIVATE_OPT
323 FF_DISABLE_DEPRECATION_WARNINGS
324 if (avctx->rtp_payload_size)
325 s->rtp_payload_size = avctx->rtp_payload_size;
326 if (avctx->me_penalty_compensation)
327 s->me_penalty_compensation = avctx->me_penalty_compensation;
329 s->me_pre = avctx->pre_me;
330 FF_ENABLE_DEPRECATION_WARNINGS
/* Copy the basic stream parameters. */
333 s->bit_rate = avctx->bit_rate;
334 s->width = avctx->width;
335 s->height = avctx->height;
/* Clamp overlong GOPs unless experimental compliance allows them. */
336 if (avctx->gop_size > 600 &&
337 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
338 av_log(avctx, AV_LOG_WARNING,
339 "keyframe interval too large!, reducing it from %d to %d\n",
340 avctx->gop_size, 600);
341 avctx->gop_size = 600;
343 s->gop_size = avctx->gop_size;
345 if (avctx->max_b_frames > MAX_B_FRAMES) {
346 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
347 "is %d.\n", MAX_B_FRAMES);
348 avctx->max_b_frames = MAX_B_FRAMES;
350 s->max_b_frames = avctx->max_b_frames;
351 s->codec_id = avctx->codec->id;
352 s->strict_std_compliance = avctx->strict_std_compliance;
353 s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
354 s->rtp_mode = !!s->rtp_payload_size;
355 s->intra_dc_precision = avctx->intra_dc_precision;
357 // workaround some differences between how applications specify dc precision
358 if (s->intra_dc_precision < 0) {
359 s->intra_dc_precision += 8;
360 } else if (s->intra_dc_precision >= 8)
361 s->intra_dc_precision -= 8;
363 if (s->intra_dc_precision < 0) {
364 av_log(avctx, AV_LOG_ERROR,
365 "intra dc precision must be positive, note some applications use"
366 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
367 return AVERROR(EINVAL);
370 if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))
/* Only MPEG-2 supports a non-zero intra DC precision (up to 3). */
373 if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
374 av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
375 return AVERROR(EINVAL);
377 s->user_specified_pts = AV_NOPTS_VALUE;
379 if (s->gop_size <= 1) {
387 s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
/* Adaptive quantization is enabled by any of the masking options or
 * QP-RD, provided (per the elided conditions) the mode supports it. */
389 s->adaptive_quant = (avctx->lumi_masking ||
390 avctx->dark_masking ||
391 avctx->temporal_cplx_masking ||
392 avctx->spatial_cplx_masking ||
395 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
398 s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
/* Pick a default VBV buffer size when only a max rate was given. */
400 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
401 switch(avctx->codec_id) {
402 case AV_CODEC_ID_MPEG1VIDEO:
403 case AV_CODEC_ID_MPEG2VIDEO:
404 avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
406 case AV_CODEC_ID_MPEG4:
407 case AV_CODEC_ID_MSMPEG4V1:
408 case AV_CODEC_ID_MSMPEG4V2:
409 case AV_CODEC_ID_MSMPEG4V3:
/* Piecewise-linear VBV size (in 16 KiB units) by max rate band. */
410 if (avctx->rc_max_rate >= 15000000) {
411 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
412 } else if(avctx->rc_max_rate >= 2000000) {
413 avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
414 } else if(avctx->rc_max_rate >= 384000) {
415 avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
417 avctx->rc_buffer_size = 40;
418 avctx->rc_buffer_size *= 16384;
421 if (avctx->rc_buffer_size) {
422 av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
/* Rate-control sanity checks. */
426 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
427 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
428 return AVERROR(EINVAL);
431 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
432 av_log(avctx, AV_LOG_INFO,
433 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
436 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
437 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
438 return AVERROR(EINVAL);
441 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
442 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
443 return AVERROR(EINVAL);
446 if (avctx->rc_max_rate &&
447 avctx->rc_max_rate == avctx->bit_rate &&
448 avctx->rc_max_rate != avctx->rc_min_rate) {
449 av_log(avctx, AV_LOG_INFO,
450 "impossible bitrate constraints, this will fail\n");
/* The VBV buffer must be able to hold at least one time_base tick's
 * worth of bits. */
453 if (avctx->rc_buffer_size &&
454 avctx->bit_rate * (int64_t)avctx->time_base.num >
455 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
456 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
457 return AVERROR(EINVAL);
460 if (!s->fixed_qscale &&
461 avctx->bit_rate * av_q2d(avctx->time_base) >
462 avctx->bit_rate_tolerance) {
463 av_log(avctx, AV_LOG_WARNING,
464 "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
465 avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
/* CBR MPEG-1/2 with an oversized VBV: the vbv_delay field (16 bits at
 * 90 kHz) cannot represent the buffer, so it degenerates to VBR. */
468 if (avctx->rc_max_rate &&
469 avctx->rc_min_rate == avctx->rc_max_rate &&
470 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
471 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
472 90000LL * (avctx->rc_buffer_size - 1) >
473 avctx->rc_max_rate * 0xFFFFLL) {
474 av_log(avctx, AV_LOG_INFO,
475 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
476 "specified vbv buffer is too large for the given bitrate!\n");
/* Codec-capability checks for the requested feature flags. */
479 if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
480 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
481 s->codec_id != AV_CODEC_ID_FLV1) {
482 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
483 return AVERROR(EINVAL);
486 if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
487 av_log(avctx, AV_LOG_ERROR,
488 "OBMC is only supported with simple mb decision\n");
489 return AVERROR(EINVAL);
492 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
493 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
494 return AVERROR(EINVAL);
497 if (s->max_b_frames &&
498 s->codec_id != AV_CODEC_ID_MPEG4 &&
499 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
500 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
501 av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
502 return AVERROR(EINVAL);
504 if (s->max_b_frames < 0) {
505 av_log(avctx, AV_LOG_ERROR,
506 "max b frames must be 0 or positive for mpegvideo based encoders\n");
507 return AVERROR(EINVAL);
/* Codec-specific geometry / aspect-ratio restrictions. */
510 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
511 s->codec_id == AV_CODEC_ID_H263 ||
512 s->codec_id == AV_CODEC_ID_H263P) &&
513 (avctx->sample_aspect_ratio.num > 255 ||
514 avctx->sample_aspect_ratio.den > 255)) {
515 av_log(avctx, AV_LOG_WARNING,
516 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
517 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
518 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
519 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
522 if ((s->codec_id == AV_CODEC_ID_H263 ||
523 s->codec_id == AV_CODEC_ID_H263P) &&
524 (avctx->width > 2048 ||
525 avctx->height > 1152 )) {
526 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
527 return AVERROR(EINVAL);
529 if ((s->codec_id == AV_CODEC_ID_H263 ||
530 s->codec_id == AV_CODEC_ID_H263P) &&
531 ((avctx->width &3) ||
532 (avctx->height&3) )) {
533 av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
534 return AVERROR(EINVAL);
537 if (s->codec_id == AV_CODEC_ID_RV10 &&
539 avctx->height&15 )) {
540 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
541 return AVERROR(EINVAL);
544 if (s->codec_id == AV_CODEC_ID_RV20 &&
547 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
548 return AVERROR(EINVAL);
551 if ((s->codec_id == AV_CODEC_ID_WMV1 ||
552 s->codec_id == AV_CODEC_ID_WMV2) &&
554 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
555 return AVERROR(EINVAL);
558 if ((avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
559 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
560 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
561 return AVERROR(EINVAL);
564 #if FF_API_PRIVATE_OPT
565 FF_DISABLE_DEPRECATION_WARNINGS
566 if (avctx->mpeg_quant)
568 FF_ENABLE_DEPRECATION_WARNINGS
571 // FIXME mpeg2 uses that too
572 if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
573 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
574 av_log(avctx, AV_LOG_ERROR,
575 "mpeg2 style quantization not supported by codec\n");
576 return AVERROR(EINVAL);
/* Cross-option consistency checks. */
579 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
580 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
581 return AVERROR(EINVAL);
584 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
585 avctx->mb_decision != FF_MB_DECISION_RD) {
586 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
587 return AVERROR(EINVAL);
590 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
591 (s->codec_id == AV_CODEC_ID_AMV ||
592 s->codec_id == AV_CODEC_ID_MJPEG)) {
593 // Used to produce garbage with MJPEG.
594 av_log(avctx, AV_LOG_ERROR,
595 "QP RD is no longer compatible with MJPEG or AMV\n");
596 return AVERROR(EINVAL);
599 #if FF_API_PRIVATE_OPT
600 FF_DISABLE_DEPRECATION_WARNINGS
601 if (avctx->scenechange_threshold)
602 s->scenechange_threshold = avctx->scenechange_threshold;
603 FF_ENABLE_DEPRECATION_WARNINGS
606 if (s->scenechange_threshold < 1000000000 &&
607 (avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
608 av_log(avctx, AV_LOG_ERROR,
609 "closed gop with scene change detection are not supported yet, "
610 "set threshold to 1000000000\n");
611 return AVERROR_PATCHWELCOME;
614 if (avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
615 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
616 s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
617 av_log(avctx, AV_LOG_ERROR,
618 "low delay forcing is only available for mpeg2, "
619 "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
620 return AVERROR(EINVAL);
622 if (s->max_b_frames != 0) {
623 av_log(avctx, AV_LOG_ERROR,
624 "B-frames cannot be used with low delay\n");
625 return AVERROR(EINVAL);
629 if (s->q_scale_type == 1) {
630 if (avctx->qmax > 28) {
631 av_log(avctx, AV_LOG_ERROR,
632 "non linear quant only supports qmax <= 28 currently\n");
633 return AVERROR_PATCHWELCOME;
/* Slice/threading capability checks. */
637 if (avctx->slices > 1 &&
638 (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
639 av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
640 return AVERROR(EINVAL);
643 if (avctx->thread_count > 1 &&
644 s->codec_id != AV_CODEC_ID_MPEG4 &&
645 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
646 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
647 s->codec_id != AV_CODEC_ID_MJPEG &&
648 (s->codec_id != AV_CODEC_ID_H263P)) {
649 av_log(avctx, AV_LOG_ERROR,
650 "multi threaded encoding not supported by codec\n");
651 return AVERROR_PATCHWELCOME;
654 if (avctx->thread_count < 1) {
655 av_log(avctx, AV_LOG_ERROR,
656 "automatic thread number detection not supported by codec, "
658 return AVERROR_PATCHWELCOME;
661 #if FF_API_PRIVATE_OPT
662 FF_DISABLE_DEPRECATION_WARNINGS
663 if (avctx->b_frame_strategy)
664 s->b_frame_strategy = avctx->b_frame_strategy;
665 if (avctx->b_sensitivity != 40)
666 s->b_sensitivity = avctx->b_sensitivity;
667 FF_ENABLE_DEPRECATION_WARNINGS
670 if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
671 av_log(avctx, AV_LOG_INFO,
672 "notice: b_frame_strategy only affects the first pass\n");
673 s->b_frame_strategy = 0;
/* Reduce the time base to lowest terms. */
676 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
678 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
679 avctx->time_base.den /= i;
680 avctx->time_base.num /= i;
/* Pick the quantizer rounding biases: 3/8 for the MPEG-2/JPEG-style
 * codecs, 0 intra and -1/4 inter otherwise. */
684 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
685 // (a + x * 3 / 8) / x
686 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
687 s->inter_quant_bias = 0;
689 s->intra_quant_bias = 0;
691 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
694 if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
695 av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
696 return AVERROR(EINVAL);
699 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
/* MPEG-4 stores the time base denominator in a 16-bit field. */
701 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
702 avctx->time_base.den > (1 << 16) - 1) {
703 av_log(avctx, AV_LOG_ERROR,
704 "timebase %d/%d not supported by MPEG 4 standard, "
705 "the maximum admitted value for the timebase denominator "
706 "is %d\n", avctx->time_base.num, avctx->time_base.den,
708 return AVERROR(EINVAL);
710 s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
/* Per-codec output format and feature selection. */
712 switch (avctx->codec->id) {
713 case AV_CODEC_ID_MPEG1VIDEO:
714 s->out_format = FMT_MPEG1;
715 s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
716 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
718 case AV_CODEC_ID_MPEG2VIDEO:
719 s->out_format = FMT_MPEG1;
720 s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
721 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
724 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
725 case AV_CODEC_ID_MJPEG:
726 case AV_CODEC_ID_AMV:
727 s->out_format = FMT_MJPEG;
728 s->intra_only = 1; /* force intra only for jpeg */
729 if ((ret = ff_mjpeg_encode_init(s)) < 0)
735 case AV_CODEC_ID_SPEEDHQ:
736 s->out_format = FMT_SPEEDHQ;
737 s->intra_only = 1; /* force intra only for SHQ */
738 if (!CONFIG_SPEEDHQ_ENCODER)
739 return AVERROR_ENCODER_NOT_FOUND;
740 if ((ret = ff_speedhq_encode_init(s)) < 0)
745 case AV_CODEC_ID_H261:
746 if (!CONFIG_H261_ENCODER)
747 return AVERROR_ENCODER_NOT_FOUND;
748 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
749 av_log(avctx, AV_LOG_ERROR,
750 "The specified picture size of %dx%d is not valid for the "
751 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
752 s->width, s->height);
753 return AVERROR(EINVAL);
755 s->out_format = FMT_H261;
758 s->rtp_mode = 0; /* Sliced encoding not supported */
760 case AV_CODEC_ID_H263:
761 if (!CONFIG_H263_ENCODER)
762 return AVERROR_ENCODER_NOT_FOUND;
763 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
764 s->width, s->height) == 8) {
765 av_log(avctx, AV_LOG_ERROR,
766 "The specified picture size of %dx%d is not valid for "
767 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
768 "352x288, 704x576, and 1408x1152. "
769 "Try H.263+.\n", s->width, s->height);
770 return AVERROR(EINVAL);
772 s->out_format = FMT_H263;
776 case AV_CODEC_ID_H263P:
777 s->out_format = FMT_H263;
780 s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
781 s->modified_quant = s->h263_aic;
782 s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
783 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
786 /* These are just to be sure */
790 case AV_CODEC_ID_FLV1:
791 s->out_format = FMT_H263;
792 s->h263_flv = 2; /* format = 1; 11-bit codes */
793 s->unrestricted_mv = 1;
794 s->rtp_mode = 0; /* don't allow GOB */
798 case AV_CODEC_ID_RV10:
799 s->out_format = FMT_H263;
803 case AV_CODEC_ID_RV20:
804 s->out_format = FMT_H263;
807 s->modified_quant = 1;
811 s->unrestricted_mv = 0;
813 case AV_CODEC_ID_MPEG4:
814 s->out_format = FMT_H263;
816 s->unrestricted_mv = 1;
817 s->low_delay = s->max_b_frames ? 0 : 1;
818 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
820 case AV_CODEC_ID_MSMPEG4V2:
821 s->out_format = FMT_H263;
823 s->unrestricted_mv = 1;
824 s->msmpeg4_version = 2;
828 case AV_CODEC_ID_MSMPEG4V3:
829 s->out_format = FMT_H263;
831 s->unrestricted_mv = 1;
832 s->msmpeg4_version = 3;
833 s->flipflop_rounding = 1;
837 case AV_CODEC_ID_WMV1:
838 s->out_format = FMT_H263;
840 s->unrestricted_mv = 1;
841 s->msmpeg4_version = 4;
842 s->flipflop_rounding = 1;
846 case AV_CODEC_ID_WMV2:
847 s->out_format = FMT_H263;
849 s->unrestricted_mv = 1;
850 s->msmpeg4_version = 5;
851 s->flipflop_rounding = 1;
856 return AVERROR(EINVAL);
859 #if FF_API_PRIVATE_OPT
860 FF_DISABLE_DEPRECATION_WARNINGS
861 if (avctx->noise_reduction)
862 s->noise_reduction = avctx->noise_reduction;
863 FF_ENABLE_DEPRECATION_WARNINGS
866 avctx->has_b_frames = !s->low_delay;
870 s->progressive_frame =
871 s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
872 AV_CODEC_FLAG_INTERLACED_ME) ||
877 if ((ret = ff_mpv_common_init(s)) < 0)
/* Initialize the DSP helper contexts. */
880 ff_fdctdsp_init(&s->fdsp, avctx);
881 ff_me_cmp_init(&s->mecc, avctx);
882 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
883 ff_pixblockdsp_init(&s->pdsp, avctx);
884 ff_qpeldsp_init(&s->qdsp);
886 if (s->msmpeg4_version) {
887 int ac_stats_size = 2 * 2 * (MAX_LEVEL + 1) * (MAX_RUN + 1) * 2 * sizeof(int);
888 if (!(s->ac_stats = av_mallocz(ac_stats_size)))
889 return AVERROR(ENOMEM);
/* Allocate the per-instance quantization / picture tables; all of
 * these are released again in ff_mpv_encode_end(). */
892 if (!(avctx->stats_out = av_mallocz(256)) ||
893 !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix, 32) ||
894 !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix, 32) ||
895 !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix, 32) ||
896 !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16, 32) ||
897 !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
898 !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16, 32) ||
899 !FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_PICTURE_COUNT) ||
900 !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_PICTURE_COUNT))
901 return AVERROR(ENOMEM);
903 if (s->noise_reduction) {
904 if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
905 return AVERROR(ENOMEM);
908 ff_dct_encode_init(s);
910 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
911 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
913 if (s->slice_context_count > 1) {
916 if (avctx->codec_id == AV_CODEC_ID_H263P)
917 s->h263_slice_structured = 1;
920 s->quant_precision = 5;
922 #if FF_API_PRIVATE_OPT
923 FF_DISABLE_DEPRECATION_WARNINGS
924 if (avctx->frame_skip_threshold)
925 s->frame_skip_threshold = avctx->frame_skip_threshold;
926 if (avctx->frame_skip_factor)
927 s->frame_skip_factor = avctx->frame_skip_factor;
928 if (avctx->frame_skip_exp)
929 s->frame_skip_exp = avctx->frame_skip_exp;
930 if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
931 s->frame_skip_cmp = avctx->frame_skip_cmp;
932 FF_ENABLE_DEPRECATION_WARNINGS
935 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, avctx->ildct_cmp);
936 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
/* Format-specific encoder sub-initialization. */
938 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
939 ff_h261_encode_init(s);
940 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
941 ff_h263_encode_init(s);
942 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
943 ff_msmpeg4_encode_init(s);
944 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
945 && s->out_format == FMT_MPEG1)
946 ff_mpeg1_encode_init(s);
/* Build the intra/inter quantization matrices (codec defaults,
 * optionally overridden by user-supplied matrices), stored in the
 * IDCT's coefficient permutation order. */
949 for (i = 0; i < 64; i++) {
950 int j = s->idsp.idct_permutation[i];
951 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
953 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
954 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
955 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
957 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
958 } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
960 s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
963 s->chroma_intra_matrix[j] =
964 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
965 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
967 if (avctx->intra_matrix)
968 s->intra_matrix[j] = avctx->intra_matrix[i];
969 if (avctx->inter_matrix)
970 s->inter_matrix[j] = avctx->inter_matrix[i];
973 /* precompute matrix */
974 /* for mjpeg, we do include qscale in the matrix */
975 if (s->out_format != FMT_MJPEG) {
976 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
977 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
979 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
980 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
984 if ((ret = ff_rate_control_init(s)) < 0)
987 #if FF_API_PRIVATE_OPT
988 FF_DISABLE_DEPRECATION_WARNINGS
989 if (avctx->brd_scale)
990 s->brd_scale = avctx->brd_scale;
992 if (avctx->prediction_method)
993 s->pred = avctx->prediction_method + 1;
994 FF_ENABLE_DEPRECATION_WARNINGS
/* b_frame_strategy == 2 needs temporary (optionally downscaled)
 * frames to evaluate B-frame placement. */
997 if (s->b_frame_strategy == 2) {
998 for (i = 0; i < s->max_b_frames + 2; i++) {
999 s->tmp_frames[i] = av_frame_alloc();
1000 if (!s->tmp_frames[i])
1001 return AVERROR(ENOMEM);
1003 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1004 s->tmp_frames[i]->width = s->width >> s->brd_scale;
1005 s->tmp_frames[i]->height = s->height >> s->brd_scale;
1007 ret = av_frame_get_buffer(s->tmp_frames[i], 0);
/* Export the coded-picture-buffer properties as stream side data. */
1013 cpb_props = ff_add_cpb_side_data(avctx);
1015 return AVERROR(ENOMEM);
1016 cpb_props->max_bitrate = avctx->rc_max_rate;
1017 cpb_props->min_bitrate = avctx->rc_min_rate;
1018 cpb_props->avg_bitrate = avctx->bit_rate;
1019 cpb_props->buffer_size = avctx->rc_buffer_size;
/* Free everything allocated by ff_mpv_encode_init() and shut down the
 * rate-control and common mpegvideo state. */
1024 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1026 MpegEncContext *s = avctx->priv_data;
1029 ff_rate_control_uninit(s);
1031 ff_mpv_common_end(s);
1032 if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
1033 s->out_format == FMT_MJPEG)
1034 ff_mjpeg_encode_close(s);
1036 av_freep(&avctx->extradata);
1038 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1039 av_frame_free(&s->tmp_frames[i]);
1041 ff_free_picture_tables(&s->new_picture);
1042 ff_mpeg_unref_picture(avctx, &s->new_picture);
1044 av_freep(&avctx->stats_out);
1045 av_freep(&s->ac_stats);
/* The chroma matrices may alias the luma ones; only free them when
 * they are separate allocations, then clear the pointers either way
 * to avoid dangling aliases. */
1047 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1048 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1049 s->q_chroma_intra_matrix= NULL;
1050 s->q_chroma_intra_matrix16= NULL;
1051 av_freep(&s->q_intra_matrix);
1052 av_freep(&s->q_inter_matrix);
1053 av_freep(&s->q_intra_matrix16);
1054 av_freep(&s->q_inter_matrix16);
1055 av_freep(&s->input_picture);
1056 av_freep(&s->reordered_input_picture);
1057 av_freep(&s->dct_offset);
/* Sum of absolute errors of a 16x16 pixel block against the constant
 * value ref — a cheap measure of how well the block is described by
 * its mean. */
1062 static int get_sae(uint8_t *src, int ref, int stride)
1067 for (y = 0; y < 16; y++) {
1068 for (x = 0; x < 16; x++) {
1069 acc += FFABS(src[x + y * stride] - ref);
/* Count the 16x16 macroblocks for which intra coding looks cheaper
 * than inter coding: for each block, compare the SAD against the
 * reference frame with the SAE around the block's own mean (plus a
 * fixed 500 penalty favoring inter). */
1076 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1077 uint8_t *ref, int stride)
/* Iterate only over whole macroblocks (dimensions rounded down). */
1083 h = s->height & ~15;
1085 for (y = 0; y < h; y += 16) {
1086 for (x = 0; x < w; x += 16) {
1087 int offset = x + y * stride;
1088 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
/* Block mean, rounded: pix_sum / 256. */
1090 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1091 int sae = get_sae(src + offset, mean, stride);
1093 acc += sae + 500 < sad;
/* Thin wrapper around ff_alloc_picture() supplying this encoder's
 * geometry and stride parameters; `shared` selects shared (zero-copy)
 * buffers. Returns 0 on success or a negative AVERROR code. */
1099 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1101 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1102 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1103 s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1104 &s->linesize, &s->uvlinesize);
/* Take one user-supplied input frame and queue it into s->input_picture[]
 * for later reordering. Validates/derives the pts, decides between a
 * zero-copy ("direct"/shared) reference and a full copy into an internal
 * buffer, and maintains the B-frame encoding-delay queue. Returns 0 on
 * success or a negative AVERROR. */
1107 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1109 Picture *pic = NULL;
1111 int i, display_picture_number = 0, ret;
/* queue depth: max_b_frames if B-frames are used, else 1 unless low_delay */
1112 int encoding_delay = s->max_b_frames ? s->max_b_frames
1113 : (s->low_delay ? 0 : 1);
1114 int flush_offset = 1;
1119 display_picture_number = s->input_picture_number++;
/* pts sanity: must be strictly increasing versus the last user pts */
1121 if (pts != AV_NOPTS_VALUE) {
1122 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1123 int64_t last = s->user_specified_pts;
1126 av_log(s->avctx, AV_LOG_ERROR,
1127 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1129 return AVERROR(EINVAL);
1132 if (!s->low_delay && display_picture_number == 1)
1133 s->dts_delta = pts - last;
1135 s->user_specified_pts = pts;
/* no pts on this frame: extrapolate from the previous one, or fall back
 * to the display picture number */
1137 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1138 s->user_specified_pts =
1139 pts = s->user_specified_pts + 1;
1140 av_log(s->avctx, AV_LOG_INFO,
1141 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1144 pts = display_picture_number;
/* direct (zero-copy) use of the caller's buffers is only possible when
 * strides match ours and data/linesize satisfy alignment requirements */
1148 if (!pic_arg->buf[0] ||
1149 pic_arg->linesize[0] != s->linesize ||
1150 pic_arg->linesize[1] != s->uvlinesize ||
1151 pic_arg->linesize[2] != s->uvlinesize)
1153 if ((s->width & 15) || (s->height & 15))
1155 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1157 if (s->linesize & (STRIDE_ALIGN-1))
1160 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1161 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1163 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1167 pic = &s->picture[i];
1171 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1174 ret = alloc_picture(s, pic, direct);
/* caller handed us exactly our own INPLACE_OFFSET layout: nothing to copy */
1179 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1180 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1181 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1184 int h_chroma_shift, v_chroma_shift;
1185 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
/* copy path: per-plane copy into the internal buffer, padding edges */
1189 for (i = 0; i < 3; i++) {
1190 int src_stride = pic_arg->linesize[i];
1191 int dst_stride = i ? s->uvlinesize : s->linesize;
1192 int h_shift = i ? h_chroma_shift : 0;
1193 int v_shift = i ? v_chroma_shift : 0;
1194 int w = s->width >> h_shift;
1195 int h = s->height >> v_shift;
1196 uint8_t *src = pic_arg->data[i];
1197 uint8_t *dst = pic->f->data[i];
/* interlaced MPEG-2 with large 32-line alignment slack needs extra
 * vertical padding (value set in elided code — TODO confirm) */
1200 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1201 && !s->progressive_sequence
1202 && FFALIGN(s->height, 32) - s->height > 16)
1205 if (!s->avctx->rc_buffer_size)
1206 dst += INPLACE_OFFSET;
1208 if (src_stride == dst_stride)
1209 memcpy(dst, src, src_stride * h);
1212 uint8_t *dst2 = dst;
1214 memcpy(dst2, src, w);
/* pad out to macroblock alignment so motion estimation can read edges */
1219 if ((s->width & 15) || (s->height & (vpad-1))) {
1220 s->mpvencdsp.draw_edges(dst, dst_stride,
1230 ret = av_frame_copy_props(pic->f, pic_arg);
1234 pic->f->display_picture_number = display_picture_number;
1235 pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1237 /* Flushing: When we have not received enough input frames,
1238 * ensure s->input_picture[0] contains the first picture */
1239 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1240 if (s->input_picture[flush_offset])
1243 if (flush_offset <= 1)
1246 encoding_delay = encoding_delay - flush_offset + 1;
1249 /* shift buffer entries */
1250 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1251 s->input_picture[i - flush_offset] = s->input_picture[i];
1253 s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether picture 'p' is similar enough to reference 'ref' to be
 * skipped entirely (frame-skip rate control). Accumulates a per-8x8-block
 * comparison metric over all three planes, folds it with the configured
 * exponent (frame_skip_exp), and tests it against frame_skip_threshold and
 * a lambda-scaled frame_skip_factor. Nonzero return => skip (inferred from
 * the caller in select_input_picture — return statements are elided). */
1258 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1262 int64_t score64 = 0;
1264 for (plane = 0; plane < 3; plane++) {
1265 const int stride = p->f->linesize[plane];
/* luma covers 2x2 8x8 blocks per MB, chroma 1x1 (4:2:0 layout assumed) */
1266 const int bw = plane ? 1 : 2;
1267 for (y = 0; y < s->mb_height * bw; y++) {
1268 for (x = 0; x < s->mb_width * bw; x++) {
/* non-shared input pictures carry the INPLACE_OFFSET (16) shift */
1269 int off = p->shared ? 0 : 16;
1270 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1271 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1272 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
/* exponent selects how per-block scores are aggregated */
1274 switch (FFABS(s->frame_skip_exp)) {
1275 case 0: score = FFMAX(score, v); break;
1276 case 1: score += FFABS(v); break;
1277 case 2: score64 += v * (int64_t)v; break;
1278 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1279 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
/* negative exponent: normalize back by taking the matching root */
1288 if (s->frame_skip_exp < 0)
1289 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1290 -1.0/s->frame_skip_exp);
1292 if (score64 < s->frame_skip_threshold)
1294 if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
/* Encode a single frame with an auxiliary AVCodecContext (used by
 * estimate_best_b_count()). Sends the frame, drains one packet, and —
 * based on the visible control flow — unrefs the packet; the returned
 * size accumulation happens in elided lines. EAGAIN/EOF from
 * avcodec_receive_packet() are not treated as errors. */
1299 static int encode_frame(AVCodecContext *c, AVFrame *frame, AVPacket *pkt)
1304 ret = avcodec_send_frame(c, frame);
1309 ret = avcodec_receive_packet(c, pkt);
1312 av_packet_unref(pkt);
1313 } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
/* b_frame_strategy == 2: pick the number of consecutive B-frames by brute
 * force. Downscales the queued input pictures by 'brd_scale', encodes each
 * candidate I/P/B pattern with a temporary encoder, and returns the pattern
 * with the lowest rate-distortion cost (bits * lambda2 plus SSE error). */
1320 static int estimate_best_b_count(MpegEncContext *s)
1322 const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1324 const int scale = s->brd_scale;
1325 int width = s->width >> scale;
1326 int height = s->height >> scale;
1327 int i, j, out_size, p_lambda, b_lambda, lambda2;
1328 int64_t best_rd = INT64_MAX;
1329 int best_b_count = -1;
1332 av_assert0(scale >= 0 && scale <= 3);
1334 pkt = av_packet_alloc();
1336 return AVERROR(ENOMEM);
/* reuse the last-used lambdas as quality targets for the trial encodes */
1339 //s->next_picture_ptr->quality;
1340 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1341 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1342 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1343 if (!b_lambda) // FIXME we should do this somewhere else
1344 b_lambda = p_lambda;
1345 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* build the downscaled trial frames: slot 0 is the previous reference,
 * slots 1.. are the queued input pictures */
1348 for (i = 0; i < s->max_b_frames + 2; i++) {
1349 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1350 s->next_picture_ptr;
1353 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1354 pre_input = *pre_input_ptr;
1355 memcpy(data, pre_input_ptr->f->data, sizeof(data));
/* non-shared input pictures are stored with the INPLACE_OFFSET shift */
1357 if (!pre_input.shared && i) {
1358 data[0] += INPLACE_OFFSET;
1359 data[1] += INPLACE_OFFSET;
1360 data[2] += INPLACE_OFFSET;
1363 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1364 s->tmp_frames[i]->linesize[0],
1366 pre_input.f->linesize[0],
1368 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1369 s->tmp_frames[i]->linesize[1],
1371 pre_input.f->linesize[1],
1372 width >> 1, height >> 1);
1373 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1374 s->tmp_frames[i]->linesize[2],
1376 pre_input.f->linesize[2],
1377 width >> 1, height >> 1);
/* try each candidate B-run length j */
1381 for (j = 0; j < s->max_b_frames + 1; j++) {
1385 if (!s->input_picture[j])
1388 c = avcodec_alloc_context3(NULL);
1390 ret = AVERROR(ENOMEM);
/* mirror the relevant settings of the real encoder on the trial context */
1396 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1397 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1398 c->mb_decision = s->avctx->mb_decision;
1399 c->me_cmp = s->avctx->me_cmp;
1400 c->mb_cmp = s->avctx->mb_cmp;
1401 c->me_sub_cmp = s->avctx->me_sub_cmp;
1402 c->pix_fmt = AV_PIX_FMT_YUV420P;
1403 c->time_base = s->avctx->time_base;
1404 c->max_b_frames = s->max_b_frames;
1406 ret = avcodec_open2(c, codec, NULL);
/* frame 0 is always coded as I at fixed quality */
1411 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1412 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1414 out_size = encode_frame(c, s->tmp_frames[0], pkt);
1420 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1422 for (i = 0; i < s->max_b_frames + 1; i++) {
/* every (j+1)-th frame is a P, the rest are Bs; the last is always P */
1423 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1425 s->tmp_frames[i + 1]->pict_type = is_p ?
1426 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1427 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1429 out_size = encode_frame(c, s->tmp_frames[i + 1], pkt);
1435 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1438 /* get the delayed frames */
1439 out_size = encode_frame(c, NULL, pkt);
1444 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* add the PSNR error terms accumulated by the trial encoder */
1446 rd += c->error[0] + c->error[1] + c->error[2];
1454 avcodec_free_context(&c);
1455 av_packet_unref(pkt);
1462 av_packet_free(&pkt);
1464 return best_b_count;
/* Pick the next picture to encode: decide its type (I/P/B), how many
 * B-frames precede the next reference, reorder input pictures into coding
 * order, and set up s->new_picture / s->current_picture_ptr. Handles
 * frame skipping, two-pass type assignment, the three b_frame_strategy
 * modes, and GOP/closed-GOP boundaries. */
1467 static int select_input_picture(MpegEncContext *s)
/* advance the reorder queue by one slot */
1471 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1472 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1473 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1475 /* set next picture type & ordering */
1476 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1477 if (s->frame_skip_threshold || s->frame_skip_factor) {
1478 if (s->picture_in_gop_number < s->gop_size &&
1479 s->next_picture_ptr &&
1480 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1481 // FIXME check that the gop check above is +-1 correct
1482 av_frame_unref(s->input_picture[0]->f);
/* account 0 bits to the VBV for the dropped frame */
1484 ff_vbv_update(s, 0);
/* no reference yet, or intra-only: code an I-frame immediately */
1490 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1491 !s->next_picture_ptr || s->intra_only) {
1492 s->reordered_input_picture[0] = s->input_picture[0];
1493 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1494 s->reordered_input_picture[0]->f->coded_picture_number =
1495 s->coded_picture_number++;
/* pass 2: take the picture types recorded during pass 1 */
1499 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1500 for (i = 0; i < s->max_b_frames + 1; i++) {
1501 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1503 if (pict_num >= s->rc_context.num_entries)
1505 if (!s->input_picture[i]) {
1506 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1510 s->input_picture[i]->f->pict_type =
1511 s->rc_context.entry[pict_num].new_pict_type;
/* strategy 0: always use the maximum available B-run */
1515 if (s->b_frame_strategy == 0) {
1516 b_frames = s->max_b_frames;
1517 while (b_frames && !s->input_picture[b_frames])
/* strategy 1: score each candidate with get_intra_count() and stop the
 * B-run where too many blocks look intra (scene change heuristic) */
1519 } else if (s->b_frame_strategy == 1) {
1520 for (i = 1; i < s->max_b_frames + 1; i++) {
1521 if (s->input_picture[i] &&
1522 s->input_picture[i]->b_frame_score == 0) {
1523 s->input_picture[i]->b_frame_score =
1525 s->input_picture[i ]->f->data[0],
1526 s->input_picture[i - 1]->f->data[0],
1530 for (i = 0; i < s->max_b_frames + 1; i++) {
1531 if (!s->input_picture[i] ||
1532 s->input_picture[i]->b_frame_score - 1 >
1533 s->mb_num / s->b_sensitivity)
1537 b_frames = FFMAX(0, i - 1);
/* reset scores so the next run is re-evaluated */
1540 for (i = 0; i < b_frames + 1; i++) {
1541 s->input_picture[i]->b_frame_score = 0;
/* strategy 2: exhaustive RD search on downscaled frames */
1543 } else if (s->b_frame_strategy == 2) {
1544 b_frames = estimate_best_b_count(s);
/* user-forced picture types (from pass 2 or the API) shorten the run */
1551 for (i = b_frames - 1; i >= 0; i--) {
1552 int type = s->input_picture[i]->f->pict_type;
1553 if (type && type != AV_PICTURE_TYPE_B)
1556 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1557 b_frames == s->max_b_frames) {
1558 av_log(s->avctx, AV_LOG_ERROR,
1559 "warning, too many B-frames in a row\n");
/* GOP boundary handling: clamp the B-run or force an I-frame */
1562 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1563 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1564 s->gop_size > s->picture_in_gop_number) {
1565 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1567 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1569 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1573 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1574 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
/* emit the reference first, then the B-frames, in coding order */
1577 s->reordered_input_picture[0] = s->input_picture[b_frames];
1578 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1579 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1580 s->reordered_input_picture[0]->f->coded_picture_number =
1581 s->coded_picture_number++;
1582 for (i = 0; i < b_frames; i++) {
1583 s->reordered_input_picture[i + 1] = s->input_picture[i];
1584 s->reordered_input_picture[i + 1]->f->pict_type =
1586 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1587 s->coded_picture_number++;
1592 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1594 if (s->reordered_input_picture[0]) {
/* reference flag: 3 for I/P (both fields), 0 for B */
1595 s->reordered_input_picture[0]->reference =
1596 s->reordered_input_picture[0]->f->pict_type !=
1597 AV_PICTURE_TYPE_B ? 3 : 0;
1599 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1602 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1603 // input is a shared pix, so we can't modify it -> allocate a new
1604 // one & ensure that the shared one is reuseable
1607 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1610 pic = &s->picture[i];
1612 pic->reference = s->reordered_input_picture[0]->reference;
1613 if (alloc_picture(s, pic, 0) < 0) {
1617 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1621 /* mark us unused / free shared pic */
1622 av_frame_unref(s->reordered_input_picture[0]->f);
1623 s->reordered_input_picture[0]->shared = 0;
1625 s->current_picture_ptr = pic;
1627 // input is not a shared pix -> reuse buffer for current_pix
1628 s->current_picture_ptr = s->reordered_input_picture[0];
1629 for (i = 0; i < 4; i++) {
1630 if (s->new_picture.f->data[i])
1631 s->new_picture.f->data[i] += INPLACE_OFFSET;
1634 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1635 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1636 s->current_picture_ptr)) < 0)
1639 s->picture_number = s->new_picture.f->display_picture_number;
/* Post-encode bookkeeping: pad the reference picture's borders for
 * unrestricted motion vectors, remember the last picture type and lambda,
 * and maintain the deprecated coded_frame / error[] public fields. */
1644 static void frame_end(MpegEncContext *s)
/* edge padding is only needed for pictures that will serve as references */
1646 if (s->unrestricted_mv &&
1647 s->current_picture.reference &&
1649 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1650 int hshift = desc->log2_chroma_w;
1651 int vshift = desc->log2_chroma_h;
1652 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1653 s->current_picture.f->linesize[0],
1654 s->h_edge_pos, s->v_edge_pos,
1655 EDGE_WIDTH, EDGE_WIDTH,
1656 EDGE_TOP | EDGE_BOTTOM);
/* chroma planes use the same edge width scaled by the subsampling shifts */
1657 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1658 s->current_picture.f->linesize[1],
1659 s->h_edge_pos >> hshift,
1660 s->v_edge_pos >> vshift,
1661 EDGE_WIDTH >> hshift,
1662 EDGE_WIDTH >> vshift,
1663 EDGE_TOP | EDGE_BOTTOM);
1664 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1665 s->current_picture.f->linesize[2],
1666 s->h_edge_pos >> hshift,
1667 s->v_edge_pos >> vshift,
1668 EDGE_WIDTH >> hshift,
1669 EDGE_WIDTH >> vshift,
1670 EDGE_TOP | EDGE_BOTTOM);
/* remember stats used by rate control for the next frame */
1675 s->last_pict_type = s->pict_type;
1676 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1677 if (s->pict_type!= AV_PICTURE_TYPE_B)
1678 s->last_non_b_pict_type = s->pict_type;
/* keep the deprecated public avctx->coded_frame in sync */
1680 #if FF_API_CODED_FRAME
1681 FF_DISABLE_DEPRECATION_WARNINGS
1682 av_frame_unref(s->avctx->coded_frame);
1683 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1684 FF_ENABLE_DEPRECATION_WARNINGS
1686 #if FF_API_ERROR_FRAME
1687 FF_DISABLE_DEPRECATION_WARNINGS
1688 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1689 sizeof(s->current_picture.encoding_error));
1690 FF_ENABLE_DEPRECATION_WARNINGS
/* Refresh the per-coefficient DCT offset tables used for noise reduction,
 * separately for intra and inter blocks, from the running error sums.
 * Halves the accumulators once the sample count exceeds 2^16 so the
 * statistics track recent frames rather than the whole stream. */
1694 static void update_noise_reduction(MpegEncContext *s)
1698 for (intra = 0; intra < 2; intra++) {
1699 if (s->dct_count[intra] > (1 << 16)) {
1700 for (i = 0; i < 64; i++) {
1701 s->dct_error_sum[intra][i] >>= 1;
1703 s->dct_count[intra] >>= 1;
/* offset[i] = noise_reduction * count / error_sum[i], rounded; +1 avoids
 * division by zero */
1706 for (i = 0; i < 64; i++) {
1707 s->dct_offset[intra][i] = (s->noise_reduction *
1708 s->dct_count[intra] +
1709 s->dct_error_sum[intra][i] / 2) /
1710 (s->dct_error_sum[intra][i] + 1);
/* Per-frame setup before encoding: rotate last/next/current reference
 * pictures, adjust data pointers and linesizes for field pictures, select
 * the dct_unquantize functions for the target codec, and refresh the
 * noise-reduction tables. Returns 0 or a negative AVERROR (error paths
 * are in elided lines). */
1715 static int frame_start(MpegEncContext *s)
1719 /* mark & release old frames */
1720 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1721 s->last_picture_ptr != s->next_picture_ptr &&
1722 s->last_picture_ptr->f->buf[0]) {
1723 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1726 s->current_picture_ptr->f->pict_type = s->pict_type;
1727 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1729 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1730 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1731 s->current_picture_ptr)) < 0)
/* only non-B pictures advance the reference chain */
1734 if (s->pict_type != AV_PICTURE_TYPE_B) {
1735 s->last_picture_ptr = s->next_picture_ptr;
1737 s->next_picture_ptr = s->current_picture_ptr;
1740 if (s->last_picture_ptr) {
1741 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1742 if (s->last_picture_ptr->f->buf[0] &&
1743 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1744 s->last_picture_ptr)) < 0)
1747 if (s->next_picture_ptr) {
1748 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1749 if (s->next_picture_ptr->f->buf[0] &&
1750 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1751 s->next_picture_ptr)) < 0)
/* field coding: point at the selected field and double the strides */
1755 if (s->picture_structure!= PICT_FRAME) {
1757 for (i = 0; i < 4; i++) {
1758 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1759 s->current_picture.f->data[i] +=
1760 s->current_picture.f->linesize[i];
1762 s->current_picture.f->linesize[i] *= 2;
1763 s->last_picture.f->linesize[i] *= 2;
1764 s->next_picture.f->linesize[i] *= 2;
/* choose the matching inverse-quantizer pair for the output format */
1768 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1769 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1770 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1771 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1772 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1773 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1775 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1776 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1779 if (s->dct_error_sum) {
1780 av_assert2(s->noise_reduction && s->encoding);
1781 update_noise_reduction(s);
/* Public encode entry point: queue the input frame, pick the next picture
 * to code, encode it (re-encoding at higher lambda if the VBV would
 * overflow), append stuffing bits, patch MPEG-1/2 vbv_delay for CBR, and
 * fill the output packet with pts/dts/flags. *got_packet is set when a
 * packet was produced. */
1787 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1788 const AVFrame *pic_arg, int *got_packet)
1790 MpegEncContext *s = avctx->priv_data;
1791 int i, stuffing_count, ret;
1792 int context_count = s->slice_context_count;
1794 s->vbv_ignore_qmax = 0;
1796 s->picture_in_gop_number++;
1798 if (load_input_picture(s, pic_arg) < 0)
1801 if (select_input_picture(s) < 0) {
/* output? */
1806 if (s->new_picture.f->data[0]) {
/* with one slice context and no caller buffer we can grow the internal
 * byte buffer instead of allocating the worst-case packet up front */
1807 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1808 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1810 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1811 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
/* H.263 macroblock-info side data, 12 bytes per MB */
1814 s->mb_info_ptr = av_packet_new_side_data(pkt,
1815 AV_PKT_DATA_H263_MB_INFO,
1816 s->mb_width*s->mb_height*12);
1817 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* split the packet buffer between slice threads proportionally to the
 * macroblock rows each thread covers */
1820 for (i = 0; i < context_count; i++) {
1821 int start_y = s->thread_context[i]->start_mb_y;
1822 int end_y = s->thread_context[i]-> end_mb_y;
1823 int h = s->mb_height;
1824 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1825 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1827 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1830 s->pict_type = s->new_picture.f->pict_type;
1832 ret = frame_start(s);
1836 ret = encode_picture(s, s->picture_number);
1837 if (growing_buffer) {
1838 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1839 pkt->data = s->pb.buf;
1840 pkt->size = avctx->internal->byte_buffer_size;
/* keep the deprecated per-frame statistics fields up to date */
1845 #if FF_API_STAT_BITS
1846 FF_DISABLE_DEPRECATION_WARNINGS
1847 avctx->header_bits = s->header_bits;
1848 avctx->mv_bits = s->mv_bits;
1849 avctx->misc_bits = s->misc_bits;
1850 avctx->i_tex_bits = s->i_tex_bits;
1851 avctx->p_tex_bits = s->p_tex_bits;
1852 avctx->i_count = s->i_count;
1853 // FIXME f/b_count in avctx
1854 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1855 avctx->skip_count = s->skip_count;
1856 FF_ENABLE_DEPRECATION_WARNINGS
1861 if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->out_format == FMT_MJPEG)
1862 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
/* VBV check: if the frame is too large, raise lambda and re-encode */
1864 if (avctx->rc_buffer_size) {
1865 RateControlContext *rcc = &s->rc_context;
1866 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1867 int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
1868 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1870 if (put_bits_count(&s->pb) > max_size &&
1871 s->lambda < s->lmax) {
1872 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1873 (s->qscale + 1) / s->qscale);
1874 if (s->adaptive_quant) {
1876 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1877 s->lambda_table[i] =
1878 FFMAX(s->lambda_table[i] + min_step,
1879 s->lambda_table[i] * (s->qscale + 1) /
1882 s->mb_skipped = 0; // done in frame_start()
1883 // done in encode_picture() so we must undo it
1884 if (s->pict_type == AV_PICTURE_TYPE_P) {
1885 if (s->flipflop_rounding ||
1886 s->codec_id == AV_CODEC_ID_H263P ||
1887 s->codec_id == AV_CODEC_ID_MPEG4)
1888 s->no_rounding ^= 1;
1890 if (s->pict_type != AV_PICTURE_TYPE_B) {
1891 s->time_base = s->last_time_base;
1892 s->last_non_b_time = s->time - s->pp_time;
/* rewind every slice thread's bitstream writer for the retry */
1894 for (i = 0; i < context_count; i++) {
1895 PutBitContext *pb = &s->thread_context[i]->pb;
1896 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1898 s->vbv_ignore_qmax = 1;
1899 av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1903 av_assert0(avctx->rc_max_rate);
1906 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1907 ff_write_pass1_stats(s);
1909 for (i = 0; i < 4; i++) {
1910 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1911 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1913 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1914 s->current_picture_ptr->encoding_error,
1915 (avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1918 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1919 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1920 s->misc_bits + s->i_tex_bits +
1922 flush_put_bits(&s->pb);
1923 s->frame_bits = put_bits_count(&s->pb);
/* write codec-specific stuffing so the VBV stays within bounds */
1925 stuffing_count = ff_vbv_update(s, s->frame_bits);
1926 s->stuffing_bits = 8*stuffing_count;
1927 if (stuffing_count) {
1928 if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
1929 av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
1933 switch (s->codec_id) {
1934 case AV_CODEC_ID_MPEG1VIDEO:
1935 case AV_CODEC_ID_MPEG2VIDEO:
1936 while (stuffing_count--) {
1937 put_bits(&s->pb, 8, 0);
/* MPEG-4 stuffing: a 0x000001C3 start code followed by 0xFF bytes */
1940 case AV_CODEC_ID_MPEG4:
1941 put_bits(&s->pb, 16, 0);
1942 put_bits(&s->pb, 16, 0x1C3);
1943 stuffing_count -= 4;
1944 while (stuffing_count--) {
1945 put_bits(&s->pb, 8, 0xFF);
1949 av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1951 flush_put_bits(&s->pb);
1952 s->frame_bits = put_bits_count(&s->pb);
1955 /* update MPEG-1/2 vbv_delay for CBR */
1956 if (avctx->rc_max_rate &&
1957 avctx->rc_min_rate == avctx->rc_max_rate &&
1958 s->out_format == FMT_MPEG1 &&
1959 90000LL * (avctx->rc_buffer_size - 1) <=
1960 avctx->rc_max_rate * 0xFFFFLL) {
1961 AVCPBProperties *props;
1964 int vbv_delay, min_delay;
1965 double inbits = avctx->rc_max_rate *
1966 av_q2d(avctx->time_base);
1967 int minbits = s->frame_bits - 8 *
1968 (s->vbv_delay_ptr - s->pb.buf - 1);
1969 double bits = s->rc_context.buffer_index + minbits - inbits;
1972 av_log(avctx, AV_LOG_ERROR,
1973 "Internal error, negative bits\n");
1975 av_assert1(s->repeat_first_field == 0);
/* vbv_delay is in 90 kHz units; clamp against the decoder's minimum */
1977 vbv_delay = bits * 90000 / avctx->rc_max_rate;
1978 min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
1981 vbv_delay = FFMAX(vbv_delay, min_delay);
1983 av_assert0(vbv_delay < 0xFFFF);
/* patch the 16-bit vbv_delay field in the already-written picture header
 * (it straddles three bytes at s->vbv_delay_ptr) */
1985 s->vbv_delay_ptr[0] &= 0xF8;
1986 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1987 s->vbv_delay_ptr[1] = vbv_delay >> 5;
1988 s->vbv_delay_ptr[2] &= 0x07;
1989 s->vbv_delay_ptr[2] |= vbv_delay << 3;
1991 props = av_cpb_properties_alloc(&props_size);
1993 return AVERROR(ENOMEM);
/* CPB side data carries the delay in 27 MHz units (90 kHz * 300) */
1994 props->vbv_delay = vbv_delay * 300;
1996 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
1997 (uint8_t*)props, props_size);
2003 #if FF_API_VBV_DELAY
2004 FF_DISABLE_DEPRECATION_WARNINGS
2005 avctx->vbv_delay = vbv_delay * 300;
2006 FF_ENABLE_DEPRECATION_WARNINGS
2009 s->total_bits += s->frame_bits;
2010 #if FF_API_STAT_BITS
2011 FF_DISABLE_DEPRECATION_WARNINGS
2012 avctx->frame_bits = s->frame_bits;
2013 FF_ENABLE_DEPRECATION_WARNINGS
/* output timestamps: dts lags pts by one reordered frame when B-frames
 * delay the stream */
2017 pkt->pts = s->current_picture.f->pts;
2018 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2019 if (!s->current_picture.f->coded_picture_number)
2020 pkt->dts = pkt->pts - s->dts_delta;
2022 pkt->dts = s->reordered_pts;
2023 s->reordered_pts = pkt->pts;
2025 pkt->dts = pkt->pts;
2026 if (s->current_picture.f->key_frame)
2027 pkt->flags |= AV_PKT_FLAG_KEY;
2029 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2034 /* release non-reference frames */
2035 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2036 if (!s->picture[i].reference)
2037 ff_mpeg_unref_picture(avctx, &s->picture[i]);
2040 av_assert1((s->frame_bits & 7) == 0);
2042 pkt->size = s->frame_bits / 8;
2043 *got_packet = !!pkt->size;
/* Zero out block 'n' entirely when it contains only a few small
 * coefficients whose weighted score stays below 'threshold' — the bits
 * spent coding them would cost more than the quality they add. 'tab'
 * weights low-frequency positions (in scan order) more heavily; a
 * negative threshold additionally enables DC skipping (skip_dc set in
 * elided code — TODO confirm). */
2047 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2048 int n, int threshold)
2050 static const char tab[64] = {
2051 3, 2, 2, 1, 1, 1, 1, 1,
2052 1, 1, 1, 1, 1, 1, 1, 1,
2053 1, 1, 1, 1, 1, 1, 1, 1,
2054 0, 0, 0, 0, 0, 0, 0, 0,
2055 0, 0, 0, 0, 0, 0, 0, 0,
2056 0, 0, 0, 0, 0, 0, 0, 0,
2057 0, 0, 0, 0, 0, 0, 0, 0,
2058 0, 0, 0, 0, 0, 0, 0, 0
2063 int16_t *block = s->block[n];
2064 const int last_index = s->block_last_index[n];
2067 if (threshold < 0) {
2069 threshold = -threshold;
2073 /* Are all we could set to zero already zero? */
2074 if (last_index <= skip_dc - 1)
/* score the nonzero coefficients; a level > 1 anywhere aborts elimination
 * (handled in the elided branch at 2085) */
2077 for (i = 0; i <= last_index; i++) {
2078 const int j = s->intra_scantable.permutated[i];
2079 const int level = FFABS(block[j]);
2081 if (skip_dc && i == 0)
2085 } else if (level > 1) {
2091 if (score >= threshold)
/* below threshold: clear all (possibly except DC) coefficients */
2093 for (i = skip_dc; i <= last_index; i++) {
2094 const int j = s->intra_scantable.permutated[i];
2098 s->block_last_index[n] = 0;
2100 s->block_last_index[n] = -1;
/* Clamp quantized coefficients into the codec's representable range
 * [min_qcoeff, max_qcoeff]. The intra DC coefficient (index 0) is left
 * untouched. Warns once per block when clipping occurred and macroblock
 * decision is the simple mode (RD modes handle overflow differently). */
2103 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2107 const int maxlevel = s->max_qcoeff;
2108 const int minlevel = s->min_qcoeff;
2112 i = 1; // skip clipping of intra dc
2116 for (; i <= last_index; i++) {
2117 const int j = s->intra_scantable.permutated[i];
2118 int level = block[j];
2120 if (level > maxlevel) {
2123 } else if (level < minlevel) {
2131 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2132 av_log(s->avctx, AV_LOG_INFO,
2133 "warning, clipping %d dct coefficients to %d..%d\n",
2134 overflow, minlevel, maxlevel);
/* Compute a perceptual weight for each pixel of an 8x8 block from the
 * local variance of its 3x3 neighborhood (clamped at the block border):
 * weight = 36*sqrt(count*sum(v^2) - (sum v)^2)/count, i.e. proportional to
 * the neighborhood's standard deviation. Used by the noise-shaping
 * quantizer (dct_quantize_refine). */
2137 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2141 for (y = 0; y < 8; y++) {
2142 for (x = 0; x < 8; x++) {
/* 3x3 window clipped to the 8x8 block; sum/sqr accumulated in elided lines */
2148 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2149 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2150 int v = ptr[x2 + y2 * stride];
2156 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2161 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2162 int motion_x, int motion_y,
2163 int mb_block_height,
2167 int16_t weight[12][64];
2168 int16_t orig[12][64];
2169 const int mb_x = s->mb_x;
2170 const int mb_y = s->mb_y;
2173 int dct_offset = s->linesize * 8; // default for progressive frames
2174 int uv_dct_offset = s->uvlinesize * 8;
2175 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2176 ptrdiff_t wrap_y, wrap_c;
2178 for (i = 0; i < mb_block_count; i++)
2179 skip_dct[i] = s->skipdct;
2181 if (s->adaptive_quant) {
2182 const int last_qp = s->qscale;
2183 const int mb_xy = mb_x + mb_y * s->mb_stride;
2185 s->lambda = s->lambda_table[mb_xy];
2188 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2189 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2190 s->dquant = s->qscale - last_qp;
2192 if (s->out_format == FMT_H263) {
2193 s->dquant = av_clip(s->dquant, -2, 2);
2195 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2197 if (s->pict_type == AV_PICTURE_TYPE_B) {
2198 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2201 if (s->mv_type == MV_TYPE_8X8)
2207 ff_set_qscale(s, last_qp + s->dquant);
2208 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2209 ff_set_qscale(s, s->qscale + s->dquant);
2211 wrap_y = s->linesize;
2212 wrap_c = s->uvlinesize;
2213 ptr_y = s->new_picture.f->data[0] +
2214 (mb_y * 16 * wrap_y) + mb_x * 16;
2215 ptr_cb = s->new_picture.f->data[1] +
2216 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2217 ptr_cr = s->new_picture.f->data[2] +
2218 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2220 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2221 uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2222 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2223 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2224 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2226 16, 16, mb_x * 16, mb_y * 16,
2227 s->width, s->height);
2229 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2231 mb_block_width, mb_block_height,
2232 mb_x * mb_block_width, mb_y * mb_block_height,
2234 ptr_cb = ebuf + 16 * wrap_y;
2235 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2237 mb_block_width, mb_block_height,
2238 mb_x * mb_block_width, mb_y * mb_block_height,
2240 ptr_cr = ebuf + 16 * wrap_y + 16;
2244 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2245 int progressive_score, interlaced_score;
2247 s->interlaced_dct = 0;
2248 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2249 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2250 NULL, wrap_y, 8) - 400;
2252 if (progressive_score > 0) {
2253 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2254 NULL, wrap_y * 2, 8) +
2255 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2256 NULL, wrap_y * 2, 8);
2257 if (progressive_score > interlaced_score) {
2258 s->interlaced_dct = 1;
2260 dct_offset = wrap_y;
2261 uv_dct_offset = wrap_c;
2263 if (s->chroma_format == CHROMA_422 ||
2264 s->chroma_format == CHROMA_444)
2270 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2271 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2272 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2273 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2275 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2279 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2280 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2281 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2282 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2283 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2284 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2285 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2286 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2287 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2288 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2289 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2290 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2294 op_pixels_func (*op_pix)[4];
2295 qpel_mc_func (*op_qpix)[16];
2296 uint8_t *dest_y, *dest_cb, *dest_cr;
2298 dest_y = s->dest[0];
2299 dest_cb = s->dest[1];
2300 dest_cr = s->dest[2];
2302 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2303 op_pix = s->hdsp.put_pixels_tab;
2304 op_qpix = s->qdsp.put_qpel_pixels_tab;
2306 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2307 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2310 if (s->mv_dir & MV_DIR_FORWARD) {
2311 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2312 s->last_picture.f->data,
2314 op_pix = s->hdsp.avg_pixels_tab;
2315 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2317 if (s->mv_dir & MV_DIR_BACKWARD) {
2318 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2319 s->next_picture.f->data,
2323 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2324 int progressive_score, interlaced_score;
2326 s->interlaced_dct = 0;
2327 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2328 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2332 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2333 progressive_score -= 400;
2335 if (progressive_score > 0) {
2336 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2338 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2342 if (progressive_score > interlaced_score) {
2343 s->interlaced_dct = 1;
2345 dct_offset = wrap_y;
2346 uv_dct_offset = wrap_c;
2348 if (s->chroma_format == CHROMA_422)
2354 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2355 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2356 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2357 dest_y + dct_offset, wrap_y);
2358 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2359 dest_y + dct_offset + 8, wrap_y);
2361 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2365 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2366 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2367 if (!s->chroma_y_shift) { /* 422 */
2368 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2369 dest_cb + uv_dct_offset, wrap_c);
2370 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2371 dest_cr + uv_dct_offset, wrap_c);
2374 /* pre quantization */
2375 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2376 2 * s->qscale * s->qscale) {
2378 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2380 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2382 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2383 wrap_y, 8) < 20 * s->qscale)
2385 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2386 wrap_y, 8) < 20 * s->qscale)
2388 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2390 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2392 if (!s->chroma_y_shift) { /* 422 */
2393 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2394 dest_cb + uv_dct_offset,
2395 wrap_c, 8) < 20 * s->qscale)
2397 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2398 dest_cr + uv_dct_offset,
2399 wrap_c, 8) < 20 * s->qscale)
2405 if (s->quantizer_noise_shaping) {
2407 get_visual_weight(weight[0], ptr_y , wrap_y);
2409 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2411 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2413 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2415 get_visual_weight(weight[4], ptr_cb , wrap_c);
2417 get_visual_weight(weight[5], ptr_cr , wrap_c);
2418 if (!s->chroma_y_shift) { /* 422 */
2420 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2423 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2426 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2429 /* DCT & quantize */
2430 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2432 for (i = 0; i < mb_block_count; i++) {
2435 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2436 // FIXME we could decide to change to quantizer instead of
2438 // JS: I don't think that would be a good idea it could lower
2439 // quality instead of improve it. Just INTRADC clipping
2440 // deserves changes in quantizer
2442 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2444 s->block_last_index[i] = -1;
2446 if (s->quantizer_noise_shaping) {
2447 for (i = 0; i < mb_block_count; i++) {
2449 s->block_last_index[i] =
2450 dct_quantize_refine(s, s->block[i], weight[i],
2451 orig[i], i, s->qscale);
2456 if (s->luma_elim_threshold && !s->mb_intra)
2457 for (i = 0; i < 4; i++)
2458 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2459 if (s->chroma_elim_threshold && !s->mb_intra)
2460 for (i = 4; i < mb_block_count; i++)
2461 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2463 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2464 for (i = 0; i < mb_block_count; i++) {
2465 if (s->block_last_index[i] == -1)
2466 s->coded_score[i] = INT_MAX / 256;
2471 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2472 s->block_last_index[4] =
2473 s->block_last_index[5] = 0;
2475 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2476 if (!s->chroma_y_shift) { /* 422 / 444 */
2477 for (i=6; i<12; i++) {
2478 s->block_last_index[i] = 0;
2479 s->block[i][0] = s->block[4][0];
2484 // non c quantize code returns incorrect block_last_index FIXME
2485 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2486 for (i = 0; i < mb_block_count; i++) {
2488 if (s->block_last_index[i] > 0) {
2489 for (j = 63; j > 0; j--) {
2490 if (s->block[i][s->intra_scantable.permutated[j]])
2493 s->block_last_index[i] = j;
2498 /* huffman encode */
2499 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2500 case AV_CODEC_ID_MPEG1VIDEO:
2501 case AV_CODEC_ID_MPEG2VIDEO:
2502 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2503 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2505 case AV_CODEC_ID_MPEG4:
2506 if (CONFIG_MPEG4_ENCODER)
2507 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2509 case AV_CODEC_ID_MSMPEG4V2:
2510 case AV_CODEC_ID_MSMPEG4V3:
2511 case AV_CODEC_ID_WMV1:
2512 if (CONFIG_MSMPEG4_ENCODER)
2513 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2515 case AV_CODEC_ID_WMV2:
2516 if (CONFIG_WMV2_ENCODER)
2517 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2519 case AV_CODEC_ID_H261:
2520 if (CONFIG_H261_ENCODER)
2521 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2523 case AV_CODEC_ID_H263:
2524 case AV_CODEC_ID_H263P:
2525 case AV_CODEC_ID_FLV1:
2526 case AV_CODEC_ID_RV10:
2527 case AV_CODEC_ID_RV20:
2528 if (CONFIG_H263_ENCODER)
2529 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2531 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
2532 case AV_CODEC_ID_MJPEG:
2533 case AV_CODEC_ID_AMV:
2534 ff_mjpeg_encode_mb(s, s->block);
2537 case AV_CODEC_ID_SPEEDHQ:
2538 if (CONFIG_SPEEDHQ_ENCODER)
2539 ff_speedhq_encode_mb(s, s->block);
/* Encode one macroblock: dispatch to encode_mb_internal() with the block
 * geometry matching the chroma subsampling — 4:2:0 uses 8x8 chroma and 6
 * blocks per MB, 4:2:2 uses 16x8 chroma / 8 blocks, 4:4:4 16x16 / 12 blocks.
 * NOTE(review): this chunk is line-sampled; braces/lines between the visible
 * statements are missing from view. */
2546 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2548 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2549 else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2550 else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/* Snapshot the encoder state that a trial macroblock encode may clobber,
 * from 's' into 'd' (prediction state, skip-run, bit-count statistics,
 * quantizer), so the caller can later restore / compare alternatives.
 * Counterpart of copy_context_after_encode(). */
2553 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2556 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2559 d->mb_skip_run= s->mb_skip_run;
2561 d->last_dc[i] = s->last_dc[i];
     /* bit-usage statistics (used for rate control / pass-1 logging) */
2564 d->mv_bits= s->mv_bits;
2565 d->i_tex_bits= s->i_tex_bits;
2566 d->p_tex_bits= s->p_tex_bits;
2567 d->i_count= s->i_count;
2568 d->f_count= s->f_count;
2569 d->b_count= s->b_count;
2570 d->skip_count= s->skip_count;
2571 d->misc_bits= s->misc_bits;
2575 d->qscale= s->qscale;
2576 d->dquant= s->dquant;
     /* MPEG-4-specific escape-code length state */
2578 d->esc3_level_length= s->esc3_level_length;
/* Copy the state produced by a (trial) macroblock encode from 's' into 'd':
 * everything copy_context_before_encode() saves, plus the results of the
 * encode itself — motion vectors, mb_intra/mb_skipped flags, mv type/dir,
 * bitstream writers and per-block last-index values. Used both to commit
 * the best candidate and to back up a candidate for later comparison. */
2581 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2584 memcpy(d->mv, s->mv, 2*4*2*sizeof(int))
2585 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2588 d->mb_skip_run= s->mb_skip_run;
2590 d->last_dc[i] = s->last_dc[i];
     /* bit-usage statistics */
2593 d->mv_bits= s->mv_bits;
2594 d->i_tex_bits= s->i_tex_bits;
2595 d->p_tex_bits= s->p_tex_bits;
2596 d->i_count= s->i_count;
2597 d->f_count= s->f_count;
2598 d->b_count= s->b_count;
2599 d->skip_count= s->skip_count;
2600 d->misc_bits= s->misc_bits;
     /* mode-decision results */
2602 d->mb_intra= s->mb_intra;
2603 d->mb_skipped= s->mb_skipped;
2604 d->mv_type= s->mv_type;
2605 d->mv_dir= s->mv_dir;
2607 if(s->data_partitioning){
2609 d->tex_pb= s->tex_pb;
2613 d->block_last_index[i]= s->block_last_index[i];
2614 d->interlaced_dct= s->interlaced_dct;
2615 d->qscale= s->qscale;
2617 d->esc3_level_length= s->esc3_level_length;
/* Trial-encode the current macroblock with one candidate mode ('type') and
 * keep it in 'best' if its cost beats *dmin.  The MB is encoded into one of
 * two ping-pong bit buffers (pb/pb2/tex_pb indexed by *next_block) so the
 * best candidate's bits survive while the other slot is reused.  With
 * FF_MB_DECISION_RD the cost is a full rate-distortion score
 * (bits * lambda2 + SSE of the reconstruction, done into rd_scratchpad);
 * otherwise it is the bit count alone (not fully visible in this chunk). */
2620 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2621 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2622 int *dmin, int *next_block, int motion_x, int motion_y)
2625 uint8_t *dest_backup[3];
2627 copy_context_before_encode(s, backup, type);
     /* select the scratch block set / bit writers for this trial */
2629 s->block= s->blocks[*next_block];
2630 s->pb= pb[*next_block];
2631 if(s->data_partitioning){
2632 s->pb2 = pb2 [*next_block];
2633 s->tex_pb= tex_pb[*next_block];
     /* redirect reconstruction into the RD scratchpad so s->dest stays intact */
2637 memcpy(dest_backup, s->dest, sizeof(s->dest));
2638 s->dest[0] = s->sc.rd_scratchpad;
2639 s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2640 s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2641 av_assert0(s->linesize >= 32); //FIXME
2644 encode_mb(s, motion_x, motion_y);
2646 score= put_bits_count(&s->pb);
2647 if(s->data_partitioning){
2648 score+= put_bits_count(&s->pb2);
2649 score+= put_bits_count(&s->tex_pb);
2652 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2653 ff_mpv_reconstruct_mb(s, s->block);
     /* RD cost: rate (bits, already in 'score') weighted by lambda2 plus
      * distortion (SSE of scratchpad reconstruction vs. source) */
2655 score *= s->lambda2;
2656 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2660 memcpy(s->dest, dest_backup, sizeof(s->dest));
2667 copy_context_after_encode(best, s, type);
/* Sum of squared errors between two w x h pixel regions with the given
 * stride.  Uses the optimized mecc.sse kernels for the common 16x16 and
 * 8x8 sizes; otherwise falls back to a scalar loop using the squared-value
 * lookup table (ff_square_tab is biased by 256 so negative differences
 * index correctly). */
2671 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2672 const uint32_t *sq = ff_square_tab + 256;
2677 return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2678 else if(w==8 && h==8)
2679 return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2683 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/* Distortion of the current macroblock: compares the reconstructed MB in
 * s->dest against the source picture (new_picture) for luma + both chroma
 * planes.  Full 16x16 MBs use the fast mecc.sse / mecc.nsse kernels
 * (NSSE when mb_cmp selects noise-preserving SSE); partial MBs at the
 * right/bottom picture edge fall back to the generic sse() with the
 * clipped w x h. */
2692 static int sse_mb(MpegEncContext *s){
     /* clip MB extent at the picture border */
2696 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2697 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2700 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2701 return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2702 s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2703 s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2705 return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2706 s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2707 s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
     /* edge macroblock: chroma is subsampled 2x in both directions here */
2710 return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2711 +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2712 +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/* Slice-threaded motion-estimation pre-pass: walks this slice's macroblock
 * rows bottom-up (and MBs right-to-left) running
 * ff_pre_estimate_p_frame_motion() with the pre_dia_size search window.
 * 'arg' is a MpegEncContext* passed through the AVCodecContext execute API. */
2715 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2716 MpegEncContext *s= *(void**)arg;
2720 s->me.dia_size= s->avctx->pre_dia_size;
2721 s->first_slice_line=1;
2722 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2723 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2724 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2726 s->first_slice_line=0;
/* Slice-threaded main motion estimation pass: for every MB in the slice run
 * B-frame or P-frame motion estimation, storing vectors and mb_type in the
 * context.  block_index[0..3] are advanced by 2 per MB (4 luma 8x8 blocks,
 * two per row). */
2734 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2735 MpegEncContext *s= *(void**)arg;
2737 s->me.dia_size= s->avctx->dia_size;
2738 s->first_slice_line=1;
2739 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2740 s->mb_x=0; //for block init below
2741 ff_init_block_index(s);
2742 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2743 s->block_index[0]+=2;
2744 s->block_index[1]+=2;
2745 s->block_index[2]+=2;
2746 s->block_index[3]+=2;
2748 /* compute motion vector & mb_type and store in context */
2749 if(s->pict_type==AV_PICTURE_TYPE_B)
2750 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2752 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2754 s->first_slice_line=0;
/* Slice-threaded spatial-complexity pass: for each 16x16 luma MB of the
 * source picture compute the pixel variance (varc) and mean, store them in
 * current_picture.mb_var / mb_mean and accumulate the per-slice variance
 * sum for later merging.  varc = E[x^2] - E[x]^2 with rounding (+500+128
 * before the >>8). */
2759 static int mb_var_thread(AVCodecContext *c, void *arg){
2760 MpegEncContext *s= *(void**)arg;
2763 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2764 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2767 uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2769 int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2771 varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2772 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2774 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2775 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2776 s->me.mb_var_sum_temp += varc;
/* Finalize the current slice's bitstream: codec-specific trailing work
 * (MPEG-4 partition merge + stuffing, MJPEG stuffing, SpeedHQ end-slice),
 * then byte-align/flush the put-bit writer.  In pass-1 rate control the
 * trailing bits are accounted as misc_bits (skipped for partitioned frames,
 * where accounting is handled by the partition code). */
2782 static void write_slice_end(MpegEncContext *s){
2783 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2784 if(s->partitioned_frame){
2785 ff_mpeg4_merge_partitions(s);
2788 ff_mpeg4_stuffing(&s->pb);
2789 } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
2790 s->out_format == FMT_MJPEG) {
2791 ff_mjpeg_encode_stuffing(s);
2792 } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
2793 ff_speedhq_end_slice(s);
2796 flush_put_bits(&s->pb);
2798 if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2799 s->misc_bits+= get_bits_diff(s);
/* Append one 12-byte record to the H.263 mb_info side-data buffer for the
 * current macroblock: bit offset in the packet, quantizer, GOB number,
 * macroblock address within the GOB, and the two motion-vector predictors
 * (4MV second pair not implemented, written as 0).  Written little-endian
 * via bytestream helpers into the pre-reserved slot at the end of
 * mb_info_ptr. */
2802 static void write_mb_info(MpegEncContext *s)
2804 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2805 int offset = put_bits_count(&s->pb);
2806 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2807 int gobn = s->mb_y / s->gob_index;
2809 if (CONFIG_H263_ENCODER)
2810 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2811 bytestream_put_le32(&ptr, offset);
2812 bytestream_put_byte(&ptr, s->qscale);
2813 bytestream_put_byte(&ptr, gobn);
2814 bytestream_put_le16(&ptr, mba);
2815 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2816 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2817 /* 4MV not implemented */
2818 bytestream_put_byte(&ptr, 0); /* hmv2 */
2819 bytestream_put_byte(&ptr, 0); /* vmv2 */
/* Bookkeeping for mb_info side data: once at least s->mb_info bytes have
 * been written since the previous recorded MB, reserve a new 12-byte
 * record slot (the record itself is written by write_mb_info(), presumably
 * on a later call — NOTE(review): the call relationship is only partially
 * visible in this chunk).  'startcode' marks calls made right after a
 * resync/start code, which update last_mb_info instead. */
2822 static void update_mb_info(MpegEncContext *s, int startcode)
2826 if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
2827 s->mb_info_size += 12;
2828 s->prev_mb_info = s->last_mb_info;
2831 s->prev_mb_info = put_bytes_count(&s->pb, 0);
2832 /* This might have incremented mb_info_size above, and we return without
2833 * actually writing any info into that slot yet. But in that case,
2834 * this will be called again at the start of the after writing the
2835 * start code, actually writing the mb info. */
2839 s->last_mb_info = put_bytes_count(&s->pb, 0);
2840 if (!s->mb_info_size)
2841 s->mb_info_size += 12;
/* Grow the shared output bitstream buffer when fewer than 'threshold' bytes
 * remain.  Only applies when a single slice context writes directly into
 * avctx->internal->byte_buffer.  Reallocates (padded), copies the bytes
 * already written, rebases the PutBitContext, and re-derives the two raw
 * pointers that referenced the old buffer (ptr_lastgob, vbv_delay_ptr).
 * Returns 0 on success, AVERROR(ENOMEM) if the buffer cannot grow, or
 * AVERROR(EINVAL) if after all that there is still not enough room. */
2845 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2847 if (put_bytes_left(&s->pb, 0) < threshold
2848 && s->slice_context_count == 1
2849 && s->pb.buf == s->avctx->internal->byte_buffer) {
     /* save offsets of raw pointers into the old buffer */
2850 int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2851 int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2853 uint8_t *new_buffer = NULL;
2854 int new_buffer_size = 0;
2856 if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2857 av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2858 return AVERROR(ENOMEM);
2863 av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2864 s->avctx->internal->byte_buffer_size + size_increase);
2866 return AVERROR(ENOMEM);
2868 memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2869 av_free(s->avctx->internal->byte_buffer);
2870 s->avctx->internal->byte_buffer = new_buffer;
2871 s->avctx->internal->byte_buffer_size = new_buffer_size;
2872 rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
     /* rebase the saved raw pointers onto the new buffer */
2873 s->ptr_lastgob = s->pb.buf + lastgob_pos;
2874 s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2876 if (put_bytes_left(&s->pb, 0) < threshold)
2877 return AVERROR(EINVAL);
/*
 * Slice-thread worker: encode all macroblocks of this slice
 * (rows [start_mb_y, end_mb_y)) into s->pb.
 *
 * Outline (as visible in this chunk — the chunk is line-sampled, some
 * statements between the visible lines are missing from view):
 *   - set up two ping-pong trial bit buffers (pb/pb2/tex_pb) for RD mode
 *     decision, reset DC predictors and per-codec state;
 *   - per MB: ensure output-buffer headroom, optionally emit a resync /
 *     GOB / slice / video-packet header, then either
 *       (a) trial-encode every candidate MB type via encode_mb_hq() and
 *           keep the cheapest (plus an optional QP_RD quantizer search and
 *           MPEG-4 direct-mode / skip candidates), or
 *       (b) fast path: a single candidate type, encoded directly;
 *   - update motion tables / loop filter and, with AV_CODEC_FLAG_PSNR,
 *     accumulate per-plane SSE into current_picture.encoding_error.
 * 'arg' is a MpegEncContext* passed through the execute() API.
 */
2881 static int encode_thread(AVCodecContext *c, void *arg){
2882 MpegEncContext *s= *(void**)arg;
2883 int mb_x, mb_y, mb_y_order;
2884 int chr_h= 16>>s->chroma_y_shift;
2886 MpegEncContext best_s = { 0 }, backup_s;
     /* two trial bitstream slots so the current best candidate's bits are
      * preserved while the other slot is overwritten by the next trial */
2887 uint8_t bit_buf[2][MAX_MB_BYTES];
2888 uint8_t bit_buf2[2][MAX_MB_BYTES];
2889 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2890 PutBitContext pb[2], pb2[2], tex_pb[2];
2893 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2894 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2895 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2898 s->last_bits= put_bits_count(&s->pb);
2909 /* init last dc values */
2910 /* note: quant matrix value (8) is implied here */
2911 s->last_dc[i] = 128 << s->intra_dc_precision;
2913 s->current_picture.encoding_error[i] = 0;
2915 if(s->codec_id==AV_CODEC_ID_AMV){
2916 s->last_dc[0] = 128*8/13;
2917 s->last_dc[1] = 128*8/14;
2918 s->last_dc[2] = 128*8/14;
2921 memset(s->last_mv, 0, sizeof(s->last_mv));
     /* per-codec slice setup */
2925 switch(s->codec_id){
2926 case AV_CODEC_ID_H263:
2927 case AV_CODEC_ID_H263P:
2928 case AV_CODEC_ID_FLV1:
2929 if (CONFIG_H263_ENCODER)
2930 s->gob_index = H263_GOB_HEIGHT(s->height);
2932 case AV_CODEC_ID_MPEG4:
2933 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2934 ff_mpeg4_init_partitions(s);
2940 s->first_slice_line = 1;
2941 s->ptr_lastgob = s->pb.buf;
2942 for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
     /* SpeedHQ encodes MB rows in a codec-specific order */
2943 if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
2945 mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
2946 if (first_in_slice && mb_y_order != s->start_mb_y)
2947 ff_speedhq_end_slice(s);
2948 s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
2955 ff_set_qscale(s, s->qscale);
2956 ff_init_block_index(s);
2958 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2959 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2960 int mb_type= s->mb_type[xy];
     /* make sure the output buffer can hold at least one worst-case MB */
2964 int size_increase = s->avctx->internal->byte_buffer_size/4
2965 + s->mb_width*MAX_MB_BYTES;
2967 ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
2968 if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
2969 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2972 if(s->data_partitioning){
2973 if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
2974 put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
2975 av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2981 s->mb_y = mb_y; // moved into loop, can get changed by H.261
2982 ff_update_block_index(s);
2984 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2985 ff_h261_reorder_mb_index(s);
2986 xy= s->mb_y*s->mb_stride + s->mb_x;
2987 mb_type= s->mb_type[xy];
2990 /* write gob / video packet header */
2992 int current_packet_size, is_gob_start;
2994 current_packet_size = put_bytes_count(&s->pb, 1)
2995 - (s->ptr_lastgob - s->pb.buf);
     /* decide whether a resync marker / GOB start goes before this MB */
2997 is_gob_start = s->rtp_payload_size &&
2998 current_packet_size >= s->rtp_payload_size &&
3001 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3003 switch(s->codec_id){
3004 case AV_CODEC_ID_H263:
3005 case AV_CODEC_ID_H263P:
3006 if(!s->h263_slice_structured)
3007 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3009 case AV_CODEC_ID_MPEG2VIDEO:
3010 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3011 case AV_CODEC_ID_MPEG1VIDEO:
3012 if(s->mb_skip_run) is_gob_start=0;
3014 case AV_CODEC_ID_MJPEG:
3015 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3020 if(s->start_mb_y != mb_y || mb_x!=0){
3023 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3024 ff_mpeg4_init_partitions(s);
3028 av_assert2((put_bits_count(&s->pb)&7) == 0);
3029 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
     /* optional error-rate simulation: drop packets deterministically */
3031 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3032 int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->mb_x + s->mb_y;
3033 int d = 100 / s->error_rate;
3035 current_packet_size=0;
3036 s->pb.buf_ptr= s->ptr_lastgob;
3037 av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3041 #if FF_API_RTP_CALLBACK
3042 FF_DISABLE_DEPRECATION_WARNINGS
3043 if (s->avctx->rtp_callback){
3044 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3045 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3047 FF_ENABLE_DEPRECATION_WARNINGS
3049 update_mb_info(s, 1);
     /* emit the codec-specific resync/slice header */
3051 switch(s->codec_id){
3052 case AV_CODEC_ID_MPEG4:
3053 if (CONFIG_MPEG4_ENCODER) {
3054 ff_mpeg4_encode_video_packet_header(s);
3055 ff_mpeg4_clean_buffers(s);
3058 case AV_CODEC_ID_MPEG1VIDEO:
3059 case AV_CODEC_ID_MPEG2VIDEO:
3060 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3061 ff_mpeg1_encode_slice_header(s);
3062 ff_mpeg1_clean_buffers(s);
3065 case AV_CODEC_ID_H263:
3066 case AV_CODEC_ID_H263P:
3067 if (CONFIG_H263_ENCODER)
3068 ff_h263_encode_gob_header(s, mb_y);
3072 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3073 int bits= put_bits_count(&s->pb);
3074 s->misc_bits+= bits - s->last_bits;
3078 s->ptr_lastgob += current_packet_size;
3079 s->first_slice_line=1;
3080 s->resync_mb_x=mb_x;
3081 s->resync_mb_y=mb_y;
3085 if( (s->resync_mb_x == s->mb_x)
3086 && s->resync_mb_y+1 == s->mb_y){
3087 s->first_slice_line=0;
3091 s->dquant=0; //only for QP_RD
3093 update_mb_info(s, 0);
3095 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
     /* --- RD mode decision: trial-encode every candidate type --- */
3097 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3099 copy_context_before_encode(&backup_s, s, -1);
3101 best_s.data_partitioning= s->data_partitioning;
3102 best_s.partitioned_frame= s->partitioned_frame;
3103 if(s->data_partitioning){
3104 backup_s.pb2= s->pb2;
3105 backup_s.tex_pb= s->tex_pb;
3108 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3109 s->mv_dir = MV_DIR_FORWARD;
3110 s->mv_type = MV_TYPE_16X16;
3112 s->mv[0][0][0] = s->p_mv_table[xy][0];
3113 s->mv[0][0][1] = s->p_mv_table[xy][1];
3114 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3115 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3117 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3118 s->mv_dir = MV_DIR_FORWARD;
3119 s->mv_type = MV_TYPE_FIELD;
3122 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3123 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3124 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3126 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3127 &dmin, &next_block, 0, 0);
3129 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3130 s->mv_dir = MV_DIR_FORWARD;
3131 s->mv_type = MV_TYPE_16X16;
3135 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3136 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3138 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3139 s->mv_dir = MV_DIR_FORWARD;
3140 s->mv_type = MV_TYPE_8X8;
3143 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3144 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3146 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3147 &dmin, &next_block, 0, 0);
3149 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3150 s->mv_dir = MV_DIR_FORWARD;
3151 s->mv_type = MV_TYPE_16X16;
3153 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3154 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3155 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3156 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3158 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3159 s->mv_dir = MV_DIR_BACKWARD;
3160 s->mv_type = MV_TYPE_16X16;
3162 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3163 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3164 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3165 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3167 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3168 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3169 s->mv_type = MV_TYPE_16X16;
3171 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3172 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3173 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3174 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3175 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3176 &dmin, &next_block, 0, 0);
3178 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3179 s->mv_dir = MV_DIR_FORWARD;
3180 s->mv_type = MV_TYPE_FIELD;
3183 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3184 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3185 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3187 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3188 &dmin, &next_block, 0, 0);
3190 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3191 s->mv_dir = MV_DIR_BACKWARD;
3192 s->mv_type = MV_TYPE_FIELD;
3195 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3196 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3197 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3199 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3200 &dmin, &next_block, 0, 0);
3202 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3203 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3204 s->mv_type = MV_TYPE_FIELD;
3206 for(dir=0; dir<2; dir++){
3208 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3209 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3210 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3213 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3214 &dmin, &next_block, 0, 0);
3216 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3218 s->mv_type = MV_TYPE_16X16;
3222 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3223 &dmin, &next_block, 0, 0);
3224 if(s->h263_pred || s->h263_aic){
3226 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3228 ff_clean_intra_table_entries(s); //old mode?
     /* --- optional quantizer search (QP_RD): retry best mode at qp +-1, +-2 --- */
3232 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3233 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3234 const int last_qp= backup_s.qscale;
3237 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3238 static const int dquant_tab[4]={-1,1,-2,2};
3239 int storecoefs = s->mb_intra && s->dc_val[0];
3241 av_assert2(backup_s.dquant == 0);
3244 s->mv_dir= best_s.mv_dir;
3245 s->mv_type = MV_TYPE_16X16;
3246 s->mb_intra= best_s.mb_intra;
3247 s->mv[0][0][0] = best_s.mv[0][0][0];
3248 s->mv[0][0][1] = best_s.mv[0][0][1];
3249 s->mv[1][0][0] = best_s.mv[1][0][0];
3250 s->mv[1][0][1] = best_s.mv[1][0][1];
     /* B frames only try +-1 (qpi starts at 2) */
3252 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3253 for(; qpi<4; qpi++){
3254 int dquant= dquant_tab[qpi];
3255 qp= last_qp + dquant;
3256 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3258 backup_s.dquant= dquant;
     /* save DC/AC prediction state; restore it if this qp loses */
3261 dc[i]= s->dc_val[0][ s->block_index[i] ];
3262 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3266 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3267 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3268 if(best_s.qscale != qp){
3271 s->dc_val[0][ s->block_index[i] ]= dc[i];
3272 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3279 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3280 int mx= s->b_direct_mv_table[xy][0];
3281 int my= s->b_direct_mv_table[xy][1];
3283 backup_s.dquant = 0;
3284 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3286 ff_mpeg4_set_direct_mv(s, mx, my);
3287 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3288 &dmin, &next_block, mx, my);
3290 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3291 backup_s.dquant = 0;
3292 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3294 ff_mpeg4_set_direct_mv(s, 0, 0);
3295 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3296 &dmin, &next_block, 0, 0);
     /* optional skip-RD: if the best mode coded nothing, try an explicit skip */
3298 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3301 coded |= s->block_last_index[i];
3304 memcpy(s->mv, best_s.mv, sizeof(s->mv));
3305 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3306 mx=my=0; //FIXME find the one we actually used
3307 ff_mpeg4_set_direct_mv(s, mx, my);
3308 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3316 s->mv_dir= best_s.mv_dir;
3317 s->mv_type = best_s.mv_type;
3319 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3320 s->mv[0][0][1] = best_s.mv[0][0][1];
3321 s->mv[1][0][0] = best_s.mv[1][0][0];
3322 s->mv[1][0][1] = best_s.mv[1][0][1];*/
3325 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3326 &dmin, &next_block, mx, my);
     /* --- commit the winning candidate --- */
3331 s->current_picture.qscale_table[xy] = best_s.qscale;
3333 copy_context_after_encode(s, &best_s, -1);
     /* copy the winning trial bits into the real output bitstream(s) */
3335 pb_bits_count= put_bits_count(&s->pb);
3336 flush_put_bits(&s->pb);
3337 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3340 if(s->data_partitioning){
3341 pb2_bits_count= put_bits_count(&s->pb2);
3342 flush_put_bits(&s->pb2);
3343 ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3344 s->pb2= backup_s.pb2;
3346 tex_pb_bits_count= put_bits_count(&s->tex_pb);
3347 flush_put_bits(&s->tex_pb);
3348 ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3349 s->tex_pb= backup_s.tex_pb;
3351 s->last_bits= put_bits_count(&s->pb);
3353 if (CONFIG_H263_ENCODER &&
3354 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3355 ff_h263_update_motion_val(s);
     /* the best reconstruction may be in the scratchpad; copy it back */
3357 if(next_block==0){ //FIXME 16 vs linesize16
3358 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3359 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3360 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3363 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3364 ff_mpv_reconstruct_mb(s, s->block);
     /* --- fast path: exactly one candidate MB type --- */
3366 int motion_x = 0, motion_y = 0;
3367 s->mv_type=MV_TYPE_16X16;
3368 // only one MB-Type possible
3371 case CANDIDATE_MB_TYPE_INTRA:
3374 motion_x= s->mv[0][0][0] = 0;
3375 motion_y= s->mv[0][0][1] = 0;
3377 case CANDIDATE_MB_TYPE_INTER:
3378 s->mv_dir = MV_DIR_FORWARD;
3380 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3381 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3383 case CANDIDATE_MB_TYPE_INTER_I:
3384 s->mv_dir = MV_DIR_FORWARD;
3385 s->mv_type = MV_TYPE_FIELD;
3388 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3389 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3390 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3393 case CANDIDATE_MB_TYPE_INTER4V:
3394 s->mv_dir = MV_DIR_FORWARD;
3395 s->mv_type = MV_TYPE_8X8;
3398 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3399 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3402 case CANDIDATE_MB_TYPE_DIRECT:
3403 if (CONFIG_MPEG4_ENCODER) {
3404 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3406 motion_x=s->b_direct_mv_table[xy][0];
3407 motion_y=s->b_direct_mv_table[xy][1];
3408 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3411 case CANDIDATE_MB_TYPE_DIRECT0:
3412 if (CONFIG_MPEG4_ENCODER) {
3413 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3415 ff_mpeg4_set_direct_mv(s, 0, 0);
3418 case CANDIDATE_MB_TYPE_BIDIR:
3419 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3421 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3422 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3423 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3424 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3426 case CANDIDATE_MB_TYPE_BACKWARD:
3427 s->mv_dir = MV_DIR_BACKWARD;
3429 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3430 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3432 case CANDIDATE_MB_TYPE_FORWARD:
3433 s->mv_dir = MV_DIR_FORWARD;
3435 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3436 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3438 case CANDIDATE_MB_TYPE_FORWARD_I:
3439 s->mv_dir = MV_DIR_FORWARD;
3440 s->mv_type = MV_TYPE_FIELD;
3443 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3444 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3445 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3448 case CANDIDATE_MB_TYPE_BACKWARD_I:
3449 s->mv_dir = MV_DIR_BACKWARD;
3450 s->mv_type = MV_TYPE_FIELD;
3453 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3454 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3455 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3458 case CANDIDATE_MB_TYPE_BIDIR_I:
3459 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3460 s->mv_type = MV_TYPE_FIELD;
3462 for(dir=0; dir<2; dir++){
3464 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3465 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3466 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3471 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3474 encode_mb(s, motion_x, motion_y);
3476 // RAL: Update last macroblock type
3477 s->last_mv_dir = s->mv_dir;
3479 if (CONFIG_H263_ENCODER &&
3480 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3481 ff_h263_update_motion_val(s);
3483 ff_mpv_reconstruct_mb(s, s->block);
3486 /* clean the MV table in IPS frames for direct mode in B-frames */
3487 if(s->mb_intra /* && I,P,S_TYPE */){
3488 s->p_mv_table[xy][0]=0;
3489 s->p_mv_table[xy][1]=0;
     /* accumulate per-plane SSE for PSNR reporting */
3492 if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3496 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3497 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3499 s->current_picture.encoding_error[0] += sse(
3500 s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3501 s->dest[0], w, h, s->linesize);
3502 s->current_picture.encoding_error[1] += sse(
3503 s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3504 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3505 s->current_picture.encoding_error[2] += sse(
3506 s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3507 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3510 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3511 ff_h263_loop_filter(s);
3513 ff_dlog(s->avctx, "MB %d %d bits\n",
3514 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3518 //not beautiful here but we must write it before flushing so it has to be here
3519 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3520 ff_msmpeg4_encode_ext_header(s);
3524 #if FF_API_RTP_CALLBACK
3525 FF_DISABLE_DEPRECATION_WARNINGS
3526 /* Send the last GOB if RTP */
3527 if (s->avctx->rtp_callback) {
3528 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3529 int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3530 /* Call the RTP callback to send the last GOB */
3532 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3534 FF_ENABLE_DEPRECATION_WARNINGS
/* MERGE(field): accumulate src->field into dst->field and zero src->field.
 * NOTE(review): this expands to TWO statements without do{}while(0)
 * protection, so it must only be used where a multi-statement body is
 * safe (e.g. not as the body of an unbraced if). */
3540 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold the motion-estimation statistics gathered by a slice-thread
 * context (src) into the main encoder context (dst).  Each counter in
 * src is reset to 0 by MERGE so a later merge cannot double-count. */
3541 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3542 MERGE(me.scene_change_score);
3543 MERGE(me.mc_mb_var_sum_temp);
3544 MERGE(me.mb_var_sum_temp);
/* Merge per-slice-thread encoding state back into the main context after
 * the encode pass: DCT/noise-reduction statistics, error-resilience and
 * PSNR error accumulators, and finally the slice's bitstream itself,
 * which is appended to dst->pb (both bitstreams must be byte-aligned,
 * enforced by the asserts below). */
3547 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3550 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3551 MERGE(dct_count[1]);
3560 MERGE(er.error_count);
3561 MERGE(padding_bug_score);
3562 MERGE(current_picture.encoding_error[0]);
3563 MERGE(current_picture.encoding_error[1]);
3564 MERGE(current_picture.encoding_error[2]);
/* Per-coefficient DCT error sums only exist when noise reduction is on. */
3566 if (dst->noise_reduction){
3567 for(i=0; i<64; i++){
3568 MERGE(dct_error_sum[0][i]);
3569 MERGE(dct_error_sum[1][i]);
/* Append the slice thread's byte-aligned bitstream to the main one. */
3573 av_assert1(put_bits_count(&src->pb) % 8 ==0);
3574 av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3575 ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3576 flush_put_bits(&dst->pb);
/* Pick the quantizer / lambda for the current picture.
 * Priority: an explicitly queued next_lambda, then the rate controller
 * (unless fixed_qscale), then whatever quality is already set.
 * dry_run != 0 estimates without consuming state (next_lambda is kept).
 * With adaptive quantization the per-MB qscale table is cleaned up in a
 * codec-specific way before being committed via ff_init_qscale_tab().
 * Returns 0 on success; negative if rate control yields an invalid
 * quality (error path partly elided here — TODO confirm against full file). */
3579 static int estimate_qp(MpegEncContext *s, int dry_run){
3580 if (s->next_lambda){
3581 s->current_picture_ptr->f->quality =
3582 s->current_picture.f->quality = s->next_lambda;
3583 if(!dry_run) s->next_lambda= 0;
3584 } else if (!s->fixed_qscale) {
3585 int quality = ff_rate_estimate_qscale(s, dry_run);
3586 s->current_picture_ptr->f->quality =
3587 s->current_picture.f->quality = quality;
3588 if (s->current_picture.f->quality < 0)
3592 if(s->adaptive_quant){
3593 switch(s->codec_id){
3594 case AV_CODEC_ID_MPEG4:
3595 if (CONFIG_MPEG4_ENCODER)
3596 ff_clean_mpeg4_qscales(s);
3598 case AV_CODEC_ID_H263:
3599 case AV_CODEC_ID_H263P:
3600 case AV_CODEC_ID_FLV1:
3601 if (CONFIG_H263_ENCODER)
3602 ff_clean_h263_qscales(s);
3605 ff_init_qscale_tab(s);
/* Adaptive quant: lambda comes from the per-MB table's first entry. */
3608 s->lambda= s->lambda_table[0];
/* Non-adaptive: lambda tracks the picture-level quality directly. */
3611 s->lambda = s->current_picture.f->quality;
3616 /* must be called before writing the header */
/* Derive the temporal distances used by B-frame prediction from the
 * picture PTS: pp_time = distance between the two surrounding non-B
 * frames, pb_time = distance from the previous non-B frame to this B
 * frame.  Must be called before the picture header is written (the
 * values end up in the bitstream). */
3617 static void set_frame_distances(MpegEncContext * s){
3618 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3619 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3621 if(s->pict_type==AV_PICTURE_TYPE_B){
3622 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3623 av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
/* Non-B frame: advance the reference times. */
3625 s->pp_time= s->time - s->last_non_b_time;
3626 s->last_non_b_time= s->time;
3627 av_assert1(s->picture_number==0 || s->pp_time > 0);
/* Encode one complete picture:
 *   1. set frame distances / MPEG-4 time, rounding mode, and lambda,
 *   2. run motion estimation (or mark everything intra for I frames)
 *      across all slice-thread contexts and merge the statistics,
 *   3. possibly promote P->I on scene change, choose f_code/b_code and
 *      clip overlong MVs,
 *   4. finalize quantizer and (for MJPEG/AMV) bake qscale into the
 *      quant matrices,
 *   5. write the codec-specific picture header,
 *   6. run encode_thread over all slice contexts and merge bitstreams.
 * Returns 0 on success, negative on error (error paths partly elided in
 * this dump — TODO confirm against the full file). */
3631 static int encode_picture(MpegEncContext *s, int picture_number)
3635 int context_count = s->slice_context_count;
3637 s->picture_number = picture_number;
3639 /* Reset the average MB variance */
3640 s->me.mb_var_sum_temp =
3641 s->me.mc_mb_var_sum_temp = 0;
3643 /* we need to initialize some time vars before we can encode B-frames */
3644 // RAL: Condition added for MPEG1VIDEO
3645 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3646 set_frame_distances(s);
3647 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3648 ff_set_mpeg4_time(s);
3650 s->me.scene_change_score=0;
3652 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* Rounding control: I frames reset it; non-B frames toggle it for
 * codecs with flip-flop rounding so drift averages out. */
3654 if(s->pict_type==AV_PICTURE_TYPE_I){
3655 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3656 else s->no_rounding=0;
3657 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3658 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3659 s->no_rounding ^= 1;
/* Lambda for motion estimation: pass-2 reads it from stats; otherwise
 * reuse the last lambda of the same picture-type class. */
3662 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3663 if (estimate_qp(s,1) < 0)
3665 ff_get_2pass_fcode(s);
3666 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3667 if(s->pict_type==AV_PICTURE_TYPE_B)
3668 s->lambda= s->last_lambda_for[s->pict_type];
3670 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* Outside MJPEG/AMV the chroma intra matrices alias the luma ones;
 * free any previously separate copies before aliasing. */
3674 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3675 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3676 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3677 s->q_chroma_intra_matrix = s->q_intra_matrix;
3678 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3681 s->mb_intra=0; //for the rate distortion & bit compare functions
3682 for(i=1; i<context_count; i++){
3683 ret = ff_update_duplicate_context(s->thread_context[i], s);
3691 /* Estimate motion for every MB */
3692 if(s->pict_type != AV_PICTURE_TYPE_I){
3693 s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3694 s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3695 if (s->pict_type != AV_PICTURE_TYPE_B) {
3696 if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3698 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3702 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3703 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
/* I frame: every MB is intra. */
3705 for(i=0; i<s->mb_stride*s->mb_height; i++)
3706 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3708 if(!s->fixed_qscale){
3709 /* finding spatial complexity for I-frame rate control */
3710 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3713 for(i=1; i<context_count; i++){
3714 merge_context_after_me(s, s->thread_context[i]);
3716 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3717 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* Scene change: re-encode this P frame as an I frame. */
3720 if (s->me.scene_change_score > s->scenechange_threshold &&
3721 s->pict_type == AV_PICTURE_TYPE_P) {
3722 s->pict_type= AV_PICTURE_TYPE_I;
3723 for(i=0; i<s->mb_stride*s->mb_height; i++)
3724 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3725 if(s->msmpeg4_version >= 3)
3727 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3728 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* Choose the forward f_code for P/S frames and clip overlong MVs. */
3732 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3733 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3735 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3737 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3738 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3739 s->f_code= FFMAX3(s->f_code, a, b);
3742 ff_fix_long_p_mvs(s, s->intra_penalty ? CANDIDATE_MB_TYPE_INTER : CANDIDATE_MB_TYPE_INTRA);
3743 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3744 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3748 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3749 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
/* B frames: choose forward (f_code) and backward (b_code) ranges over
 * both the uni-directional and bidirectional MV tables. */
3754 if(s->pict_type==AV_PICTURE_TYPE_B){
3757 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3758 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3759 s->f_code = FFMAX(a, b);
3761 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3762 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3763 s->b_code = FFMAX(a, b);
3765 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3766 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3767 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3768 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3769 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3771 for(dir=0; dir<2; dir++){
3774 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3775 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3776 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3777 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* Final (non-dry-run) quantizer decision. */
3785 if (estimate_qp(s, 0) < 0)
3788 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3789 s->pict_type == AV_PICTURE_TYPE_I &&
3790 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3791 s->qscale= 3; //reduce clipping problems
/* MJPEG: qscale is folded into the quant matrices here, since JPEG has
 * no per-MB quantizer. */
3793 if (s->out_format == FMT_MJPEG) {
3794 const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3795 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3797 if (s->avctx->intra_matrix) {
3799 luma_matrix = s->avctx->intra_matrix;
3801 if (s->avctx->chroma_intra_matrix)
3802 chroma_matrix = s->avctx->chroma_intra_matrix;
3804 /* for mjpeg, we do include qscale in the matrix */
3806 int j = s->idsp.idct_permutation[i];
3808 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3809 s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3811 s->y_dc_scale_table=
3812 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3813 s->chroma_intra_matrix[0] =
3814 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3815 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3816 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3817 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3818 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV: fixed qscale-5 quant tables and fixed DC scales. */
3821 if(s->codec_id == AV_CODEC_ID_AMV){
3822 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3823 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3825 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3827 s->intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3828 s->chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3830 s->y_dc_scale_table= y;
3831 s->c_dc_scale_table= c;
3832 s->intra_matrix[0] = 13;
3833 s->chroma_intra_matrix[0] = 14;
3834 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3835 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3836 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3837 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3841 if (s->out_format == FMT_SPEEDHQ) {
3842 s->y_dc_scale_table=
3843 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[3];
3846 //FIXME var duplication
3847 s->current_picture_ptr->f->key_frame =
3848 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3849 s->current_picture_ptr->f->pict_type =
3850 s->current_picture.f->pict_type = s->pict_type;
3852 if (s->current_picture.f->key_frame)
3853 s->picture_in_gop_number=0;
3855 s->mb_x = s->mb_y = 0;
3856 s->last_bits= put_bits_count(&s->pb);
/* Write the codec-specific picture header. */
3857 switch(s->out_format) {
3858 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
3860 /* s->huffman == HUFFMAN_TABLE_OPTIMAL can only be true for MJPEG. */
3861 if (!CONFIG_MJPEG_ENCODER || s->huffman != HUFFMAN_TABLE_OPTIMAL)
3862 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3863 s->pred, s->intra_matrix, s->chroma_intra_matrix);
3867 if (CONFIG_SPEEDHQ_ENCODER)
3868 ff_speedhq_encode_picture_header(s);
3871 if (CONFIG_H261_ENCODER)
3872 ff_h261_encode_picture_header(s, picture_number);
3875 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3876 ff_wmv2_encode_picture_header(s, picture_number);
3877 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3878 ff_msmpeg4_encode_picture_header(s, picture_number);
3879 else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3880 ret = ff_mpeg4_encode_picture_header(s, picture_number);
3883 } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3884 ret = ff_rv10_encode_picture_header(s, picture_number);
3888 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3889 ff_rv20_encode_picture_header(s, picture_number);
3890 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3891 ff_flv_encode_picture_header(s, picture_number);
3892 else if (CONFIG_H263_ENCODER)
3893 ff_h263_encode_picture_header(s, picture_number);
3896 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3897 ff_mpeg1_encode_picture_header(s, picture_number);
3902 bits= put_bits_count(&s->pb);
3903 s->header_bits= bits - s->last_bits;
/* Encode all slices in parallel and merge results back in order. */
3905 for(i=1; i<context_count; i++){
3906 update_duplicate_context_after_me(s->thread_context[i], s);
3908 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3909 for(i=1; i<context_count; i++){
3910 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3911 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3912 merge_context_after_encode(s, s->thread_context[i]);
/* DCT-domain noise reduction (C reference implementation, installed as
 * s->denoise_dct): for each coefficient, accumulate its magnitude into
 * dct_error_sum[intra] and shrink it toward zero by the learned
 * dct_offset[intra], clamping at zero so a coefficient never changes
 * sign.  Positive and negative coefficients are handled symmetrically. */
3918 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3919 const int intra= s->mb_intra;
3922 s->dct_count[intra]++;
3924 for(i=0; i<64; i++){
3925 int level= block[i];
/* Positive branch: subtract the offset, clamp below at 0. */
3929 s->dct_error_sum[intra][i] += level;
3930 level -= s->dct_offset[intra][i];
3931 if(level<0) level=0;
/* Negative branch: add the offset, clamp above at 0. */
3933 s->dct_error_sum[intra][i] -= level;
3934 level += s->dct_offset[intra][i];
3935 if(level>0) level=0;
/* Trellis (rate-distortion optimal) quantization of one 8x8 block.
 * Forward-DCTs the block, builds up to two candidate quantized levels
 * per coefficient, then runs a Viterbi-style search over (run, level)
 * codes using the codec's VLC length tables to minimize
 * distortion + lambda * rate.  Writes the chosen levels back into
 * block[] in permuted order and returns the last nonzero index
 * (scantable order); *overflow is set if clipping may have occurred.
 * Several declarations/branches are elided in this dump — TODO confirm
 * details against the full file. */
3942 static int dct_quantize_trellis_c(MpegEncContext *s,
3943 int16_t *block, int n,
3944 int qscale, int *overflow){
3946 const uint16_t *matrix;
3947 const uint8_t *scantable;
3948 const uint8_t *perm_scantable;
3950 unsigned int threshold1, threshold2;
3962 int coeff_count[64];
3963 int qmul, qadd, start_i, last_non_zero, i, dc;
3964 const int esc_length= s->ac_esc_length;
3966 uint8_t * last_length;
3967 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3970 s->fdsp.fdct(block);
3972 if(s->dct_error_sum)
3973 s->denoise_dct(s, block);
3975 qadd= ((qscale-1)|1)*8;
3977 if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3978 else mpeg2_qscale = qscale << 1;
/* Intra path: select intra scantables, matrices and VLC tables. */
3982 scantable= s->intra_scantable.scantable;
3983 perm_scantable= s->intra_scantable.permutated;
3991 /* For AIC we skip quant/dequant of INTRADC */
3996 /* note: block[0] is assumed to be positive */
3997 block[0] = (block[0] + (q >> 1)) / q;
4000 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4001 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4002 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4003 bias= 1<<(QMAT_SHIFT-1);
4005 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4006 length = s->intra_chroma_ac_vlc_length;
4007 last_length= s->intra_chroma_ac_vlc_last_length;
4009 length = s->intra_ac_vlc_length;
4010 last_length= s->intra_ac_vlc_last_length;
/* Inter path. */
4013 scantable= s->inter_scantable.scantable;
4014 perm_scantable= s->inter_scantable.permutated;
4017 qmat = s->q_inter_matrix[qscale];
4018 matrix = s->inter_matrix;
4019 length = s->inter_ac_vlc_length;
4020 last_length= s->inter_ac_vlc_last_length;
4024 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4025 threshold2= (threshold1<<1);
/* Find the last coefficient that survives quantization. */
4027 for(i=63; i>=start_i; i--) {
4028 const int j = scantable[i];
4029 int level = block[j] * qmat[j];
4031 if(((unsigned)(level+threshold1))>threshold2){
/* Build up to two candidate levels (L and L-1) per surviving coeff. */
4037 for(i=start_i; i<=last_non_zero; i++) {
4038 const int j = scantable[i];
4039 int level = block[j] * qmat[j];
4041 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4042 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4043 if(((unsigned)(level+threshold1))>threshold2){
4045 level= (bias + level)>>QMAT_SHIFT;
4047 coeff[1][i]= level-1;
4048 // coeff[2][k]= level-2;
4050 level= (bias - level)>>QMAT_SHIFT;
4051 coeff[0][i]= -level;
4052 coeff[1][i]= -level+1;
4053 // coeff[2][k]= -level+2;
4055 coeff_count[i]= FFMIN(level, 2);
4056 av_assert2(coeff_count[i]);
4059 coeff[0][i]= (level>>31)|1;
4064 *overflow= s->max_qcoeff < max; //overflow might have happened
4066 if(last_non_zero < start_i){
4067 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4068 return last_non_zero;
/* Viterbi search: score_tab[i] is the best cost up to position i,
 * survivor[] the set of positions worth extending. */
4071 score_tab[start_i]= 0;
4072 survivor[0]= start_i;
4075 for(i=start_i; i<=last_non_zero; i++){
4076 int level_index, j, zero_distortion;
4077 int dct_coeff= FFABS(block[ scantable[i] ]);
4078 int best_score=256*256*256*120;
/* ifast FDCT outputs are AAN-scaled; undo the scale for distortion. */
4080 if (s->fdsp.fdct == ff_fdct_ifast)
4081 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4082 zero_distortion= dct_coeff*dct_coeff;
4084 for(level_index=0; level_index < coeff_count[i]; level_index++){
4086 int level= coeff[level_index][i];
4087 const int alevel= FFABS(level);
/* Reconstruct (dequantize) per-codec to measure true distortion. */
4092 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4093 unquant_coeff= alevel*qmul + qadd;
4094 } else if(s->out_format == FMT_MJPEG) {
4095 j = s->idsp.idct_permutation[scantable[i]];
4096 unquant_coeff = alevel * matrix[j] * 8;
4098 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4100 unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4101 unquant_coeff = (unquant_coeff - 1) | 1;
4103 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4104 unquant_coeff = (unquant_coeff - 1) | 1;
4109 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* Level fits the VLC table: try continuing from each survivor. */
4111 if((level&(~127)) == 0){
4112 for(j=survivor_count-1; j>=0; j--){
4113 int run= i - survivor[j];
4114 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4115 score += score_tab[i-run];
4117 if(score < best_score){
4120 level_tab[i+1]= level-64;
4124 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4125 for(j=survivor_count-1; j>=0; j--){
4126 int run= i - survivor[j];
4127 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4128 score += score_tab[i-run];
4129 if(score < last_score){
4132 last_level= level-64;
/* Level needs the escape code: fixed esc_length rate. */
4138 distortion += esc_length*lambda;
4139 for(j=survivor_count-1; j>=0; j--){
4140 int run= i - survivor[j];
4141 int score= distortion + score_tab[i-run];
4143 if(score < best_score){
4146 level_tab[i+1]= level-64;
4150 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4151 for(j=survivor_count-1; j>=0; j--){
4152 int run= i - survivor[j];
4153 int score= distortion + score_tab[i-run];
4154 if(score < last_score){
4157 last_level= level-64;
4165 score_tab[i+1]= best_score;
4167 // Note: there is a vlc code in MPEG-4 which is 1 bit shorter than another one with a shorter run and the same level
4168 if(last_non_zero <= 27){
4169 for(; survivor_count; survivor_count--){
4170 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4174 for(; survivor_count; survivor_count--){
4175 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4180 survivor[ survivor_count++ ]= i+1;
/* Non-H.26x codecs have no explicit "last" code flavor; pick the best
 * stopping point by scanning the score table. */
4183 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4184 last_score= 256*256*256*120;
4185 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4186 int score= score_tab[i];
4188 score += lambda * 2; // FIXME more exact?
4190 if(score < last_score){
4193 last_level= level_tab[i];
4194 last_run= run_tab[i];
4199 s->coded_score[n] = last_score;
4201 dc= FFABS(block[0]);
4202 last_non_zero= last_i - 1;
4203 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4205 if(last_non_zero < start_i)
4206 return last_non_zero;
/* Special case: only the first coefficient survives; re-evaluate its
 * candidates against the DC value directly. */
4208 if(last_non_zero == 0 && start_i == 0){
4210 int best_score= dc * dc;
4212 for(i=0; i<coeff_count[0]; i++){
4213 int level= coeff[i][0];
4214 int alevel= FFABS(level);
4215 int unquant_coeff, score, distortion;
4217 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4218 unquant_coeff= (alevel*qmul + qadd)>>3;
4220 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4221 unquant_coeff = (unquant_coeff - 1) | 1;
4223 unquant_coeff = (unquant_coeff + 4) >> 3;
4224 unquant_coeff<<= 3 + 3;
4226 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4228 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4229 else score= distortion + esc_length*lambda;
4231 if(score < best_score){
4233 best_level= level - 64;
4236 block[0]= best_level;
4237 s->coded_score[n] = best_score - dc*dc;
4238 if(best_level == 0) return -1;
4239 else return last_non_zero;
/* Back-track the winning path and write the levels into block[]. */
4243 av_assert2(last_level);
4245 block[ perm_scantable[last_non_zero] ]= last_level;
4248 for(; i>start_i; i -= run_tab[i] + 1){
4249 block[ perm_scantable[i-1] ]= level_tab[i];
4252 return last_non_zero;
/* 64 IDCT basis images (one per coefficient, rows permuted by the IDCT
 * permutation), scaled by BASIS_SHIFT; lazily filled by build_basis()
 * on first use in dct_quantize_refine(). */
4255 static int16_t basis[64][64];
/* Compute each 8x8 DCT basis function basis[perm[i*8+j]][x*8+y] =
 * round(0.25 * 2^BASIS_SHIFT * C(i)C(j) cos(...)cos(...)), where C(0)
 * carries the usual sqrt(1/2) normalization.  Loop headers over
 * i/j/x/y are elided in this dump. */
4257 static void build_basis(uint8_t *perm){
4264 double s= 0.25*(1<<BASIS_SHIFT);
4266 int perm_index= perm[index];
4267 if(i==0) s*= sqrt(0.5);
4268 if(j==0) s*= sqrt(0.5);
4269 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Second-pass quantizer refinement ("quantizer noise shaping"): given an
 * already-quantized block, greedily try +/-1 changes to individual
 * coefficients (and dropping/adding the trailing ones), accepting a
 * change when it lowers weighted reconstruction error plus lambda-scaled
 * VLC rate delta.  rem[] holds the current reconstruction residual in
 * the pixel domain (via the precomputed basis images); weight[] is the
 * perceptual weighting.  Returns the new last-nonzero index.  Many
 * loop/branch lines are elided in this dump — TODO confirm details
 * against the full file. */
4276 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4277 int16_t *block, int16_t *weight, int16_t *orig,
4280 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4281 const uint8_t *scantable;
4282 const uint8_t *perm_scantable;
4283 // unsigned int threshold1, threshold2;
4288 int qmul, qadd, start_i, last_non_zero, i, dc;
4290 uint8_t * last_length;
4292 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* Lazily build the DCT basis tables on first call. */
4294 if(basis[0][0] == 0)
4295 build_basis(s->idsp.idct_permutation);
/* Intra: intra scantables and AC VLC length tables. */
4300 scantable= s->intra_scantable.scantable;
4301 perm_scantable= s->intra_scantable.permutated;
4308 /* For AIC we skip quant/dequant of INTRADC */
4312 q <<= RECON_SHIFT-3;
4313 /* note: block[0] is assumed to be positive */
4315 // block[0] = (block[0] + (q >> 1)) / q;
4317 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4318 // bias= 1<<(QMAT_SHIFT-1);
4319 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4320 length = s->intra_chroma_ac_vlc_length;
4321 last_length= s->intra_chroma_ac_vlc_last_length;
4323 length = s->intra_ac_vlc_length;
4324 last_length= s->intra_ac_vlc_last_length;
/* Inter path. */
4327 scantable= s->inter_scantable.scantable;
4328 perm_scantable= s->inter_scantable.permutated;
4331 length = s->inter_ac_vlc_length;
4332 last_length= s->inter_ac_vlc_last_length;
4334 last_non_zero = s->block_last_index[n];
/* Initialize the residual with the (negated) original pixels. */
4336 dc += (1<<(RECON_SHIFT-1));
4337 for(i=0; i<64; i++){
4338 rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
/* Map perceptual weights into the 16..63 range used by try_8x8basis. */
4342 for(i=0; i<64; i++){
4347 w= FFABS(weight[i]) + qns*one;
4348 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4351 // w=weight[i] = (63*qns + (w/2)) / w;
4354 av_assert2(w<(1<<6));
4357 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* Add the currently-coded coefficients into the residual and record
 * the run-length structure in run_tab[]. */
4361 for(i=start_i; i<=last_non_zero; i++){
4362 int j= perm_scantable[i];
4363 const int level= block[j];
4367 if(level<0) coeff= qmul*level - qadd;
4368 else coeff= qmul*level + qadd;
4369 run_tab[rle_index++]=run;
4372 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
/* Iterative improvement: keep trying single-coefficient changes until
 * none improves the weighted score. */
4379 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4382 int run2, best_unquant_change=0, analyze_gradient;
4383 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
4385 if(analyze_gradient){
4386 for(i=0; i<64; i++){
4389 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
/* Try adjusting the intra DC coefficient by +/-1. */
4395 const int level= block[0];
4396 int change, old_coeff;
4398 av_assert2(s->mb_intra);
4402 for(change=-1; change<=1; change+=2){
4403 int new_level= level + change;
4404 int score, new_coeff;
4406 new_coeff= q*new_level;
4407 if(new_coeff >= 2048 || new_coeff < 0)
4410 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4411 new_coeff - old_coeff);
4412 if(score<best_score){
4415 best_change= change;
4416 best_unquant_change= new_coeff - old_coeff;
4423 run2= run_tab[rle_index++];
/* Try adjusting each AC coefficient by +/-1, accounting for the VLC
 * rate change of splitting/merging runs. */
4427 for(i=start_i; i<64; i++){
4428 int j= perm_scantable[i];
4429 const int level= block[j];
4430 int change, old_coeff;
4432 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4436 if(level<0) old_coeff= qmul*level - qadd;
4437 else old_coeff= qmul*level + qadd;
4438 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4442 av_assert2(run2>=0 || i >= last_non_zero );
4445 for(change=-1; change<=1; change+=2){
4446 int new_level= level + change;
4447 int score, new_coeff, unquant_change;
4450 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4454 if(new_level<0) new_coeff= qmul*new_level - qadd;
4455 else new_coeff= qmul*new_level + qadd;
4456 if(new_coeff >= 2048 || new_coeff <= -2048)
4458 //FIXME check for overflow
/* Nonzero -> nonzero: only the level code length changes. */
4461 if(level < 63 && level > -63){
4462 if(i < last_non_zero)
4463 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4464 - length[UNI_AC_ENC_INDEX(run, level+64)];
4466 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4467 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* Zero -> +/-1: inserting a coefficient splits the following run. */
4470 av_assert2(FFABS(new_level)==1);
4472 if(analyze_gradient){
4473 int g= d1[ scantable[i] ];
4474 if(g && (g^new_level) >= 0)
4478 if(i < last_non_zero){
4479 int next_i= i + run2 + 1;
4480 int next_level= block[ perm_scantable[next_i] ] + 64;
4482 if(next_level&(~127))
4485 if(next_i < last_non_zero)
4486 score += length[UNI_AC_ENC_INDEX(run, 65)]
4487 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4488 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4490 score += length[UNI_AC_ENC_INDEX(run, 65)]
4491 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4492 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4494 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4496 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4497 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* +/-1 -> zero: removing a coefficient merges adjacent runs. */
4503 av_assert2(FFABS(level)==1);
4505 if(i < last_non_zero){
4506 int next_i= i + run2 + 1;
4507 int next_level= block[ perm_scantable[next_i] ] + 64;
4509 if(next_level&(~127))
4512 if(next_i < last_non_zero)
4513 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4514 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4515 - length[UNI_AC_ENC_INDEX(run, 65)];
4517 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4518 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4519 - length[UNI_AC_ENC_INDEX(run, 65)];
4521 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4523 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4524 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4531 unquant_change= new_coeff - old_coeff;
4532 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4534 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4536 if(score<best_score){
4539 best_change= change;
4540 best_unquant_change= unquant_change;
4544 prev_level= level + 64;
4545 if(prev_level&(~127))
/* Apply the winning change and update last_non_zero accordingly. */
4555 int j= perm_scantable[ best_coeff ];
4557 block[j] += best_change;
4559 if(best_coeff > last_non_zero){
4560 last_non_zero= best_coeff;
4561 av_assert2(block[j]);
4563 for(; last_non_zero>=start_i; last_non_zero--){
4564 if(block[perm_scantable[last_non_zero]])
/* Rebuild run_tab[] and fold the change into the residual. */
4571 for(i=start_i; i<=last_non_zero; i++){
4572 int j= perm_scantable[i];
4573 const int level= block[j];
4576 run_tab[rle_index++]=run;
4583 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4589 return last_non_zero;
 * Permute an 8x8 block in place according to the given permutation.
 * @param block the block to be permuted
 * @param permutation the permutation vector (maps natural index to the
 *        IDCT implementation's expected index)
 * @param last the last nonzero coefficient in scantable order; used only
 *        to limit the amount of work
 * @param scantable the scantable in use; used only to limit the work —
 *        the block itself is NOT (inverse-)permuted to scantable order!
4603 void ff_block_permute(int16_t *block, uint8_t *permutation,
4604 const uint8_t *scantable, int last)
4611 //FIXME it is ok but not clean and might fail for some permutations
4612 // if (permutation[1] == 1)
/* Copy the nonzero coefficients aside, then scatter them to their
 * permuted positions (lines clearing block[] are elided in this dump). */
4615 for (i = 0; i <= last; i++) {
4616 const int j = scantable[i];
4621 for (i = 0; i <= last; i++) {
4622 const int j = scantable[i];
4623 const int perm_j = permutation[j];
4624 block[perm_j] = temp[j];
/* Plain (non-trellis) C quantizer for one 8x8 block: forward DCT,
 * optional DCT-domain denoise, then threshold-and-shift quantization
 * against the appropriate quant matrix, followed by the IDCT
 * permutation of the surviving coefficients.  Returns the last nonzero
 * index in scantable order; *overflow flags possible clipping. */
4628 int ff_dct_quantize_c(MpegEncContext *s,
4629 int16_t *block, int n,
4630 int qscale, int *overflow)
4632 int i, j, level, last_non_zero, q, start_i;
4634 const uint8_t *scantable;
4637 unsigned int threshold1, threshold2;
4639 s->fdsp.fdct(block);
4641 if(s->dct_error_sum)
4642 s->denoise_dct(s, block);
/* Intra: DC is quantized separately with rounding-to-nearest. */
4645 scantable= s->intra_scantable.scantable;
4653 /* For AIC we skip quant/dequant of INTRADC */
4656 /* note: block[0] is assumed to be positive */
4657 block[0] = (block[0] + (q >> 1)) / q;
4660 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4661 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
/* Inter path. */
4663 scantable= s->inter_scantable.scantable;
4666 qmat = s->q_inter_matrix[qscale];
4667 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4669 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4670 threshold2= (threshold1<<1);
/* Locate the last coefficient that survives quantization. */
4671 for(i=63;i>=start_i;i--) {
4673 level = block[j] * qmat[j];
4675 if(((unsigned)(level+threshold1))>threshold2){
/* Quantize the surviving range; the unsigned compare rejects values in
 * (-threshold1, threshold1) with a single branch. */
4682 for(i=start_i; i<=last_non_zero; i++) {
4684 level = block[j] * qmat[j];
4686 // if( bias+level >= (1<<QMAT_SHIFT)
4687 // || bias-level >= (1<<QMAT_SHIFT)){
4688 if(((unsigned)(level+threshold1))>threshold2){
4690 level= (bias + level)>>QMAT_SHIFT;
4693 level= (bias - level)>>QMAT_SHIFT;
4701 *overflow= s->max_qcoeff < max; //overflow might have happened
4703 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4704 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4705 ff_block_permute(block, s->idsp.idct_permutation,
4706 scantable, last_non_zero);
4708 return last_non_zero;
/* AVOption plumbing: OFFSET gives the byte offset of a field inside
 * MpegEncContext; VE marks options as video+encoding parameters. */
4711 #define OFFSET(x) offsetof(MpegEncContext, x)
4712 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options for the plain H.263 encoder. */
4713 static const AVOption h263_options[] = {
4714 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4715 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass tying the options above to the H.263 encoder instance. */
4720 static const AVClass h263_class = {
4721 .class_name = "H.263 encoder",
4722 .item_name = av_default_item_name,
4723 .option = h263_options,
4724 .version = LIBAVUTIL_VERSION_INT,
/* Codec registration for the H.263 encoder; all entry points are the
 * shared mpegvideo encoder functions. */
4727 AVCodec ff_h263_encoder = {
4729 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4730 .type = AVMEDIA_TYPE_VIDEO,
4731 .id = AV_CODEC_ID_H263,
4732 .priv_data_size = sizeof(MpegEncContext),
4733 .init = ff_mpv_encode_init,
4734 .encode2 = ff_mpv_encode_picture,
4735 .close = ff_mpv_encode_end,
4736 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4737 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4738 .priv_class = &h263_class,
/* Private options for the H.263+ (H.263v2) encoder. */
4741 static const AVOption h263p_options[] = {
4742 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4743 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4744 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4745 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
/* AVClass tying the options above to the H.263+ encoder instance. */
4749 static const AVClass h263p_class = {
4750 .class_name = "H.263p encoder",
4751 .item_name = av_default_item_name,
4752 .option = h263p_options,
4753 .version = LIBAVUTIL_VERSION_INT,
4756 AVCodec ff_h263p_encoder = {
4758 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4759 .type = AVMEDIA_TYPE_VIDEO,
4760 .id = AV_CODEC_ID_H263P,
4761 .priv_data_size = sizeof(MpegEncContext),
4762 .init = ff_mpv_encode_init,
4763 .encode2 = ff_mpv_encode_picture,
4764 .close = ff_mpv_encode_end,
4765 .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4766 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4767 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4768 .priv_class = &h263p_class,
4771 static const AVClass msmpeg4v2_class = {
4772 .class_name = "msmpeg4v2 encoder",
4773 .item_name = av_default_item_name,
4774 .option = ff_mpv_generic_options,
4775 .version = LIBAVUTIL_VERSION_INT,
4778 AVCodec ff_msmpeg4v2_encoder = {
4779 .name = "msmpeg4v2",
4780 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4781 .type = AVMEDIA_TYPE_VIDEO,
4782 .id = AV_CODEC_ID_MSMPEG4V2,
4783 .priv_data_size = sizeof(MpegEncContext),
4784 .init = ff_mpv_encode_init,
4785 .encode2 = ff_mpv_encode_picture,
4786 .close = ff_mpv_encode_end,
4787 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4788 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4789 .priv_class = &msmpeg4v2_class,
4792 static const AVClass msmpeg4v3_class = {
4793 .class_name = "msmpeg4v3 encoder",
4794 .item_name = av_default_item_name,
4795 .option = ff_mpv_generic_options,
4796 .version = LIBAVUTIL_VERSION_INT,
4799 AVCodec ff_msmpeg4v3_encoder = {
4801 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4802 .type = AVMEDIA_TYPE_VIDEO,
4803 .id = AV_CODEC_ID_MSMPEG4V3,
4804 .priv_data_size = sizeof(MpegEncContext),
4805 .init = ff_mpv_encode_init,
4806 .encode2 = ff_mpv_encode_picture,
4807 .close = ff_mpv_encode_end,
4808 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4809 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4810 .priv_class = &msmpeg4v3_class,
4813 static const AVClass wmv1_class = {
4814 .class_name = "wmv1 encoder",
4815 .item_name = av_default_item_name,
4816 .option = ff_mpv_generic_options,
4817 .version = LIBAVUTIL_VERSION_INT,
4820 AVCodec ff_wmv1_encoder = {
4822 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4823 .type = AVMEDIA_TYPE_VIDEO,
4824 .id = AV_CODEC_ID_WMV1,
4825 .priv_data_size = sizeof(MpegEncContext),
4826 .init = ff_mpv_encode_init,
4827 .encode2 = ff_mpv_encode_picture,
4828 .close = ff_mpv_encode_end,
4829 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4830 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4831 .priv_class = &wmv1_class,