2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/mem_internal.h"
40 #include "libavutil/pixdesc.h"
41 #include "libavutil/opt.h"
42 #include "libavutil/thread.h"
47 #include "mpegvideo.h"
48 #include "mpegvideodata.h"
52 #include "mjpegenc_common.h"
54 #include "mpegutils.h"
56 #include "speedhqenc.h"
58 #include "pixblockdsp.h"
62 #include "aandcttab.h"
64 #include "mpeg4video.h"
66 #include "bytestream.h"
69 #include "packet_internal.h"
73 #define QUANT_BIAS_SHIFT 8
75 #define QMAT_SHIFT_MMX 16
78 static int encode_picture(MpegEncContext *s, int picture_number);
79 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
80 static int sse_mb(MpegEncContext *s);
81 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
82 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
/* Encoder-wide static lookup tables shared by all encoder instances.
 * default_fcode_tab is filled in mpv_encode_init_static() (see below);
 * default_mv_penalty is presumably filled there too — its init lines are
 * elided in this excerpt, TODO confirm against the full file. */
84 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
85 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
87 const AVOption ff_mpv_generic_options[] = {
/**
 * Precompute reciprocal quantization tables for every qscale in [qmin, qmax].
 * For each qscale the effective scale (qscale2) is either the MPEG-2
 * non-linear mapping (when s->q_scale_type is set) or 2*qscale, and the
 * divisor layout chosen depends on which forward-DCT implementation is in
 * use (islow vs. ifast vs. the 16-bit MMX path filling qmat16).
 * NOTE(review): this excerpt is interspersed with stale line numbers and
 * has elided lines; code tokens are kept byte-identical.
 */
92 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
93 uint16_t (*qmat16)[2][64],
94 const uint16_t *quant_matrix,
95 int bias, int qmin, int qmax, int intra)
97     FDCTDSPContext *fdsp = &s->fdsp;
101     for (qscale = qmin; qscale <= qmax; qscale++) {
/* Select the effective quantiser scale: MPEG-2 non-linear table or 2*qscale. */
105         if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
106         else qscale2 = qscale << 1;
/* Path 1: accurate integer DCTs (jpeg islow 8/10-bit, faandct) — plain
 * reciprocal without AAN post-scale factors. */
108         if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
110             fdsp->fdct == ff_faandct ||
111 #endif /* CONFIG_FAANDCT */
112             fdsp->fdct == ff_jpeg_fdct_islow_10) {
113             for (i = 0; i < 64; i++) {
114                 const int j = s->idsp.idct_permutation[i];
115                 int64_t den = (int64_t) qscale2 * quant_matrix[j];
116                 /* 16 <= qscale * quant_matrix[i] <= 7905
117                  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
118                  * 19952 <= x <= 249205026
119                  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
120                  * 3444240 >= (1 << 36) / (x) >= 275 */
122                 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
/* Path 2: AAN "ifast" DCT — its outputs carry per-coefficient scale factors
 * (ff_aanscales), so the divisor folds them in and uses 14 extra bits. */
124             } else if (fdsp->fdct == ff_fdct_ifast) {
125             for (i = 0; i < 64; i++) {
126                 const int j = s->idsp.idct_permutation[i];
127                 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
128                 /* 16 <= qscale * quant_matrix[i] <= 7905
129                  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
130                  * 19952 <= x <= 249205026
131                  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
132                  * 3444240 >= (1 << 36) / (x) >= 275 */
134                 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
/* Path 3 (fallthrough/else, opening elided in this excerpt): also fill the
 * 16-bit qmat16 tables used by the SIMD quantizer, plus the rounding bias. */
137             for (i = 0; i < 64; i++) {
138                 const int j = s->idsp.idct_permutation[i];
139                 int64_t den = (int64_t) qscale2 * quant_matrix[j];
140                 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
141                  * Assume x = qscale * quant_matrix[i]
143                  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
144                  * so 32768 >= (1 << 19) / (x) >= 67 */
145                 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
146                 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
147                 //                   (qscale * quant_matrix[i]);
148                 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
/* Clamp to avoid the 0 and 0x8000 values the 16-bit multiplier cannot use. */
150                 if (qmat16[qscale][0][i] == 0 ||
151                     qmat16[qscale][0][i] == 128 * 256)
152                     qmat16[qscale][0][i] = 128 * 256 - 1;
153                 qmat16[qscale][1][i] =
154                     ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
155                                 qmat16[qscale][0][i]);
/* Shift-reduction loop (setup elided): shrink the quantizer shift until the
 * worst-case product no longer overflows int, warning if it had to shrink. */
159         for (i = intra; i < 64; i++) {
161             if (fdsp->fdct == ff_fdct_ifast) {
162                 max = (8191LL * ff_aanscales[i]) >> 14;
164             while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
170         av_log(s->avctx, AV_LOG_INFO,
171                "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/**
 * Derive s->qscale (and s->lambda2) from the current rate-control lambda.
 * qscale ~= lambda * 139 / 2^(FF_LAMBDA_SHIFT+7), clipped to [qmin, qmax]
 * (qmax relaxed to 31 when VBV forces ignoring qmax).
 */
176 static inline void update_qscale(MpegEncContext *s)
/* NOTE(review): "&& 0" makes this non-linear-qscale search branch dead code —
 * deliberately disabled upstream, kept here untouched. */
178     if (s->q_scale_type == 1 && 0) {
180         int bestdiff=INT_MAX;
183         for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
184             int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
185             if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
186                 (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
188             if (diff < bestdiff) {
/* Linear mapping path: rounded lambda-to-qscale conversion, then clip. */
195         s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
196                     (FF_LAMBDA_SHIFT + 7);
197         s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
/* Keep lambda2 consistent with lambda (rounded square). */
200     s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/**
 * Write all 64 quantization-matrix coefficients to the bitstream,
 * 8 bits each, in zigzag scan order.
 */
204 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
210     for (i = 0; i < 64; i++) {
211         put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
218 * init s->current_picture.qscale_table from s->lambda_table
/**
 * Fill s->current_picture.qscale_table from the per-MB lambda table,
 * converting each lambda to a qscale with the same rounded mapping as
 * update_qscale() and clipping to the configured qmin (upper clip elided
 * in this excerpt).
 */
220 void ff_init_qscale_tab(MpegEncContext *s)
222     int8_t * const qscale_table = s->current_picture.qscale_table;
225     for (i = 0; i < s->mb_num; i++) {
226         unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
227         int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
228         qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/**
 * Copy per-frame state fields from src into dst after motion estimation —
 * presumably to re-sync duplicated slice-thread contexts with the main
 * context (src parameter declaration elided in this excerpt; TODO confirm).
 */
233 static void update_duplicate_context_after_me(MpegEncContext *dst,
236 #define COPY(a) dst->a= src->a
238     COPY(current_picture);
244     COPY(picture_in_gop_number);
245     COPY(gop_picture_number);
246     COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
247     COPY(progressive_frame);    // FIXME don't set in encode_header
248     COPY(partitioned_frame);    // FIXME don't set in encode_header
/**
 * One-time init of the static encoder tables (run via ff_thread_once):
 * marks fcode 1 for all motion vectors in [-16, 15] in default_fcode_tab.
 * (Initialisation of default_mv_penalty is elided in this excerpt.)
 */
252 static void mpv_encode_init_static(void)
254     for (int i = -16; i < 16; i++)
255         default_fcode_tab[i + MAX_MV] = 1;
259 * Set the given MpegEncContext to defaults for encoding.
260 * the changed fields will not depend upon the prior state of the MpegEncContext.
/**
 * Set the given MpegEncContext to encoding defaults: common defaults,
 * one-time static table init, shared penalty/fcode tables, and reset
 * picture counters. Does not depend on prior context state.
 */
262 static void mpv_encode_defaults(MpegEncContext *s)
264     static AVOnce init_static_once = AV_ONCE_INIT;
266     ff_mpv_common_defaults(s);
268     ff_thread_once(&init_static_once, mpv_encode_init_static);
270     s->me.mv_penalty = default_mv_penalty;
271     s->fcode_tab     = default_fcode_tab;
273     s->input_picture_number  = 0;
274     s->picture_in_gop_number = 0;
/**
 * Install the DCT quantizer function pointers: x86-optimised versions where
 * available, the C fallback otherwise, and the trellis quantizer when
 * avctx->trellis is enabled (fast_dct_quantize keeps the non-trellis one).
 */
277 av_cold int ff_dct_encode_init(MpegEncContext *s)
280     ff_dct_encode_init_x86(s);
282     if (CONFIG_H263_ENCODER)
283         ff_h263dsp_init(&s->h263dsp);
284     if (!s->dct_quantize)
285         s->dct_quantize = ff_dct_quantize_c;
287         s->denoise_dct = denoise_dct_c;
288     s->fast_dct_quantize = s->dct_quantize;
289     if (s->avctx->trellis)
290         s->dct_quantize  = dct_quantize_trellis_c;
295 /* init video encoder */
/**
 * Main encoder init: validates the AVCodecContext options against the
 * selected codec's capabilities, configures codec-specific state, allocates
 * the quantization/picture tables and initialises DSP, rate control and CPB
 * properties. Returns 0 on success or a negative AVERROR code.
 * NOTE(review): many interior lines are elided in this excerpt; code tokens
 * are reproduced byte-identically.
 */
296 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
298     MpegEncContext *s = avctx->priv_data;
299     AVCPBProperties *cpb_props;
302     mpv_encode_defaults(s);
/* --- pixel format -> chroma subsampling --- */
304     switch (avctx->pix_fmt) {
305     case AV_PIX_FMT_YUVJ444P:
306     case AV_PIX_FMT_YUV444P:
307         s->chroma_format = CHROMA_444;
309     case AV_PIX_FMT_YUVJ422P:
310     case AV_PIX_FMT_YUV422P:
311         s->chroma_format = CHROMA_422;
313     case AV_PIX_FMT_YUVJ420P:
314     case AV_PIX_FMT_YUV420P:
316         s->chroma_format = CHROMA_420;
320     avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
/* --- legacy (deprecated) AVCodecContext options folded into private ones --- */
322 #if FF_API_PRIVATE_OPT
323 FF_DISABLE_DEPRECATION_WARNINGS
324     if (avctx->rtp_payload_size)
325         s->rtp_payload_size = avctx->rtp_payload_size;
326     if (avctx->me_penalty_compensation)
327         s->me_penalty_compensation = avctx->me_penalty_compensation;
329     s->me_pre = avctx->pre_me;
330 FF_ENABLE_DEPRECATION_WARNINGS
/* --- basic parameters copied from the AVCodecContext --- */
333     s->bit_rate = avctx->bit_rate;
334     s->width    = avctx->width;
335     s->height   = avctx->height;
336     if (avctx->gop_size > 600 &&
337         avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
338         av_log(avctx, AV_LOG_WARNING,
339                "keyframe interval too large!, reducing it from %d to %d\n",
340                avctx->gop_size, 600);
341         avctx->gop_size = 600;
343     s->gop_size     = avctx->gop_size;
345     if (avctx->max_b_frames > MAX_B_FRAMES) {
346         av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
347                "is %d.\n", MAX_B_FRAMES);
348         avctx->max_b_frames = MAX_B_FRAMES;
350     s->max_b_frames = avctx->max_b_frames;
351     s->codec_id     = avctx->codec->id;
352     s->strict_std_compliance = avctx->strict_std_compliance;
353     s->quarter_sample     = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
354     s->rtp_mode           = !!s->rtp_payload_size;
355     s->intra_dc_precision = avctx->intra_dc_precision;
357     // workaround some differences between how applications specify dc precision
358     if (s->intra_dc_precision < 0) {
359         s->intra_dc_precision += 8;
360     } else if (s->intra_dc_precision >= 8)
361         s->intra_dc_precision -= 8;
363     if (s->intra_dc_precision < 0) {
364         av_log(avctx, AV_LOG_ERROR,
365                "intra dc precision must be positive, note some applications use"
366                " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
367         return AVERROR(EINVAL);
370     if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))
373     if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
374         av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
375         return AVERROR(EINVAL);
377     s->user_specified_pts = AV_NOPTS_VALUE;
379     if (s->gop_size <= 1) {
387     s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
/* Adaptive quantisation is enabled whenever any masking option (or QP-RD)
 * asks for per-MB qscale variation. */
389     s->adaptive_quant = (avctx->lumi_masking ||
390                          avctx->dark_masking ||
391                          avctx->temporal_cplx_masking ||
392                          avctx->spatial_cplx_masking  ||
395                          (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
398     s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
/* --- rate control / VBV sanity checks; auto-pick a buffer size when only a
 *     max rate was given --- */
400     if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
401         switch(avctx->codec_id) {
402         case AV_CODEC_ID_MPEG1VIDEO:
403         case AV_CODEC_ID_MPEG2VIDEO:
404             avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
406         case AV_CODEC_ID_MPEG4:
407         case AV_CODEC_ID_MSMPEG4V1:
408         case AV_CODEC_ID_MSMPEG4V2:
409         case AV_CODEC_ID_MSMPEG4V3:
410             if       (avctx->rc_max_rate >= 15000000) {
411                 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
412             } else if(avctx->rc_max_rate >=  2000000) {
413                 avctx->rc_buffer_size =  80 + (avctx->rc_max_rate -  2000000LL) * (320- 80) / (15000000 -  2000000);
414             } else if(avctx->rc_max_rate >=   384000) {
415                 avctx->rc_buffer_size =  40 + (avctx->rc_max_rate -   384000LL) * ( 80- 40) / ( 2000000 -   384000);
417                 avctx->rc_buffer_size = 40;
418             avctx->rc_buffer_size *= 16384;
421         if (avctx->rc_buffer_size) {
422             av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
426     if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
427         av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
428         return AVERROR(EINVAL);
431     if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
432         av_log(avctx, AV_LOG_INFO,
433                "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
436     if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
437         av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
438         return AVERROR(EINVAL);
441     if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
442         av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
443         return AVERROR(EINVAL);
446     if (avctx->rc_max_rate &&
447         avctx->rc_max_rate == avctx->bit_rate &&
448         avctx->rc_max_rate != avctx->rc_min_rate) {
449         av_log(avctx, AV_LOG_INFO,
450                "impossible bitrate constraints, this will fail\n");
453     if (avctx->rc_buffer_size &&
454         avctx->bit_rate * (int64_t)avctx->time_base.num >
455             avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
456         av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
457         return AVERROR(EINVAL);
460     if (!s->fixed_qscale &&
461         avctx->bit_rate * av_q2d(avctx->time_base) >
462             avctx->bit_rate_tolerance) {
463         av_log(avctx, AV_LOG_WARNING,
464                "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
465         avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
468     if (avctx->rc_max_rate &&
469         avctx->rc_min_rate == avctx->rc_max_rate &&
470         (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
471          s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
472         90000LL * (avctx->rc_buffer_size - 1) >
473             avctx->rc_max_rate * 0xFFFFLL) {
474         av_log(avctx, AV_LOG_INFO,
475                "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
476                "specified vbv buffer is too large for the given bitrate!\n");
/* --- per-codec capability checks: reject options the codec cannot do --- */
479     if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
480         s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
481         s->codec_id != AV_CODEC_ID_FLV1) {
482         av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
483         return AVERROR(EINVAL);
486     if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
487         av_log(avctx, AV_LOG_ERROR,
488                "OBMC is only supported with simple mb decision\n");
489         return AVERROR(EINVAL);
492     if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
493         av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
494         return AVERROR(EINVAL);
497     if (s->max_b_frames                           &&
498         s->codec_id != AV_CODEC_ID_MPEG4          &&
499         s->codec_id != AV_CODEC_ID_MPEG1VIDEO     &&
500         s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
501         av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
502         return AVERROR(EINVAL);
504     if (s->max_b_frames < 0) {
505         av_log(avctx, AV_LOG_ERROR,
506                "max b frames must be 0 or positive for mpegvideo based encoders\n");
507         return AVERROR(EINVAL);
510     if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
511          s->codec_id == AV_CODEC_ID_H263  ||
512          s->codec_id == AV_CODEC_ID_H263P) &&
513         (avctx->sample_aspect_ratio.num > 255 ||
514          avctx->sample_aspect_ratio.den > 255)) {
515         av_log(avctx, AV_LOG_WARNING,
516                "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
517                avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
518         av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
519                    avctx->sample_aspect_ratio.num,  avctx->sample_aspect_ratio.den, 255);
/* --- per-codec resolution / alignment limits --- */
522     if ((s->codec_id == AV_CODEC_ID_H263  ||
523          s->codec_id == AV_CODEC_ID_H263P) &&
524         (avctx->width  > 2048 ||
525          avctx->height > 1152 )) {
526         av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
527         return AVERROR(EINVAL);
529     if ((s->codec_id == AV_CODEC_ID_H263  ||
530          s->codec_id == AV_CODEC_ID_H263P) &&
531         ((avctx->width &3) ||
532          (avctx->height&3) )) {
533         av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
534         return AVERROR(EINVAL);
537     if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
538         (avctx->width  > 4095 ||
539          avctx->height > 4095 )) {
540         av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
541         return AVERROR(EINVAL);
544     if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
545         (avctx->width  > 16383 ||
546          avctx->height > 16383 )) {
547         av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
548         return AVERROR(EINVAL);
551     if (s->codec_id == AV_CODEC_ID_RV10 &&
553          avctx->height&15 )) {
554         av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
555         return AVERROR(EINVAL);
558     if (s->codec_id == AV_CODEC_ID_RV20 &&
561         av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
562         return AVERROR(EINVAL);
565     if ((s->codec_id == AV_CODEC_ID_WMV1 ||
566          s->codec_id == AV_CODEC_ID_WMV2) &&
568         av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
569         return AVERROR(EINVAL);
572     if ((avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
573         s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
574         av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
575         return AVERROR(EINVAL);
578 #if FF_API_PRIVATE_OPT
579     FF_DISABLE_DEPRECATION_WARNINGS
580     if (avctx->mpeg_quant)
582     FF_ENABLE_DEPRECATION_WARNINGS
585     // FIXME mpeg2 uses that too
586     if (s->mpeg_quant && (   s->codec_id != AV_CODEC_ID_MPEG4
587                           && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
588         av_log(avctx, AV_LOG_ERROR,
589                "mpeg2 style quantization not supported by codec\n");
590         return AVERROR(EINVAL);
593     if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
594         av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
595         return AVERROR(EINVAL);
598     if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
599         avctx->mb_decision != FF_MB_DECISION_RD) {
600         av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
601         return AVERROR(EINVAL);
604     if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
605         (s->codec_id == AV_CODEC_ID_AMV ||
606          s->codec_id == AV_CODEC_ID_MJPEG)) {
607         // Used to produce garbage with MJPEG.
608         av_log(avctx, AV_LOG_ERROR,
609                "QP RD is no longer compatible with MJPEG or AMV\n");
610         return AVERROR(EINVAL);
613 #if FF_API_PRIVATE_OPT
614 FF_DISABLE_DEPRECATION_WARNINGS
615     if (avctx->scenechange_threshold)
616         s->scenechange_threshold = avctx->scenechange_threshold;
617 FF_ENABLE_DEPRECATION_WARNINGS
620     if (s->scenechange_threshold < 1000000000 &&
621         (avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
622         av_log(avctx, AV_LOG_ERROR,
623                "closed gop with scene change detection are not supported yet, "
624                "set threshold to 1000000000\n");
625         return AVERROR_PATCHWELCOME;
628     if (avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
629         if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
630             s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
631             av_log(avctx, AV_LOG_ERROR,
632                    "low delay forcing is only available for mpeg2, "
633                    "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
634             return AVERROR(EINVAL);
636         if (s->max_b_frames != 0) {
637             av_log(avctx, AV_LOG_ERROR,
638                    "B-frames cannot be used with low delay\n");
639             return AVERROR(EINVAL);
643     if (s->q_scale_type == 1) {
644         if (avctx->qmax > 28) {
645             av_log(avctx, AV_LOG_ERROR,
646                    "non linear quant only supports qmax <= 28 currently\n");
647             return AVERROR_PATCHWELCOME;
651     if (avctx->slices > 1 &&
652         (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
653         av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
654         return AVERROR(EINVAL);
657     if (avctx->thread_count > 1         &&
658         s->codec_id != AV_CODEC_ID_MPEG4      &&
659         s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
660         s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
661         s->codec_id != AV_CODEC_ID_MJPEG      &&
662         (s->codec_id != AV_CODEC_ID_H263P)) {
663         av_log(avctx, AV_LOG_ERROR,
664                "multi threaded encoding not supported by codec\n");
665         return AVERROR_PATCHWELCOME;
668     if (avctx->thread_count < 1) {
669         av_log(avctx, AV_LOG_ERROR,
670                "automatic thread number detection not supported by codec, "
672         return AVERROR_PATCHWELCOME;
675     if (!avctx->time_base.den || !avctx->time_base.num) {
676         av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
677         return AVERROR(EINVAL);
680 #if FF_API_PRIVATE_OPT
681 FF_DISABLE_DEPRECATION_WARNINGS
682     if (avctx->b_frame_strategy)
683         s->b_frame_strategy = avctx->b_frame_strategy;
684     if (avctx->b_sensitivity != 40)
685         s->b_sensitivity = avctx->b_sensitivity;
686 FF_ENABLE_DEPRECATION_WARNINGS
689     if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
690         av_log(avctx, AV_LOG_INFO,
691                "notice: b_frame_strategy only affects the first pass\n");
692         s->b_frame_strategy = 0;
/* Reduce the timebase to lowest terms before further validation. */
695     i = av_gcd(avctx->time_base.den, avctx->time_base.num);
697         av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
698         avctx->time_base.den /= i;
699         avctx->time_base.num /= i;
/* --- quantizer bias defaults, codec-dependent --- */
703     if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
704         // (a + x * 3 / 8) / x
705         s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
706         s->inter_quant_bias = 0;
708         s->intra_quant_bias = 0;
710         s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
713     if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
714         av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
715         return AVERROR(EINVAL);
718     av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
720     if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
721         avctx->time_base.den > (1 << 16) - 1) {
722         av_log(avctx, AV_LOG_ERROR,
723                "timebase %d/%d not supported by MPEG 4 standard, "
724                "the maximum admitted value for the timebase denominator "
725                "is %d\n", avctx->time_base.num, avctx->time_base.den,
727         return AVERROR(EINVAL);
729     s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
/* --- codec-specific setup: output format, delay, per-codec quirks --- */
731     switch (avctx->codec->id) {
732     case AV_CODEC_ID_MPEG1VIDEO:
733         s->out_format = FMT_MPEG1;
734         s->low_delay  = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
735         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
737     case AV_CODEC_ID_MPEG2VIDEO:
738         s->out_format = FMT_MPEG1;
739         s->low_delay  = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
740         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
743 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
744     case AV_CODEC_ID_MJPEG:
745     case AV_CODEC_ID_AMV:
746         s->out_format = FMT_MJPEG;
747         s->intra_only = 1; /* force intra only for jpeg */
748         if ((ret = ff_mjpeg_encode_init(s)) < 0)
754     case AV_CODEC_ID_SPEEDHQ:
755         s->out_format = FMT_SPEEDHQ;
756         s->intra_only = 1; /* force intra only for SHQ */
757         if (!CONFIG_SPEEDHQ_ENCODER)
758             return AVERROR_ENCODER_NOT_FOUND;
759         if ((ret = ff_speedhq_encode_init(s)) < 0)
764     case AV_CODEC_ID_H261:
765         if (!CONFIG_H261_ENCODER)
766             return AVERROR_ENCODER_NOT_FOUND;
767         if (ff_h261_get_picture_format(s->width, s->height) < 0) {
768             av_log(avctx, AV_LOG_ERROR,
769                    "The specified picture size of %dx%d is not valid for the "
770                    "H.261 codec.\nValid sizes are 176x144, 352x288\n",
771                     s->width, s->height);
772             return AVERROR(EINVAL);
774         s->out_format = FMT_H261;
777         s->rtp_mode   = 0; /* Sliced encoding not supported */
779     case AV_CODEC_ID_H263:
780         if (!CONFIG_H263_ENCODER)
781             return AVERROR_ENCODER_NOT_FOUND;
782         if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
783                              s->width, s->height) == 8) {
784             av_log(avctx, AV_LOG_ERROR,
785                    "The specified picture size of %dx%d is not valid for "
786                    "the H.263 codec.\nValid sizes are 128x96, 176x144, "
787                    "352x288, 704x576, and 1408x1152. "
788                    "Try H.263+.\n", s->width, s->height);
789             return AVERROR(EINVAL);
791         s->out_format = FMT_H263;
795     case AV_CODEC_ID_H263P:
796         s->out_format = FMT_H263;
799         s->h263_aic        = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
800         s->modified_quant  = s->h263_aic;
801         s->loop_filter     = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
802         s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
805         /* These are just to be sure */
809     case AV_CODEC_ID_FLV1:
810         s->out_format      = FMT_H263;
811         s->h263_flv        = 2; /* format = 1; 11-bit codes */
812         s->unrestricted_mv = 1;
813         s->rtp_mode  = 0; /* don't allow GOB */
817     case AV_CODEC_ID_RV10:
818         s->out_format = FMT_H263;
822     case AV_CODEC_ID_RV20:
823         s->out_format      = FMT_H263;
826         s->modified_quant  = 1;
830         s->unrestricted_mv = 0;
832     case AV_CODEC_ID_MPEG4:
833         s->out_format      = FMT_H263;
835         s->unrestricted_mv = 1;
836         s->low_delay       = s->max_b_frames ? 0 : 1;
837         avctx->delay       = s->low_delay ? 0 : (s->max_b_frames + 1);
839     case AV_CODEC_ID_MSMPEG4V2:
840         s->out_format      = FMT_H263;
842         s->unrestricted_mv = 1;
843         s->msmpeg4_version = 2;
847     case AV_CODEC_ID_MSMPEG4V3:
848         s->out_format        = FMT_H263;
850         s->unrestricted_mv   = 1;
851         s->msmpeg4_version   = 3;
852         s->flipflop_rounding = 1;
856     case AV_CODEC_ID_WMV1:
857         s->out_format        = FMT_H263;
859         s->unrestricted_mv   = 1;
860         s->msmpeg4_version   = 4;
861         s->flipflop_rounding = 1;
865     case AV_CODEC_ID_WMV2:
866         s->out_format        = FMT_H263;
868         s->unrestricted_mv   = 1;
869         s->msmpeg4_version   = 5;
870         s->flipflop_rounding = 1;
875         return AVERROR(EINVAL);
878 #if FF_API_PRIVATE_OPT
879     FF_DISABLE_DEPRECATION_WARNINGS
880     if (avctx->noise_reduction)
881         s->noise_reduction = avctx->noise_reduction;
882     FF_ENABLE_DEPRECATION_WARNINGS
885     avctx->has_b_frames = !s->low_delay;
889     s->progressive_frame    =
890     s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
891                                                 AV_CODEC_FLAG_INTERLACED_ME) ||
/* --- allocations and sub-module initialisation --- */
896     if ((ret = ff_mpv_common_init(s)) < 0)
899     ff_fdctdsp_init(&s->fdsp, avctx);
900     ff_me_cmp_init(&s->mecc, avctx);
901     ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
902     ff_pixblockdsp_init(&s->pdsp, avctx);
903     ff_qpeldsp_init(&s->qdsp);
905     if (s->msmpeg4_version) {
906         int ac_stats_size = 2 * 2 * (MAX_LEVEL + 1) * (MAX_RUN + 1) * 2 * sizeof(int);
907         if (!(s->ac_stats = av_mallocz(ac_stats_size)))
908             return AVERROR(ENOMEM);
911     if (!(avctx->stats_out = av_mallocz(256))               ||
912         !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix,          32) ||
913         !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix,   32) ||
914         !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix,          32) ||
915         !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16,        32) ||
916         !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
917         !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16,        32) ||
918         !FF_ALLOCZ_TYPED_ARRAY(s->input_picture,           MAX_PICTURE_COUNT) ||
919         !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_PICTURE_COUNT))
920         return AVERROR(ENOMEM);
922     if (s->noise_reduction) {
923         if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
924             return AVERROR(ENOMEM);
927     ff_dct_encode_init(s);
929     if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
930         s->chroma_qscale_table = ff_h263_chroma_qscale_table;
932     if (s->slice_context_count > 1) {
935         if (avctx->codec_id == AV_CODEC_ID_H263P)
936             s->h263_slice_structured = 1;
939     s->quant_precision = 5;
941 #if FF_API_PRIVATE_OPT
942 FF_DISABLE_DEPRECATION_WARNINGS
943     if (avctx->frame_skip_threshold)
944         s->frame_skip_threshold = avctx->frame_skip_threshold;
945     if (avctx->frame_skip_factor)
946         s->frame_skip_factor = avctx->frame_skip_factor;
947     if (avctx->frame_skip_exp)
948         s->frame_skip_exp = avctx->frame_skip_exp;
949     if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
950         s->frame_skip_cmp = avctx->frame_skip_cmp;
951 FF_ENABLE_DEPRECATION_WARNINGS
954     ff_set_cmp(&s->mecc, s->mecc.ildct_cmp,      avctx->ildct_cmp);
955     ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
957     if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
958         ff_h261_encode_init(s);
959     if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
960         ff_h263_encode_init(s);
961     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
962         ff_msmpeg4_encode_init(s);
963     if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
964         && s->out_format == FMT_MPEG1)
965         ff_mpeg1_encode_init(s);
/* --- pick default quant matrices (idct-permuted), honouring user overrides --- */
968     for (i = 0; i < 64; i++) {
969         int j = s->idsp.idct_permutation[i];
970         if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
972             s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
973             s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
974         } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
976             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
977         } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
979             s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
982             s->chroma_intra_matrix[j] =
983             s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
984             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
986         if (avctx->intra_matrix)
987             s->intra_matrix[j] = avctx->intra_matrix[i];
988         if (avctx->inter_matrix)
989             s->inter_matrix[j] = avctx->inter_matrix[i];
992     /* precompute matrix */
993     /* for mjpeg, we do include qscale in the matrix */
994     if (s->out_format != FMT_MJPEG) {
995         ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
996                           s->intra_matrix, s->intra_quant_bias, avctx->qmin,
998         ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
999                           s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1003     if ((ret = ff_rate_control_init(s)) < 0)
1006 #if FF_API_PRIVATE_OPT
1007     FF_DISABLE_DEPRECATION_WARNINGS
1008     if (avctx->brd_scale)
1009         s->brd_scale = avctx->brd_scale;
1011     if (avctx->prediction_method)
1012         s->pred = avctx->prediction_method + 1;
1013     FF_ENABLE_DEPRECATION_WARNINGS
/* b_frame_strategy 2 needs scratch frames for look-ahead B-frame decisions. */
1016     if (s->b_frame_strategy == 2) {
1017         for (i = 0; i < s->max_b_frames + 2; i++) {
1018             s->tmp_frames[i] = av_frame_alloc();
1019             if (!s->tmp_frames[i])
1020                 return AVERROR(ENOMEM);
1022             s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1023             s->tmp_frames[i]->width  = s->width  >> s->brd_scale;
1024             s->tmp_frames[i]->height = s->height >> s->brd_scale;
1026             ret = av_frame_get_buffer(s->tmp_frames[i], 0);
/* Export CPB (coded picture buffer) properties as side data. */
1032     cpb_props = ff_add_cpb_side_data(avctx);
1034         return AVERROR(ENOMEM);
1035     cpb_props->max_bitrate = avctx->rc_max_rate;
1036     cpb_props->min_bitrate = avctx->rc_min_rate;
1037     cpb_props->avg_bitrate = avctx->bit_rate;
1038     cpb_props->buffer_size = avctx->rc_buffer_size;
/**
 * Encoder teardown: uninit rate control and common state, close the MJPEG
 * sub-encoder when active, and free every buffer allocated in
 * ff_mpv_encode_init(). The chroma matrices may alias the luma ones, hence
 * the pointer-equality guard before freeing them.
 */
1043 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1045     MpegEncContext *s = avctx->priv_data;
1048     ff_rate_control_uninit(s);
1050     ff_mpv_common_end(s);
1051     if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
1052         s->out_format == FMT_MJPEG)
1053         ff_mjpeg_encode_close(s);
1055     av_freep(&avctx->extradata);
1057     for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1058         av_frame_free(&s->tmp_frames[i]);
1060     ff_free_picture_tables(&s->new_picture);
1061     ff_mpeg_unref_picture(avctx, &s->new_picture);
1063     av_freep(&avctx->stats_out);
1064     av_freep(&s->ac_stats);
/* q_chroma_intra_matrix may alias q_intra_matrix — only free when distinct
 * to avoid a double free. */
1066     if(s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
1067     if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1068     s->q_chroma_intra_matrix=   NULL;
1069     s->q_chroma_intra_matrix16= NULL;
1070     av_freep(&s->q_intra_matrix);
1071     av_freep(&s->q_inter_matrix);
1072     av_freep(&s->q_intra_matrix16);
1073     av_freep(&s->q_inter_matrix16);
1074     av_freep(&s->input_picture);
1075     av_freep(&s->reordered_input_picture);
1076     av_freep(&s->dct_offset);
/**
 * Sum of absolute errors of a 16x16 block in src against the constant
 * value ref (accumulator declaration and return elided in this excerpt).
 */
1081 static int get_sae(uint8_t *src, int ref, int stride)
1086     for (y = 0; y < 16; y++) {
1087         for (x = 0; x < 16; x++) {
1088             acc += FFABS(src[x + y * stride] - ref);
/**
 * Count 16x16 blocks that look cheaper to code as intra: for each macroblock
 * compare the SAD against the reference frame with the SAE against the
 * block's own mean; a block counts when sae + 500 < sad. Width setup is
 * elided in this excerpt; height is rounded down to a multiple of 16.
 */
1095 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1096                            uint8_t *ref, int stride)
1102     h = s->height & ~15;
1104     for (y = 0; y < h; y += 16) {
1105         for (x = 0; x < w; x += 16) {
1106             int offset = x + y * stride;
1107             int sad  = s->mecc.sad[0](NULL, src + offset, ref + offset,
1109             int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1110             int sae  = get_sae(src + offset, mean, stride);
1112             acc += sae + 500 < sad;
/**
 * Thin wrapper around ff_alloc_picture() that forwards the encoder's
 * geometry/stride parameters; returns its (0/negative AVERROR) result.
 */
1118 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1120     return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1121                             s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1122                             s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1123                             &s->linesize, &s->uvlinesize);
/* Take one user-supplied input frame (or NULL at flush time) and place it
 * into the encoder's s->input_picture[] reorder buffer.
 *
 * Responsibilities visible in this chunk:
 *  - derive/validate the frame pts (guess consecutive pts when absent);
 *  - decide whether the user buffer can be referenced directly ("direct")
 *    or must be copied into an internally allocated Picture;
 *  - pad copied planes via draw_edges when dimensions are not MB-aligned;
 *  - on flush, compact input_picture[] so slot 0 holds the first frame.
 *
 * Returns 0 on success, a negative AVERROR on failure.
 * NOTE(review): several lines (pts extraction, `direct` computation,
 * error-path returns, per-line copy loop tail) are missing from this
 * chunk; comments on those paths are inferences — confirm against the
 * full file. */
1126 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1128     Picture *pic = NULL;
1130     int i, display_picture_number = 0, ret;
     /* frames of reordering delay: max_b_frames if B-frames are used,
      * otherwise 1 unless low_delay */
1131     int encoding_delay = s->max_b_frames ? s->max_b_frames
1132                                          : (s->low_delay ? 0 : 1);
1133     int flush_offset = 1;
1138         display_picture_number = s->input_picture_number++;
1140         if (pts != AV_NOPTS_VALUE) {
1141             if (s->user_specified_pts != AV_NOPTS_VALUE) {
1142                 int64_t last = s->user_specified_pts;
                 /* reject non-monotonic user pts */
1145                     av_log(s->avctx, AV_LOG_ERROR,
1146                            "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1148                     return AVERROR(EINVAL);
             /* remember pts spacing of the first two frames to later
              * derive dts for reordered output */
1151                 if (!s->low_delay && display_picture_number == 1)
1152                     s->dts_delta = pts - last;
1154             s->user_specified_pts = pts;
         /* no pts supplied: synthesize one (previous + 1, or the display
          * number for the very first frame) and warn */
1156             if (s->user_specified_pts != AV_NOPTS_VALUE) {
1157                 s->user_specified_pts =
1158                 pts = s->user_specified_pts + 1;
1159                 av_log(s->avctx, AV_LOG_INFO,
1160                        "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1163                 pts = display_picture_number;
     /* "direct" (zero-copy) referencing of the user buffer is only
      * possible when layout matches the encoder's exactly */
1167         if (!pic_arg->buf[0] ||
1168             pic_arg->linesize[0] != s->linesize ||
1169             pic_arg->linesize[1] != s->uvlinesize ||
1170             pic_arg->linesize[2] != s->uvlinesize)
1172         if ((s->width & 15) || (s->height & 15))
1174         if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1176         if (s->linesize & (STRIDE_ALIGN-1))
1179         ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1180                 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1182         i = ff_find_unused_picture(s->avctx, s->picture, direct);
1186         pic = &s->picture[i];
1190             if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1193         ret = alloc_picture(s, pic, direct);
         /* user buffer already sits at the encoder's in-place offset:
          * nothing to copy */
1198             if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1199                 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1200                 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1203                 int h_chroma_shift, v_chroma_shift;
1204                 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
             /* copy each plane into the internal buffer, padding edges */
1208                 for (i = 0; i < 3; i++) {
1209                     int src_stride = pic_arg->linesize[i];
1210                     int dst_stride = i ? s->uvlinesize : s->linesize;
1211                     int h_shift = i ? h_chroma_shift : 0;
1212                     int v_shift = i ? v_chroma_shift : 0;
1213                     int w = s->width >> h_shift;
1214                     int h = s->height >> v_shift;
1215                     uint8_t *src = pic_arg->data[i];
1216                     uint8_t *dst = pic->f->data[i];
                 /* interlaced MPEG-2 needs 32-line alignment padding
                  * (field DCT reaches further down) */
1219                     if (   s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1220                         && !s->progressive_sequence
1221                         && FFALIGN(s->height, 32) - s->height > 16)
1224                     if (!s->avctx->rc_buffer_size)
1225                         dst += INPLACE_OFFSET;
1227                     if (src_stride == dst_stride)
1228                         memcpy(dst, src, src_stride * h);
1231                         uint8_t *dst2 = dst;
1233                             memcpy(dst2, src, w);
                 /* pad non-MB-aligned dimensions by edge replication */
1238                     if ((s->width & 15) || (s->height & (vpad-1))) {
1239                         s->mpvencdsp.draw_edges(dst, dst_stride,
1249         ret = av_frame_copy_props(pic->f, pic_arg);
1253         pic->f->display_picture_number = display_picture_number;
1254         pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1256     /* Flushing: When we have not received enough input frames,
1257      * ensure s->input_picture[0] contains the first picture */
1258     for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1259         if (s->input_picture[flush_offset])
1262     if (flush_offset <= 1)
1265         encoding_delay = encoding_delay - flush_offset + 1;
1268     /* shift buffer entries */
1269     for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1270         s->input_picture[i - flush_offset] = s->input_picture[i];
1272     s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether candidate frame `p` is similar enough to reference
 * `ref` to be skipped entirely (frame-skip rate control).
 * Compares all three planes in 8x8 blocks with frame_skip_cmp and
 * accumulates a score whose formula depends on |frame_skip_exp|
 * (0 = max, 1 = sum of abs, 2..4 = higher powers).
 * NOTE(review): the returns (non-zero = "skip this frame") and some
 * declarations are missing from this chunk — confirm exact return
 * convention against the full file. */
1277 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1281     int64_t score64 = 0;
1283     for (plane = 0; plane < 3; plane++) {
1284         const int stride = p->f->linesize[plane];
         /* luma plane spans 2x2 8x8 blocks per MB, chroma 1x1 */
1285         const int bw = plane ? 1 : 2;
1286         for (y = 0; y < s->mb_height * bw; y++) {
1287             for (x = 0; x < s->mb_width * bw; x++) {
                 /* non-shared pictures carry a 16-byte in-place offset */
1288                 int off = p->shared ? 0 : 16;
1289                 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1290                 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1291                 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1293                 switch (FFABS(s->frame_skip_exp)) {
1294                 case 0: score = FFMAX(score, v); break;
1295                 case 1: score += FFABS(v); break;
1296                 case 2: score64 += v * (int64_t)v; break;
1297                 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1298                 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
     /* negative exponent: normalize by MB count and take the matching
      * root so thresholds are resolution-independent */
1307     if (s->frame_skip_exp < 0)
1308         score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1309                       -1.0/s->frame_skip_exp);
1311     if (score64 < s->frame_skip_threshold)
1313     if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
/* Encode one frame (or flush with frame == NULL) through the auxiliary
 * encoder context `c` used by estimate_best_b_count().
 * Uses the send/receive API; EAGAIN/EOF from receive are not errors.
 * NOTE(review): the lines that accumulate/return the produced packet
 * size are missing from this chunk. */
1318 static int encode_frame(AVCodecContext *c, AVFrame *frame, AVPacket *pkt)
1323     ret = avcodec_send_frame(c, frame);
1328         ret = avcodec_receive_packet(c, pkt);
             /* consume and discard the packet; only its size matters */
1331             av_packet_unref(pkt);
1332         } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
/* b_frame_strategy == 2: brute-force search for the best number of
 * B-frames between the next two reference frames.
 *
 * Each candidate GOP pattern (j B-frames) is actually encoded with a
 * temporary encoder running on frames downscaled by 2^brd_scale, and
 * the candidate with the lowest rate-distortion cost
 * (bits * lambda2 + SSE) wins.
 *
 * Returns the best B-frame count, or a negative AVERROR.
 * NOTE(review): width/height setup of `c`, the rd accumulation for the
 * I-frame, best_rd/best_b_count updates and the cleanup label are
 * missing from this chunk. */
1339 static int estimate_best_b_count(MpegEncContext *s)
1341     const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1343     const int scale = s->brd_scale;
1344     int width = s->width >> scale;
1345     int height = s->height >> scale;
1346     int i, j, out_size, p_lambda, b_lambda, lambda2;
1347     int64_t best_rd = INT64_MAX;
1348     int best_b_count = -1;
1351     av_assert0(scale >= 0 && scale <= 3);
1353     pkt = av_packet_alloc();
1355         return AVERROR(ENOMEM);
1358     //s->next_picture_ptr->quality;
1359     p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1360     //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1361     b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1362     if (!b_lambda) // FIXME we should do this somewhere else
1363         b_lambda = p_lambda;
1364     lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
     /* downscale the reference frame (i == 0) and the queued input
      * frames into tmp_frames[] */
1367     for (i = 0; i < s->max_b_frames + 2; i++) {
1368         Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1369                                                 s->next_picture_ptr;
1372         if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1373             pre_input = *pre_input_ptr;
1374             memcpy(data, pre_input_ptr->f->data, sizeof(data));
             /* non-shared input pictures store pixels at INPLACE_OFFSET */
1376             if (!pre_input.shared && i) {
1377                 data[0] += INPLACE_OFFSET;
1378                 data[1] += INPLACE_OFFSET;
1379                 data[2] += INPLACE_OFFSET;
1382             s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1383                                        s->tmp_frames[i]->linesize[0],
1385                                        pre_input.f->linesize[0],
1387             s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1388                                        s->tmp_frames[i]->linesize[1],
1390                                        pre_input.f->linesize[1],
1391                                        width >> 1, height >> 1);
1392             s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1393                                        s->tmp_frames[i]->linesize[2],
1395                                        pre_input.f->linesize[2],
1396                                        width >> 1, height >> 1);
     /* try each B-frame count j in 0..max_b_frames with a fresh
      * temporary encoder */
1400     for (j = 0; j < s->max_b_frames + 1; j++) {
1404         if (!s->input_picture[j])
1407         c = avcodec_alloc_context3(NULL);
1409             ret = AVERROR(ENOMEM);
         /* mirror the relevant settings from the real encoder */
1415         c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1416         c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1417         c->mb_decision = s->avctx->mb_decision;
1418         c->me_cmp = s->avctx->me_cmp;
1419         c->mb_cmp = s->avctx->mb_cmp;
1420         c->me_sub_cmp = s->avctx->me_sub_cmp;
1421         c->pix_fmt = AV_PIX_FMT_YUV420P;
1422         c->time_base = s->avctx->time_base;
1423         c->max_b_frames = s->max_b_frames;
1425         ret = avcodec_open2(c, codec, NULL);
         /* first frame is always intra with fixed quality */
1430         s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1431         s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1433         out_size = encode_frame(c, s->tmp_frames[0], pkt);
1439         //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1441         for (i = 0; i < s->max_b_frames + 1; i++) {
             /* P at the end of each j-sized B-run and at the sequence end */
1442             int is_p = i % (j + 1) == j || i == s->max_b_frames;
1444             s->tmp_frames[i + 1]->pict_type = is_p ?
1445                                               AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1446             s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1448             out_size = encode_frame(c, s->tmp_frames[i + 1], pkt);
1454             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1457         /* get the delayed frames */
1458         out_size = encode_frame(c, NULL, pkt);
1463         rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
         /* add distortion (PSNR error sums) to the rate term */
1465         rd += c->error[0] + c->error[1] + c->error[2];
1473         avcodec_free_context(&c);
1474         av_packet_unref(pkt);
1481     av_packet_free(&pkt);
1483     return best_b_count;
/* Pick the next picture to encode from the reorder buffers.
 *
 * Shifts reordered_input_picture[], then if it is empty:
 *  - optionally drops the frame via skip_check() (frame-skip RC);
 *  - forces an I-frame when there is no reference yet or intra_only;
 *  - otherwise chooses the B-frame run length per b_frame_strategy
 *    (0 = fixed, 1 = intra-count heuristic, 2 = estimate_best_b_count),
 *    clamps it at GOP boundaries, and fills reordered_input_picture[]
 *    (P/I first, then its preceding B-frames).
 * Finally promotes reordered_input_picture[0] to s->new_picture /
 * s->current_picture_ptr, copying into a fresh Picture when the input
 * is shared or VBV requires a modifiable buffer.
 *
 * Returns 0 on success, negative AVERROR on failure.
 * NOTE(review): several lines (loop bodies of the strategy-1 scoring,
 * error paths, function tail) are missing from this chunk. */
1486 static int select_input_picture(MpegEncContext *s)
1490     for (i = 1; i < MAX_PICTURE_COUNT; i++)
1491         s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1492     s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1494     /* set next picture type & ordering */
1495     if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1496         if (s->frame_skip_threshold || s->frame_skip_factor) {
1497             if (s->picture_in_gop_number < s->gop_size &&
1498                 s->next_picture_ptr &&
1499                 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1500                 // FIXME check that the gop check above is +-1 correct
1501                 av_frame_unref(s->input_picture[0]->f);
                 /* account the dropped frame (0 bits) in rate control */
1503                 ff_vbv_update(s, 0);
         /* no reference available (or intra-only): emit an I-frame */
1509         if (/*s->picture_in_gop_number >= s->gop_size ||*/
1510             !s->next_picture_ptr || s->intra_only) {
1511             s->reordered_input_picture[0] = s->input_picture[0];
1512             s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1513             s->reordered_input_picture[0]->f->coded_picture_number =
1514                 s->coded_picture_number++;
             /* two-pass: picture types come from the pass-1 stats file */
1518             if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1519                 for (i = 0; i < s->max_b_frames + 1; i++) {
1520                     int pict_num = s->input_picture[0]->f->display_picture_number + i;
1522                     if (pict_num >= s->rc_context.num_entries)
1524                     if (!s->input_picture[i]) {
1525                         s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1529                     s->input_picture[i]->f->pict_type =
1530                         s->rc_context.entry[pict_num].new_pict_type;
1534             if (s->b_frame_strategy == 0) {
1535                 b_frames = s->max_b_frames;
1536                 while (b_frames && !s->input_picture[b_frames])
1538             } else if (s->b_frame_strategy == 1) {
                 /* score each queued frame by its intra-MB count vs. the
                  * previous frame (lazily, cached in b_frame_score) */
1539                 for (i = 1; i < s->max_b_frames + 1; i++) {
1540                     if (s->input_picture[i] &&
1541                         s->input_picture[i]->b_frame_score == 0) {
1542                         s->input_picture[i]->b_frame_score =
1544                                s->input_picture[i    ]->f->data[0],
1545                                s->input_picture[i - 1]->f->data[0],
1549                 for (i = 0; i < s->max_b_frames + 1; i++) {
1550                     if (!s->input_picture[i] ||
1551                         s->input_picture[i]->b_frame_score - 1 >
1552                         s->mb_num / s->b_sensitivity)
1556                 b_frames = FFMAX(0, i - 1);
                 /* reset cached scores for the frames we consumed */
1559                 for (i = 0; i < b_frames + 1; i++) {
1560                     s->input_picture[i]->b_frame_score = 0;
1562             } else if (s->b_frame_strategy == 2) {
1563                 b_frames = estimate_best_b_count(s);
             /* a user-forced non-B type inside the run cuts it short */
1570             for (i = b_frames - 1; i >= 0; i--) {
1571                 int type = s->input_picture[i]->f->pict_type;
1572                 if (type && type != AV_PICTURE_TYPE_B)
1575             if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1576                 b_frames == s->max_b_frames) {
1577                 av_log(s->avctx, AV_LOG_ERROR,
1578                        "warning, too many B-frames in a row\n");
             /* clamp the run so the GOP boundary falls on a reference */
1581             if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1582                 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1583                     s->gop_size > s->picture_in_gop_number) {
1584                     b_frames = s->gop_size - s->picture_in_gop_number - 1;
1586                     if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1588                     s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1592             if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1593                 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
             /* reference frame first, then its preceding B-frames */
1596             s->reordered_input_picture[0] = s->input_picture[b_frames];
1597             if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1598                 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1599             s->reordered_input_picture[0]->f->coded_picture_number =
1600                 s->coded_picture_number++;
1601             for (i = 0; i < b_frames; i++) {
1602                 s->reordered_input_picture[i + 1] = s->input_picture[i];
1603                 s->reordered_input_picture[i + 1]->f->pict_type =
1605                 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1606                     s->coded_picture_number++;
1611     ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1613     if (s->reordered_input_picture[0]) {
         /* B-frames are never reference pictures */
1614         s->reordered_input_picture[0]->reference =
1615             s->reordered_input_picture[0]->f->pict_type !=
1616                 AV_PICTURE_TYPE_B ? 3 : 0;
1618         if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1621         if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1622             // input is a shared pix, so we can't modify it -> allocate a new
1623             // one & ensure that the shared one is reuseable
1626             int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1629             pic = &s->picture[i];
1631             pic->reference = s->reordered_input_picture[0]->reference;
1632             if (alloc_picture(s, pic, 0) < 0) {
1636             ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1640             /* mark us unused / free shared pic */
1641             av_frame_unref(s->reordered_input_picture[0]->f);
1642             s->reordered_input_picture[0]->shared = 0;
1644             s->current_picture_ptr = pic;
1646             // input is not a shared pix -> reuse buffer for current_pix
1647             s->current_picture_ptr = s->reordered_input_picture[0];
1648             for (i = 0; i < 4; i++) {
1649                 if (s->new_picture.f->data[i])
1650                     s->new_picture.f->data[i] += INPLACE_OFFSET;
1653         ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1654         if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1655                                        s->current_picture_ptr)) < 0)
1658         s->picture_number = s->new_picture.f->display_picture_number;
/* Post-encode bookkeeping for the just-coded frame:
 *  - replicate picture borders (draw_edges) on reference frames so
 *    unrestricted MVs can read outside the picture;
 *  - remember last picture type and its lambda for future rate control;
 *  - legacy API: mirror the frame into avctx->coded_frame and copy the
 *    PSNR error sums (both behind deprecation guards). */
1663 static void frame_end(MpegEncContext *s)
1665     if (s->unrestricted_mv &&
1666         s->current_picture.reference &&
1668         const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1669         int hshift = desc->log2_chroma_w;
1670         int vshift = desc->log2_chroma_h;
         /* luma plane: full EDGE_WIDTH border on all four sides */
1671         s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1672                                 s->current_picture.f->linesize[0],
1673                                 s->h_edge_pos, s->v_edge_pos,
1674                                 EDGE_WIDTH, EDGE_WIDTH,
1675                                 EDGE_TOP | EDGE_BOTTOM);
         /* chroma planes: border scaled by the subsampling shifts */
1676         s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1677                                 s->current_picture.f->linesize[1],
1678                                 s->h_edge_pos >> hshift,
1679                                 s->v_edge_pos >> vshift,
1680                                 EDGE_WIDTH >> hshift,
1681                                 EDGE_WIDTH >> vshift,
1682                                 EDGE_TOP | EDGE_BOTTOM);
1683         s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1684                                 s->current_picture.f->linesize[2],
1685                                 s->h_edge_pos >> hshift,
1686                                 s->v_edge_pos >> vshift,
1687                                 EDGE_WIDTH >> hshift,
1688                                 EDGE_WIDTH >> vshift,
1689                                 EDGE_TOP | EDGE_BOTTOM);
1694     s->last_pict_type                 = s->pict_type;
1695     s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1696     if (s->pict_type!= AV_PICTURE_TYPE_B)
1697         s->last_non_b_pict_type = s->pict_type;
1699 #if FF_API_CODED_FRAME
1700 FF_DISABLE_DEPRECATION_WARNINGS
1701     av_frame_unref(s->avctx->coded_frame);
1702     av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1703 FF_ENABLE_DEPRECATION_WARNINGS
1705 #if FF_API_ERROR_FRAME
1706 FF_DISABLE_DEPRECATION_WARNINGS
1707     memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1708            sizeof(s->current_picture.encoding_error));
1709 FF_ENABLE_DEPRECATION_WARNINGS
/* Refresh the per-coefficient DCT offset tables used for noise
 * reduction, separately for intra (1) and inter (0) blocks.
 * The accumulated error sums are halved once the sample count exceeds
 * 2^16 (exponential forgetting), then each offset is the noise level
 * scaled by count / error_sum (+1 guards against division by zero). */
1713 static void update_noise_reduction(MpegEncContext *s)
1717     for (intra = 0; intra < 2; intra++) {
1718         if (s->dct_count[intra] > (1 << 16)) {
1719             for (i = 0; i < 64; i++) {
1720                 s->dct_error_sum[intra][i] >>= 1;
1722             s->dct_count[intra] >>= 1;
1725         for (i = 0; i < 64; i++) {
1726             s->dct_offset[intra][i] = (s->noise_reduction *
1727                                        s->dct_count[intra] +
1728                                        s->dct_error_sum[intra][i] / 2) /
1729                                       (s->dct_error_sum[intra][i] + 1);
/* Per-frame setup before encoding:
 *  - rotate last/next/current picture references (B-frames keep the
 *    existing reference pair);
 *  - for field pictures, double the linesizes and offset the bottom
 *    field's data pointers;
 *  - select the dct_unquantize functions matching the output format;
 *  - update noise-reduction tables when enabled.
 * Returns 0 on success, negative AVERROR on reference failure.
 * NOTE(review): the error-return lines after the ref_picture calls are
 * missing from this chunk. */
1734 static int frame_start(MpegEncContext *s)
1738     /* mark & release old frames */
1739     if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1740         s->last_picture_ptr != s->next_picture_ptr &&
1741         s->last_picture_ptr->f->buf[0]) {
1742         ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1745     s->current_picture_ptr->f->pict_type = s->pict_type;
1746     s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1748     ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1749     if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1750                                    s->current_picture_ptr)) < 0)
     /* only reference pictures advance the last/next chain */
1753     if (s->pict_type != AV_PICTURE_TYPE_B) {
1754         s->last_picture_ptr = s->next_picture_ptr;
1756             s->next_picture_ptr = s->current_picture_ptr;
1759     if (s->last_picture_ptr) {
1760         ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1761         if (s->last_picture_ptr->f->buf[0] &&
1762             (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1763                                        s->last_picture_ptr)) < 0)
1766     if (s->next_picture_ptr) {
1767         ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1768         if (s->next_picture_ptr->f->buf[0] &&
1769             (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1770                                        s->next_picture_ptr)) < 0)
     /* field pictures: address every other line */
1774     if (s->picture_structure!= PICT_FRAME) {
1776         for (i = 0; i < 4; i++) {
1777             if (s->picture_structure == PICT_BOTTOM_FIELD) {
1778                 s->current_picture.f->data[i] +=
1779                     s->current_picture.f->linesize[i];
1781             s->current_picture.f->linesize[i] *= 2;
1782             s->last_picture.f->linesize[i]    *= 2;
1783             s->next_picture.f->linesize[i]    *= 2;
     /* pick unquantizers matching the bitstream semantics */
1787     if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1788         s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1789         s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1790     } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1791         s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1792         s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1794         s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1795         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1798     if (s->dct_error_sum) {
1799         av_assert2(s->noise_reduction && s->encoding);
1800         update_noise_reduction(s);
/* Public entry point: encode one input frame into `pkt`.
 *
 * Pipeline: load_input_picture() -> select_input_picture() ->
 * frame_start() -> encode_picture() -> frame_end(), followed by
 * rate-control/VBV handling (possible re-encode with higher lambda),
 * stuffing-byte insertion, MPEG-1/2 vbv_delay patching, pts/dts
 * assignment and packet finalization.
 *
 * Returns 0 on success (with *got_packet set), negative AVERROR on
 * failure.
 * NOTE(review): numerous lines (error paths, some closing braces, the
 * re-encode loop head) are missing from this chunk; comments describe
 * only what the visible statements establish. */
1806 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1807                           const AVFrame *pic_arg, int *got_packet)
1809     MpegEncContext *s = avctx->priv_data;
1810     int i, stuffing_count, ret;
1811     int context_count = s->slice_context_count;
1813     s->vbv_ignore_qmax = 0;
1815     s->picture_in_gop_number++;
1817     if (load_input_picture(s, pic_arg) < 0)
1820     if (select_input_picture(s) < 0) {
     /* output? -- new_picture holds the frame selected for encoding */
1825     if (s->new_picture.f->data[0]) {
         /* single slice context without a preset buffer can grow the
          * internal byte buffer on demand */
1826         int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1827         int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1829                                               s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1830         if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
         /* optional H.263 macroblock-info side data */
1833             s->mb_info_ptr = av_packet_new_side_data(pkt,
1834                                  AV_PKT_DATA_H263_MB_INFO,
1835                                  s->mb_width*s->mb_height*12);
1836             s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
         /* carve the packet into per-slice-context bitstream regions,
          * proportional to each context's MB-row share */
1839         for (i = 0; i < context_count; i++) {
1840             int start_y = s->thread_context[i]->start_mb_y;
1841             int   end_y = s->thread_context[i]->  end_mb_y;
1842             int h       = s->mb_height;
1843             uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1844             uint8_t *end   = pkt->data + (size_t)(((int64_t) pkt->size) *   end_y / h);
1846             init_put_bits(&s->thread_context[i]->pb, start, end - start);
1849         s->pict_type = s->new_picture.f->pict_type;
1851         ret = frame_start(s);
1855         ret = encode_picture(s, s->picture_number);
1856         if (growing_buffer) {
1857             av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1858             pkt->data = s->pb.buf;
1859             pkt->size = avctx->internal->byte_buffer_size;
         /* legacy per-frame statistics fields (deprecated API) */
1864 #if FF_API_STAT_BITS
1865 FF_DISABLE_DEPRECATION_WARNINGS
1866         avctx->header_bits = s->header_bits;
1867         avctx->mv_bits     = s->mv_bits;
1868         avctx->misc_bits   = s->misc_bits;
1869         avctx->i_tex_bits  = s->i_tex_bits;
1870         avctx->p_tex_bits  = s->p_tex_bits;
1871         avctx->i_count     = s->i_count;
1872         // FIXME f/b_count in avctx
1873         avctx->p_count     = s->mb_num - s->i_count - s->skip_count;
1874         avctx->skip_count  = s->skip_count;
1875 FF_ENABLE_DEPRECATION_WARNINGS
1880         if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->out_format == FMT_MJPEG)
1881             ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
         /* VBV: if the frame exceeded the allowed size, raise lambda and
          * re-encode; undo the state changes done by frame_start()/
          * encode_picture() first */
1883         if (avctx->rc_buffer_size) {
1884             RateControlContext *rcc = &s->rc_context;
1885             int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1886             int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
1887             int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1889             if (put_bits_count(&s->pb) > max_size &&
1890                 s->lambda < s->lmax) {
1891                 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1892                                        (s->qscale + 1) / s->qscale);
1893                 if (s->adaptive_quant) {
1895                     for (i = 0; i < s->mb_height * s->mb_stride; i++)
1896                         s->lambda_table[i] =
1897                             FFMAX(s->lambda_table[i] + min_step,
1898                                   s->lambda_table[i] * (s->qscale + 1) /
1901                 s->mb_skipped = 0;        // done in frame_start()
1902                 // done in encode_picture() so we must undo it
1903                 if (s->pict_type == AV_PICTURE_TYPE_P) {
1904                     if (s->flipflop_rounding ||
1905                         s->codec_id == AV_CODEC_ID_H263P ||
1906                         s->codec_id == AV_CODEC_ID_MPEG4)
1907                         s->no_rounding ^= 1;
1909                 if (s->pict_type != AV_PICTURE_TYPE_B) {
1910                     s->time_base = s->last_time_base;
1911                     s->last_non_b_time = s->time - s->pp_time;
1913                 for (i = 0; i < context_count; i++) {
1914                     PutBitContext *pb = &s->thread_context[i]->pb;
1915                     init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1917                 s->vbv_ignore_qmax = 1;
1918                 av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1922             av_assert0(avctx->rc_max_rate);
1925         if (avctx->flags & AV_CODEC_FLAG_PASS1)
1926             ff_write_pass1_stats(s);
         /* export per-plane encoding error (PSNR data) */
1928         for (i = 0; i < 4; i++) {
1929             s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1930             avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1932         ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1933                                        s->current_picture_ptr->encoding_error,
1934                                        (avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1937         if (avctx->flags & AV_CODEC_FLAG_PASS1)
1938             assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1939                                              s->misc_bits + s->i_tex_bits +
1941         flush_put_bits(&s->pb);
1942         s->frame_bits  = put_bits_count(&s->pb);
         /* VBV stuffing: pad the frame with filler bits if rate control
          * demands a minimum size */
1944         stuffing_count = ff_vbv_update(s, s->frame_bits);
1945         s->stuffing_bits = 8*stuffing_count;
1946         if (stuffing_count) {
1947             if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
1948                 av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
1952             switch (s->codec_id) {
1953             case AV_CODEC_ID_MPEG1VIDEO:
1954             case AV_CODEC_ID_MPEG2VIDEO:
                 /* MPEG-1/2 padding: zero bytes */
1955                 while (stuffing_count--) {
1956                     put_bits(&s->pb, 8, 0);
1959             case AV_CODEC_ID_MPEG4:
                 /* MPEG-4 padding: stuffing start code then 0xFF bytes */
1960                 put_bits(&s->pb, 16, 0);
1961                 put_bits(&s->pb, 16, 0x1C3);
1962                 stuffing_count -= 4;
1963                 while (stuffing_count--) {
1964                     put_bits(&s->pb, 8, 0xFF);
1968                 av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1970             flush_put_bits(&s->pb);
1971             s->frame_bits  = put_bits_count(&s->pb);
1974         /* update MPEG-1/2 vbv_delay for CBR */
1975         if (avctx->rc_max_rate                          &&
1976             avctx->rc_min_rate == avctx->rc_max_rate    &&
1977             s->out_format == FMT_MPEG1                  &&
1978             90000LL * (avctx->rc_buffer_size - 1) <=
1979                 avctx->rc_max_rate * 0xFFFFLL) {
1980             AVCPBProperties *props;
1983             int vbv_delay, min_delay;
1984             double inbits  = avctx->rc_max_rate *
1985                              av_q2d(avctx->time_base);
1986             int    minbits = s->frame_bits - 8 *
1987                              (s->vbv_delay_ptr - s->pb.buf - 1);
1988             double bits    = s->rc_context.buffer_index + minbits - inbits;
1991                 av_log(avctx, AV_LOG_ERROR,
1992                        "Internal error, negative bits\n");
1994             av_assert1(s->repeat_first_field == 0);
1996             vbv_delay = bits * 90000 / avctx->rc_max_rate;
1997             min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
2000             vbv_delay = FFMAX(vbv_delay, min_delay);
2002             av_assert0(vbv_delay < 0xFFFF);
             /* patch the 16-bit vbv_delay field in the already-written
              * picture header (split across 3 bytes) */
2004             s->vbv_delay_ptr[0] &= 0xF8;
2005             s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2006             s->vbv_delay_ptr[1]  = vbv_delay >> 5;
2007             s->vbv_delay_ptr[2] &= 0x07;
2008             s->vbv_delay_ptr[2] |= vbv_delay << 3;
2010             props = av_cpb_properties_alloc(&props_size);
2012                 return AVERROR(ENOMEM);
2013             props->vbv_delay = vbv_delay * 300;
2015             ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
2016                                           (uint8_t*)props, props_size);
2022 #if FF_API_VBV_DELAY
2023 FF_DISABLE_DEPRECATION_WARNINGS
2024             avctx->vbv_delay = vbv_delay * 300;
2025 FF_ENABLE_DEPRECATION_WARNINGS
2028         s->total_bits     += s->frame_bits;
2029 #if FF_API_STAT_BITS
2030 FF_DISABLE_DEPRECATION_WARNINGS
2031         avctx->frame_bits  = s->frame_bits;
2032 FF_ENABLE_DEPRECATION_WARNINGS
         /* pts/dts: dts lags pts by one frame when B-frames reorder
          * output; the first frame uses the recorded dts_delta */
2036         pkt->pts = s->current_picture.f->pts;
2037         if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2038             if (!s->current_picture.f->coded_picture_number)
2039                 pkt->dts = pkt->pts - s->dts_delta;
2041                 pkt->dts = s->reordered_pts;
2042             s->reordered_pts = pkt->pts;
2044             pkt->dts = pkt->pts;
2045         if (s->current_picture.f->key_frame)
2046             pkt->flags |= AV_PKT_FLAG_KEY;
2048             av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2053     /* release non-reference frames */
2054     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2055         if (!s->picture[i].reference)
2056             ff_mpeg_unref_picture(avctx, &s->picture[i]);
2059     av_assert1((s->frame_bits & 7) == 0);
2061     pkt->size = s->frame_bits / 8;
2062     *got_packet = !!pkt->size;
/* Zero out block `n` entirely when it contains only a few small
 * coefficients whose coding cost exceeds their visual benefit.
 * Each coefficient position contributes a weight from `tab` (low
 * frequencies weigh more); if the total stays below `threshold` the
 * block is cleared.  A negative threshold means the DC coefficient is
 * kept (skip_dc), used for intra blocks.
 * NOTE(review): the skip_dc setup, the `score` accumulation lines and
 * parts of the clearing loop are missing from this chunk. */
2066 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2067                                                 int n, int threshold)
     /* per-position weight in zigzag order: DC and first ACs cost most */
2069     static const char tab[64] = {
2070         3, 2, 2, 1, 1, 1, 1, 1,
2071         1, 1, 1, 1, 1, 1, 1, 1,
2072         1, 1, 1, 1, 1, 1, 1, 1,
2073         0, 0, 0, 0, 0, 0, 0, 0,
2074         0, 0, 0, 0, 0, 0, 0, 0,
2075         0, 0, 0, 0, 0, 0, 0, 0,
2076         0, 0, 0, 0, 0, 0, 0, 0,
2077         0, 0, 0, 0, 0, 0, 0, 0
2082     int16_t *block = s->block[n];
2083     const int last_index = s->block_last_index[n];
2086     if (threshold < 0) {
2088         threshold = -threshold;
2092     /* Are all we could set to zero already zero? */
2093     if (last_index <= skip_dc - 1)
2096     for (i = 0; i <= last_index; i++) {
2097         const int j = s->intra_scantable.permutated[i];
2098         const int level = FFABS(block[j]);
2100             if (skip_dc && i == 0)
2104         } else if (level > 1) {
     /* block too significant to eliminate */
2110     if (score >= threshold)
     /* clear all (AC) coefficients and fix up block_last_index */
2112     for (i = skip_dc; i <= last_index; i++) {
2113         const int j = s->intra_scantable.permutated[i];
2117         s->block_last_index[n] = 0;
2119         s->block_last_index[n] = -1;
/* Clamp every quantized coefficient of `block` into the codec's legal
 * range [min_qcoeff, max_qcoeff]; intra DC is left untouched.
 * Logs a warning with the overflow count when clipping occurred and
 * MB decision is SIMPLE (higher decision modes handle it elsewhere).
 * NOTE(review): the clamping assignments and overflow counting lines
 * are missing from this chunk. */
2122 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2126     const int maxlevel = s->max_qcoeff;
2127     const int minlevel = s->min_qcoeff;
2131         i = 1; // skip clipping of intra dc
2135     for (; i <= last_index; i++) {
2136         const int j = s->intra_scantable.permutated[i];
2137         int level = block[j];
2139         if (level > maxlevel) {
2142         } else if (level < minlevel) {
2150     if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2151         av_log(s->avctx, AV_LOG_INFO,
2152                "warning, clipping %d dct coefficients to %d..%d\n",
2153                overflow, minlevel, maxlevel);
/* Compute a perceptual weight for each pixel of an 8x8 block for
 * quantizer noise shaping: each weight is derived from the local
 * variance of the pixel's 3x3 neighbourhood (clamped at the block
 * border) — flat areas get high weights, textured areas low ones.
 * NOTE(review): the count/sum/sqr initialisation lines are missing
 * from this chunk. */
2156 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2160     for (y = 0; y < 8; y++) {
2161         for (x = 0; x < 8; x++) {
             /* gather sum and sum-of-squares over the 3x3 neighbourhood */
2167             for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2168                 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2169                     int v = ptr[x2 + y2 * stride];
             /* weight ∝ sqrt of the neighbourhood variance */
2175             weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2180 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2181 int motion_x, int motion_y,
2182 int mb_block_height,
2186 int16_t weight[12][64];
2187 int16_t orig[12][64];
2188 const int mb_x = s->mb_x;
2189 const int mb_y = s->mb_y;
2192 int dct_offset = s->linesize * 8; // default for progressive frames
2193 int uv_dct_offset = s->uvlinesize * 8;
2194 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2195 ptrdiff_t wrap_y, wrap_c;
2197 for (i = 0; i < mb_block_count; i++)
2198 skip_dct[i] = s->skipdct;
2200 if (s->adaptive_quant) {
2201 const int last_qp = s->qscale;
2202 const int mb_xy = mb_x + mb_y * s->mb_stride;
2204 s->lambda = s->lambda_table[mb_xy];
2207 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2208 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2209 s->dquant = s->qscale - last_qp;
2211 if (s->out_format == FMT_H263) {
2212 s->dquant = av_clip(s->dquant, -2, 2);
2214 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2216 if (s->pict_type == AV_PICTURE_TYPE_B) {
2217 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2220 if (s->mv_type == MV_TYPE_8X8)
2226 ff_set_qscale(s, last_qp + s->dquant);
2227 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2228 ff_set_qscale(s, s->qscale + s->dquant);
2230 wrap_y = s->linesize;
2231 wrap_c = s->uvlinesize;
2232 ptr_y = s->new_picture.f->data[0] +
2233 (mb_y * 16 * wrap_y) + mb_x * 16;
2234 ptr_cb = s->new_picture.f->data[1] +
2235 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2236 ptr_cr = s->new_picture.f->data[2] +
2237 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2239 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2240 uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2241 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2242 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2243 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2245 16, 16, mb_x * 16, mb_y * 16,
2246 s->width, s->height);
2248 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2250 mb_block_width, mb_block_height,
2251 mb_x * mb_block_width, mb_y * mb_block_height,
2253 ptr_cb = ebuf + 16 * wrap_y;
2254 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2256 mb_block_width, mb_block_height,
2257 mb_x * mb_block_width, mb_y * mb_block_height,
2259 ptr_cr = ebuf + 16 * wrap_y + 16;
2263 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2264 int progressive_score, interlaced_score;
2266 s->interlaced_dct = 0;
2267 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2268 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2269 NULL, wrap_y, 8) - 400;
2271 if (progressive_score > 0) {
2272 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2273 NULL, wrap_y * 2, 8) +
2274 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2275 NULL, wrap_y * 2, 8);
2276 if (progressive_score > interlaced_score) {
2277 s->interlaced_dct = 1;
2279 dct_offset = wrap_y;
2280 uv_dct_offset = wrap_c;
2282 if (s->chroma_format == CHROMA_422 ||
2283 s->chroma_format == CHROMA_444)
2289 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2290 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2291 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2292 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2294 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2298 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2299 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2300 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2301 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2302 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2303 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2304 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2305 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2306 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2307 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2308 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2309 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2313 op_pixels_func (*op_pix)[4];
2314 qpel_mc_func (*op_qpix)[16];
2315 uint8_t *dest_y, *dest_cb, *dest_cr;
2317 dest_y = s->dest[0];
2318 dest_cb = s->dest[1];
2319 dest_cr = s->dest[2];
2321 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2322 op_pix = s->hdsp.put_pixels_tab;
2323 op_qpix = s->qdsp.put_qpel_pixels_tab;
2325 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2326 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2329 if (s->mv_dir & MV_DIR_FORWARD) {
2330 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2331 s->last_picture.f->data,
2333 op_pix = s->hdsp.avg_pixels_tab;
2334 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2336 if (s->mv_dir & MV_DIR_BACKWARD) {
2337 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2338 s->next_picture.f->data,
2342 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2343 int progressive_score, interlaced_score;
2345 s->interlaced_dct = 0;
2346 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2347 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2351 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2352 progressive_score -= 400;
2354 if (progressive_score > 0) {
2355 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2357 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2361 if (progressive_score > interlaced_score) {
2362 s->interlaced_dct = 1;
2364 dct_offset = wrap_y;
2365 uv_dct_offset = wrap_c;
2367 if (s->chroma_format == CHROMA_422)
2373 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2374 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2375 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2376 dest_y + dct_offset, wrap_y);
2377 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2378 dest_y + dct_offset + 8, wrap_y);
2380 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2384 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2385 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2386 if (!s->chroma_y_shift) { /* 422 */
2387 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2388 dest_cb + uv_dct_offset, wrap_c);
2389 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2390 dest_cr + uv_dct_offset, wrap_c);
2393 /* pre quantization */
2394 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2395 2 * s->qscale * s->qscale) {
2397 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2399 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2401 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2402 wrap_y, 8) < 20 * s->qscale)
2404 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2405 wrap_y, 8) < 20 * s->qscale)
2407 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2409 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2411 if (!s->chroma_y_shift) { /* 422 */
2412 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2413 dest_cb + uv_dct_offset,
2414 wrap_c, 8) < 20 * s->qscale)
2416 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2417 dest_cr + uv_dct_offset,
2418 wrap_c, 8) < 20 * s->qscale)
2424 if (s->quantizer_noise_shaping) {
2426 get_visual_weight(weight[0], ptr_y , wrap_y);
2428 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2430 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2432 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2434 get_visual_weight(weight[4], ptr_cb , wrap_c);
2436 get_visual_weight(weight[5], ptr_cr , wrap_c);
2437 if (!s->chroma_y_shift) { /* 422 */
2439 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2442 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2445 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2448 /* DCT & quantize */
2449 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2451 for (i = 0; i < mb_block_count; i++) {
2454 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2455 // FIXME we could decide to change to quantizer instead of
2457 // JS: I don't think that would be a good idea it could lower
2458 // quality instead of improve it. Just INTRADC clipping
2459 // deserves changes in quantizer
2461 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2463 s->block_last_index[i] = -1;
2465 if (s->quantizer_noise_shaping) {
2466 for (i = 0; i < mb_block_count; i++) {
2468 s->block_last_index[i] =
2469 dct_quantize_refine(s, s->block[i], weight[i],
2470 orig[i], i, s->qscale);
2475 if (s->luma_elim_threshold && !s->mb_intra)
2476 for (i = 0; i < 4; i++)
2477 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2478 if (s->chroma_elim_threshold && !s->mb_intra)
2479 for (i = 4; i < mb_block_count; i++)
2480 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2482 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2483 for (i = 0; i < mb_block_count; i++) {
2484 if (s->block_last_index[i] == -1)
2485 s->coded_score[i] = INT_MAX / 256;
2490 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2491 s->block_last_index[4] =
2492 s->block_last_index[5] = 0;
2494 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2495 if (!s->chroma_y_shift) { /* 422 / 444 */
2496 for (i=6; i<12; i++) {
2497 s->block_last_index[i] = 0;
2498 s->block[i][0] = s->block[4][0];
2503 // non c quantize code returns incorrect block_last_index FIXME
2504 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2505 for (i = 0; i < mb_block_count; i++) {
2507 if (s->block_last_index[i] > 0) {
2508 for (j = 63; j > 0; j--) {
2509 if (s->block[i][s->intra_scantable.permutated[j]])
2512 s->block_last_index[i] = j;
2517 /* huffman encode */
2518 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2519 case AV_CODEC_ID_MPEG1VIDEO:
2520 case AV_CODEC_ID_MPEG2VIDEO:
2521 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2522 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2524 case AV_CODEC_ID_MPEG4:
2525 if (CONFIG_MPEG4_ENCODER)
2526 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2528 case AV_CODEC_ID_MSMPEG4V2:
2529 case AV_CODEC_ID_MSMPEG4V3:
2530 case AV_CODEC_ID_WMV1:
2531 if (CONFIG_MSMPEG4_ENCODER)
2532 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2534 case AV_CODEC_ID_WMV2:
2535 if (CONFIG_WMV2_ENCODER)
2536 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2538 case AV_CODEC_ID_H261:
2539 if (CONFIG_H261_ENCODER)
2540 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2542 case AV_CODEC_ID_H263:
2543 case AV_CODEC_ID_H263P:
2544 case AV_CODEC_ID_FLV1:
2545 case AV_CODEC_ID_RV10:
2546 case AV_CODEC_ID_RV20:
2547 if (CONFIG_H263_ENCODER)
2548 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2550 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
2551 case AV_CODEC_ID_MJPEG:
2552 case AV_CODEC_ID_AMV:
2553 ff_mjpeg_encode_mb(s, s->block);
2556 case AV_CODEC_ID_SPEEDHQ:
2557 if (CONFIG_SPEEDHQ_ENCODER)
2558 ff_speedhq_encode_mb(s, s->block);
/* Encode one macroblock: dispatch to encode_mb_internal() with the chroma
 * block geometry matching the subsampling mode.
 * 4:2:0 -> 8x8 chroma, 6 blocks; 4:2:2 -> 16x8 chroma, 8 blocks;
 * otherwise (4:4:4) -> 16x16 chroma, 12 blocks. */
2565 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2567     if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2568     else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2569     else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/* Snapshot the encoder state of 's' into 'd' before a trial encode, so the
 * state can be restored / compared when several MB types are tried (see
 * encode_mb_hq()). Copies MV prediction state, skip-run, DC predictors,
 * per-category bit statistics and quantizer state.
 * 'type' selects which candidate MB type the copy is for (-1 = full copy);
 * its exact use here is in lines elided from this view — do not assume. */
2572 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2575     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2578     d->mb_skip_run= s->mb_skip_run;
     /* DC predictors for the (up to 3) components */
2580         d->last_dc[i] = s->last_dc[i];
     /* statistics: per-category bit counters used for rate bookkeeping */
2583     d->mv_bits= s->mv_bits;
2584     d->i_tex_bits= s->i_tex_bits;
2585     d->p_tex_bits= s->p_tex_bits;
2586     d->i_count= s->i_count;
2587     d->f_count= s->f_count;
2588     d->b_count= s->b_count;
2589     d->skip_count= s->skip_count;
2590     d->misc_bits= s->misc_bits;
2594     d->qscale= s->qscale;
2595     d->dquant= s->dquant;
     /* MPEG-4/MSMPEG4 escape-3 run/level code length state */
2597     d->esc3_level_length= s->esc3_level_length;
/* Copy the encoder state of 's' into 'd' after a trial encode, so the winning
 * candidate's state (MVs, DC/AC prediction, bit statistics, coded-block info)
 * can be kept as the result. Mirror of copy_context_before_encode(), plus the
 * per-MB outputs: mb_intra/mb_skipped/mv_type/mv_dir, bitstream writers and
 * block_last_index[]. */
2600 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2603     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2604     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2607     d->mb_skip_run= s->mb_skip_run;
     /* DC predictors for the (up to 3) components */
2609         d->last_dc[i] = s->last_dc[i];
     /* statistics: per-category bit counters used for rate bookkeeping */
2612     d->mv_bits= s->mv_bits;
2613     d->i_tex_bits= s->i_tex_bits;
2614     d->p_tex_bits= s->p_tex_bits;
2615     d->i_count= s->i_count;
2616     d->f_count= s->f_count;
2617     d->b_count= s->b_count;
2618     d->skip_count= s->skip_count;
2619     d->misc_bits= s->misc_bits;
     /* per-MB decisions made during the trial encode */
2621     d->mb_intra= s->mb_intra;
2622     d->mb_skipped= s->mb_skipped;
2623     d->mv_type= s->mv_type;
2624     d->mv_dir= s->mv_dir;
     /* with data partitioning, the secondary bit writers travel with the state */
2626     if(s->data_partitioning){
2628         d->tex_pb= s->tex_pb;
2632         d->block_last_index[i]= s->block_last_index[i];
2633     d->interlaced_dct= s->interlaced_dct;
2634     d->qscale= s->qscale;
2636     d->esc3_level_length= s->esc3_level_length;
/* Trial-encode one macroblock as candidate type 'type' and keep it if it
 * scores better than the current best.
 *
 * State is restored from 'backup' before encoding; the candidate is written
 * into the double-buffered bit writers pb/pb2/tex_pb selected by *next_block
 * and, for RD decision, decoded into the rd_scratchpad instead of the real
 * destination so the frame buffer is not clobbered by losing candidates.
 *
 * Score: bit count of the candidate; with FF_MB_DECISION_RD it becomes
 * bits*lambda2 + SSE<<FF_LAMBDA_SHIFT (rate-distortion cost).
 * On a win (comparison lines elided from this view), *dmin/*next_block are
 * updated and the state is saved into 'best' via copy_context_after_encode. */
2639 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2640 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2641 int *dmin, int *next_block, int motion_x, int motion_y)
2644     uint8_t *dest_backup[3];
2646     copy_context_before_encode(s, backup, type);
2648     s->block= s->blocks[*next_block];
2649     s->pb= pb[*next_block];
2650     if(s->data_partitioning){
2651         s->pb2   = pb2   [*next_block];
2652         s->tex_pb= tex_pb[*next_block];
     /* redirect reconstruction into scratchpad: luma 16x16 on top, the two
      * 8-wide chroma planes side by side on the following 16 lines */
2656     memcpy(dest_backup, s->dest, sizeof(s->dest));
2657     s->dest[0] = s->sc.rd_scratchpad;
2658     s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2659     s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2660     av_assert0(s->linesize >= 32); //FIXME
2663     encode_mb(s, motion_x, motion_y);
2665     score= put_bits_count(&s->pb);
2666     if(s->data_partitioning){
2667         score+= put_bits_count(&s->pb2);
2668         score+= put_bits_count(&s->tex_pb);
2671     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2672         ff_mpv_reconstruct_mb(s, s->block);
2674         score *= s->lambda2;
2675         score += sse_mb(s) << FF_LAMBDA_SHIFT;
2679     memcpy(s->dest, dest_backup, sizeof(s->dest));
2686         copy_context_after_encode(best, s, type);
/* Sum of squared errors between two pixel blocks of size w x h.
 * Uses the optimized mecc.sse kernels for the common 16x16 and 8x8 sizes;
 * odd sizes (frame-border macroblocks) fall back to a scalar loop using the
 * squared-difference lookup table ff_square_tab (offset by 256 so negative
 * differences index correctly). */
2690 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2691     const uint32_t *sq = ff_square_tab + 256;
2696         return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2697     else if(w==8 && h==8)
2698         return s->mecc.sse[1](NULL, src1, src2, stride, 8);
     /* generic fallback for partial (border) blocks */
2702             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/* Distortion of the current macroblock: compares the reconstructed MB in
 * s->dest[] against the source picture (s->new_picture).
 * w/h are clipped at the right/bottom frame border. For full 16x16 MBs the
 * fast path uses either the NSSE (noise-preserving SSE, when mb_cmp selects
 * it) or plain SSE kernels; border MBs use the generic sse() helper with
 * half-size chroma. Note: the 8x8-chroma paths assume 4:2:0 here. */
2711 static int sse_mb(MpegEncContext *s){
2715     if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2716     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2719         if(s->avctx->mb_cmp == FF_CMP_NSSE){
2720             return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2721                    s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2722                    s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2724             return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2725                    s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2726                    s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
     /* border macroblock: per-plane generic SSE with clipped dimensions */
2729         return  sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2730                +sse(s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2731                +sse(s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/* Slice-thread worker: run the pre-pass of P-frame motion estimation over
 * this context's macroblock rows. Iterates bottom-up / right-to-left
 * (reverse raster order) so the pre-pass predictors come from the not-yet-
 * estimated side; uses the dedicated pre_dia_size search diamond. */
2734 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2735     MpegEncContext *s= *(void**)arg;
2739     s->me.dia_size= s->avctx->pre_dia_size;
2740     s->first_slice_line=1;
2741     for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2742         for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2743             ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2745         s->first_slice_line=0;
/* Slice-thread worker: full motion estimation for this context's MB rows.
 * For each macroblock the estimated motion vectors and MB type are stored in
 * the context (picked up later by encode_thread()). B-pictures use the
 * bidirectional estimator, everything else the P-frame estimator. */
2753 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2754     MpegEncContext *s= *(void**)arg;
2756     s->me.dia_size= s->avctx->dia_size;
2757     s->first_slice_line=1;
2758     for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2759         s->mb_x=0; //for block init below
2760         ff_init_block_index(s);
2761         for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
            /* advance the 4 luma block indices by one MB (2 blocks each) */
2762             s->block_index[0]+=2;
2763             s->block_index[1]+=2;
2764             s->block_index[2]+=2;
2765             s->block_index[3]+=2;
2767             /* compute motion vector & mb_type and store in context */
2768             if(s->pict_type==AV_PICTURE_TYPE_B)
2769                 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2771                 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2773         s->first_slice_line=0;
/* Slice-thread worker: compute the per-macroblock luma variance and mean of
 * the source picture (used by rate control / adaptive quantization).
 * varc = (sum(x^2) - sum(x)^2/256 + 500 + 128) >> 8 for the 16x16 luma block;
 * results go into current_picture.mb_var[] / mb_mean[], and the variances are
 * accumulated in me.mb_var_sum_temp for later merging across threads. */
2778 static int mb_var_thread(AVCodecContext *c, void *arg){
2779     MpegEncContext *s= *(void**)arg;
2782     for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2783         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2786             uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2788             int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2790             varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2791                     (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2793             s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2794             s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2795             s->me.mb_var_sum_temp    += varc;
/* Finish the current slice: codec-specific stuffing/merging, then byte-align
 * and flush the main bitstream writer.
 * MPEG-4: merge data partitions (if enabled) and write stuffing bits;
 * MJPEG/AMV: JPEG stuffing; SpeedHQ: end-of-slice handling.
 * In pass-1 (and not partitioned) the flushed bits are accounted as misc. */
2801 static void write_slice_end(MpegEncContext *s){
2802     if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2803         if(s->partitioned_frame){
2804             ff_mpeg4_merge_partitions(s);
2807         ff_mpeg4_stuffing(&s->pb);
2808     } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
2809                s->out_format == FMT_MJPEG) {
2810         ff_mjpeg_encode_stuffing(s);
2811     } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
2812         ff_speedhq_end_slice(s);
2815     flush_put_bits(&s->pb);
2817     if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2818         s->misc_bits+= get_bits_diff(s);
/* Append one 12-byte macroblock-info record to the side-data buffer
 * (AV_CODEC_FLAG2 mb_info export, H.263 RFC-2190-style resync info):
 * bit offset into the packet (le32), qscale, GOB number, MB address within
 * the GOB (le16), and the predicted MV (hmv1/vmv1). The 4MV second-vector
 * fields are written as zero. */
2821 static void write_mb_info(MpegEncContext *s)
2823     uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2824     int offset = put_bits_count(&s->pb);
2825     int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2826     int gobn = s->mb_y / s->gob_index;
2828     if (CONFIG_H263_ENCODER)
2829         ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2830     bytestream_put_le32(&ptr, offset);
2831     bytestream_put_byte(&ptr, s->qscale);
2832     bytestream_put_byte(&ptr, gobn);
2833     bytestream_put_le16(&ptr, mba);
2834     bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2835     bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2836     /* 4MV not implemented */
2837     bytestream_put_byte(&ptr, 0); /* hmv2 */
2838     bytestream_put_byte(&ptr, 0); /* vmv2 */
/* Maintain the mb_info side-data slots as the bitstream grows.
 * Called with startcode=0 before each MB: if at least s->mb_info bytes were
 * written since the last record, reserve a new 12-byte slot. Called with
 * startcode!=0 right after a resync/startcode so the record points at the
 * first MB following it (see comment below). */
2841 static void update_mb_info(MpegEncContext *s, int startcode)
2845     if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
2846         s->mb_info_size += 12;
2847         s->prev_mb_info = s->last_mb_info;
2850         s->prev_mb_info = put_bytes_count(&s->pb, 0);
2851         /* This might have incremented mb_info_size above, and we return without
2852          * actually writing any info into that slot yet. But in that case,
2853          * this will be called again at the start of the after writing the
2854          * start code, actually writing the mb info. */
2858     s->last_mb_info = put_bytes_count(&s->pb, 0);
2859     if (!s->mb_info_size)
2860         s->mb_info_size += 12;
/* Grow the shared PutBitContext output buffer when fewer than 'threshold'
 * bytes remain, enlarging by at least 'size_increase'. Only possible with a
 * single slice context and when s->pb writes into the avctx-internal byte
 * buffer. On success the PutBitContext is rebased onto the new buffer and
 * the saved ptr_lastgob / vbv_delay_ptr pointers are re-derived from their
 * old offsets. Returns 0, AVERROR(ENOMEM) on allocation failure/overflow,
 * or AVERROR(EINVAL) if still below threshold (growth not applicable). */
2864 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2866     if (put_bytes_left(&s->pb, 0) < threshold
2867         && s->slice_context_count == 1
2868         && s->pb.buf == s->avctx->internal->byte_buffer) {
        /* remember positions as offsets; the buffer may move */
2869         int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2870         int vbv_pos     = s->vbv_delay_ptr - s->pb.buf;
2872         uint8_t *new_buffer = NULL;
2873         int new_buffer_size = 0;
2875         if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2876             av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2877             return AVERROR(ENOMEM);
2882         av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2883                               s->avctx->internal->byte_buffer_size + size_increase);
2885             return AVERROR(ENOMEM);
2887         memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2888         av_free(s->avctx->internal->byte_buffer);
2889         s->avctx->internal->byte_buffer      = new_buffer;
2890         s->avctx->internal->byte_buffer_size = new_buffer_size;
2891         rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2892         s->ptr_lastgob   = s->pb.buf + lastgob_pos;
2893         s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2895     if (put_bytes_left(&s->pb, 0) < threshold)
2896         return AVERROR(EINVAL);
/* Slice-thread worker: the main macroblock encoding loop for this context's
 * rows [start_mb_y, end_mb_y).
 *
 * Per MB it either (a) trial-encodes every candidate MB type with
 * encode_mb_hq() and keeps the best by bit count or RD cost (when more than
 * one type is possible or FF_MPV_FLAG_QP_RD is set), or (b) directly encodes
 * the single possible type with encode_mb(). It also handles GOB/slice/video-
 * packet headers, output-buffer growth, mb_info side data, PSNR accumulation
 * and the H.263 loop filter. */
2900 static int encode_thread(AVCodecContext *c, void *arg){
2901     MpegEncContext *s= *(void**)arg;
2902     int mb_x, mb_y, mb_y_order;
2903     int chr_h= 16>>s->chroma_y_shift;
2905     MpegEncContext best_s = { 0 }, backup_s;
    /* double-buffered bit writers for candidate encodes (index = next_block) */
2906     uint8_t bit_buf[2][MAX_MB_BYTES];
2907     uint8_t bit_buf2[2][MAX_MB_BYTES];
2908     uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2909     PutBitContext pb[2], pb2[2], tex_pb[2];
2912         init_put_bits(&pb    [i], bit_buf    [i], MAX_MB_BYTES);
2913         init_put_bits(&pb2   [i], bit_buf2   [i], MAX_MB_BYTES);
2914         init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2917     s->last_bits= put_bits_count(&s->pb);
2928     /* init last dc values */
2929     /* note: quant matrix value (8) is implied here */
2930     s->last_dc[i] = 128 << s->intra_dc_precision;
2932     s->current_picture.encoding_error[i] = 0;
2934     if(s->codec_id==AV_CODEC_ID_AMV){
        /* AMV uses biased DC predictors (see AMV's fixed quant tables) */
2935         s->last_dc[0] = 128*8/13;
2936         s->last_dc[1] = 128*8/14;
2937         s->last_dc[2] = 128*8/14;
2940     memset(s->last_mv, 0, sizeof(s->last_mv));
2944     switch(s->codec_id){
2945     case AV_CODEC_ID_H263:
2946     case AV_CODEC_ID_H263P:
2947     case AV_CODEC_ID_FLV1:
2948         if (CONFIG_H263_ENCODER)
2949             s->gob_index = H263_GOB_HEIGHT(s->height);
2951     case AV_CODEC_ID_MPEG4:
2952         if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2953             ff_mpeg4_init_partitions(s);
2959     s->first_slice_line = 1;
2960     s->ptr_lastgob = s->pb.buf;
    /* main row loop; SpeedHQ encodes rows in a codec-specific order */
2961     for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
2962         if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
2964             mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
2965             if (first_in_slice && mb_y_order != s->start_mb_y)
2966                 ff_speedhq_end_slice(s);
2967             s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
2974         ff_set_qscale(s, s->qscale);
2975         ff_init_block_index(s);
2977         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2978             int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2979             int mb_type= s->mb_type[xy];
            /* make sure the output buffer can hold another worst-case MB */
2983             int size_increase =  s->avctx->internal->byte_buffer_size/4
2984                                + s->mb_width*MAX_MB_BYTES;
2986             ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
2987             if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
2988                 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2991             if(s->data_partitioning){
2992                 if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
2993                     put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
2994                     av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3000             s->mb_y = mb_y;  // moved into loop, can get changed by H.261
3001             ff_update_block_index(s);
3003             if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3004                 ff_h261_reorder_mb_index(s);
3005                 xy= s->mb_y*s->mb_stride + s->mb_x;
3006                 mb_type= s->mb_type[xy];
3009             /* write gob / video packet header  */
3011                 int current_packet_size, is_gob_start;
3013                 current_packet_size = put_bytes_count(&s->pb, 1)
3014                                       - (s->ptr_lastgob - s->pb.buf);
3016                 is_gob_start = s->rtp_payload_size &&
3017                                current_packet_size >= s->rtp_payload_size &&
3020                 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
                /* codec-specific constraints on where a GOB/slice may start */
3022                 switch(s->codec_id){
3023                 case AV_CODEC_ID_H263:
3024                 case AV_CODEC_ID_H263P:
3025                     if(!s->h263_slice_structured)
3026                         if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3028                 case AV_CODEC_ID_MPEG2VIDEO:
3029                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3030                 case AV_CODEC_ID_MPEG1VIDEO:
3031                     if(s->mb_skip_run) is_gob_start=0;
3033                 case AV_CODEC_ID_MJPEG:
3034                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3039                     if(s->start_mb_y != mb_y || mb_x!=0){
3042                         if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3043                             ff_mpeg4_init_partitions(s);
3047                     av_assert2((put_bits_count(&s->pb)&7) == 0);
3048                     current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
                    /* error_rate testing: randomly drop the packet contents */
3050                     if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3051                         int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->mb_x + s->mb_y;
3052                         int d = 100 / s->error_rate;
3054                             current_packet_size=0;
3055                             s->pb.buf_ptr= s->ptr_lastgob;
3056                             av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3060 #if FF_API_RTP_CALLBACK
3061 FF_DISABLE_DEPRECATION_WARNINGS
3062                     if (s->avctx->rtp_callback){
3063                         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3064                         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3066 FF_ENABLE_DEPRECATION_WARNINGS
3068                     update_mb_info(s, 1);
                    /* emit the resync header for the new packet/slice/GOB */
3070                     switch(s->codec_id){
3071                     case AV_CODEC_ID_MPEG4:
3072                         if (CONFIG_MPEG4_ENCODER) {
3073                             ff_mpeg4_encode_video_packet_header(s);
3074                             ff_mpeg4_clean_buffers(s);
3077                     case AV_CODEC_ID_MPEG1VIDEO:
3078                     case AV_CODEC_ID_MPEG2VIDEO:
3079                         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3080                             ff_mpeg1_encode_slice_header(s);
3081                             ff_mpeg1_clean_buffers(s);
3084                     case AV_CODEC_ID_H263:
3085                     case AV_CODEC_ID_H263P:
3086                         if (CONFIG_H263_ENCODER)
3087                             ff_h263_encode_gob_header(s, mb_y);
3091                     if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3092                         int bits= put_bits_count(&s->pb);
3093                         s->misc_bits+= bits - s->last_bits;
3097                     s->ptr_lastgob += current_packet_size;
3098                     s->first_slice_line=1;
3099                     s->resync_mb_x=mb_x;
3100                     s->resync_mb_y=mb_y;
3104             if(  (s->resync_mb_x   == s->mb_x)
3105                && s->resync_mb_y+1 == s->mb_y){
3106                 s->first_slice_line=0;
3110             s->dquant=0; //only for QP_RD
3112             update_mb_info(s, 0);
3114             if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
                /* --- trial-encode path: test each candidate MB type --- */
3116                 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3118                 copy_context_before_encode(&backup_s, s, -1);
3120                 best_s.data_partitioning= s->data_partitioning;
3121                 best_s.partitioned_frame= s->partitioned_frame;
3122                 if(s->data_partitioning){
3123                     backup_s.pb2= s->pb2;
3124                     backup_s.tex_pb= s->tex_pb;
3127                 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3128                     s->mv_dir = MV_DIR_FORWARD;
3129                     s->mv_type = MV_TYPE_16X16;
3131                     s->mv[0][0][0] = s->p_mv_table[xy][0];
3132                     s->mv[0][0][1] = s->p_mv_table[xy][1];
3133                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3134                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3136                 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3137                     s->mv_dir = MV_DIR_FORWARD;
3138                     s->mv_type = MV_TYPE_FIELD;
3141                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3142                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3143                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3145                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3146                                  &dmin, &next_block, 0, 0);
3148                 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3149                     s->mv_dir = MV_DIR_FORWARD;
3150                     s->mv_type = MV_TYPE_16X16;
3154                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3155                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3157                 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3158                     s->mv_dir = MV_DIR_FORWARD;
3159                     s->mv_type = MV_TYPE_8X8;
3162                         s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3163                         s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3165                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3166                                  &dmin, &next_block, 0, 0);
3168                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3169                     s->mv_dir = MV_DIR_FORWARD;
3170                     s->mv_type = MV_TYPE_16X16;
3172                     s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3173                     s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3174                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3175                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3177                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3178                     s->mv_dir = MV_DIR_BACKWARD;
3179                     s->mv_type = MV_TYPE_16X16;
3181                     s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3182                     s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3183                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3184                                  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3186                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3187                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3188                     s->mv_type = MV_TYPE_16X16;
3190                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3191                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3192                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3193                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3194                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3195                                  &dmin, &next_block, 0, 0);
3197                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3198                     s->mv_dir = MV_DIR_FORWARD;
3199                     s->mv_type = MV_TYPE_FIELD;
3202                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3203                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3204                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3206                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3207                                  &dmin, &next_block, 0, 0);
3209                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3210                     s->mv_dir = MV_DIR_BACKWARD;
3211                     s->mv_type = MV_TYPE_FIELD;
3214                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3215                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3216                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3218                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3219                                  &dmin, &next_block, 0, 0);
3221                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3222                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3223                     s->mv_type = MV_TYPE_FIELD;
3225                     for(dir=0; dir<2; dir++){
3227                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3228                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3229                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3232                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3233                                  &dmin, &next_block, 0, 0);
3235                 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3237                     s->mv_type = MV_TYPE_16X16;
3241                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3242                                  &dmin, &next_block, 0, 0);
3243                     if(s->h263_pred || s->h263_aic){
3245                             s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3247                             ff_clean_intra_table_entries(s); //old mode?
                /* --- QP_RD: additionally try nearby quantizers for the winner --- */
3251                 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3252                     if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3253                         const int last_qp= backup_s.qscale;
3256                         const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3257                         static const int dquant_tab[4]={-1,1,-2,2};
3258                         int storecoefs = s->mb_intra && s->dc_val[0];
3260                         av_assert2(backup_s.dquant == 0);
3263                         s->mv_dir= best_s.mv_dir;
3264                         s->mv_type = MV_TYPE_16X16;
3265                         s->mb_intra= best_s.mb_intra;
3266                         s->mv[0][0][0] = best_s.mv[0][0][0];
3267                         s->mv[0][0][1] = best_s.mv[0][0][1];
3268                         s->mv[1][0][0] = best_s.mv[1][0][0];
3269                         s->mv[1][0][1] = best_s.mv[1][0][1];
                        /* B-frames only try +/-1 dquant (indices 2,3) */
3271                         qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3272                         for(; qpi<4; qpi++){
3273                             int dquant= dquant_tab[qpi];
3274                             qp= last_qp + dquant;
3275                             if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3277                             backup_s.dquant= dquant;
                            /* save DC/AC predictors so a losing trial can restore them */
3280                                     dc[i]= s->dc_val[0][ s->block_index[i] ];
3281                                     memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3285                             encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3286                                          &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3287                             if(best_s.qscale != qp){
3290                                         s->dc_val[0][ s->block_index[i] ]= dc[i];
3291                                         memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3298                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3299                     int mx= s->b_direct_mv_table[xy][0];
3300                     int my= s->b_direct_mv_table[xy][1];
3302                     backup_s.dquant = 0;
3303                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3305                     ff_mpeg4_set_direct_mv(s, mx, my);
3306                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3307                                  &dmin, &next_block, mx, my);
3309                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3310                     backup_s.dquant = 0;
3311                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3313                     ff_mpeg4_set_direct_mv(s, 0, 0);
3314                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3315                                  &dmin, &next_block, 0, 0);
                /* SKIP_RD: if the winner coded nothing, try an explicit skip */
3317                 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3320                         coded |= s->block_last_index[i];
3323                         memcpy(s->mv, best_s.mv, sizeof(s->mv));
3324                         if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3325                             mx=my=0; //FIXME find the one we actually used
3326                             ff_mpeg4_set_direct_mv(s, mx, my);
3327                         }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3335                         s->mv_dir= best_s.mv_dir;
3336                         s->mv_type = best_s.mv_type;
3338 /*                        s->mv[0][0][0] = best_s.mv[0][0][0];
3339                         s->mv[0][0][1] = best_s.mv[0][0][1];
3340                         s->mv[1][0][0] = best_s.mv[1][0][0];
3341                         s->mv[1][0][1] = best_s.mv[1][0][1];*/
3344                         encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3345                                      &dmin, &next_block, mx, my);
3350                 s->current_picture.qscale_table[xy] = best_s.qscale;
                /* commit the winning candidate: restore its state and copy its
                 * bits from the trial buffers into the real bit writers */
3352                 copy_context_after_encode(s, &best_s, -1);
3354                 pb_bits_count= put_bits_count(&s->pb);
3355                 flush_put_bits(&s->pb);
3356                 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3359                 if(s->data_partitioning){
3360                     pb2_bits_count= put_bits_count(&s->pb2);
3361                     flush_put_bits(&s->pb2);
3362                     ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3363                     s->pb2= backup_s.pb2;
3365                     tex_pb_bits_count= put_bits_count(&s->tex_pb);
3366                     flush_put_bits(&s->tex_pb);
3367                     ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3368                     s->tex_pb= backup_s.tex_pb;
3370                 s->last_bits= put_bits_count(&s->pb);
3372                 if (CONFIG_H263_ENCODER &&
3373                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3374                     ff_h263_update_motion_val(s);
                /* the winner's reconstruction lives in the scratchpad half that
                 * was NOT used last; copy it into the real destination */
3376                 if(next_block==0){ //FIXME 16 vs linesize16
3377                     s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad                     , s->linesize  ,16);
3378                     s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize    , s->uvlinesize, 8);
3379                     s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3382                 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3383                     ff_mpv_reconstruct_mb(s, s->block);
            /* --- single-type path: set up MVs for the one possible type --- */
3385                 int motion_x = 0, motion_y = 0;
3386                 s->mv_type=MV_TYPE_16X16;
3387                 // only one MB-Type possible
3390                 case CANDIDATE_MB_TYPE_INTRA:
3393                     motion_x= s->mv[0][0][0] = 0;
3394                     motion_y= s->mv[0][0][1] = 0;
3396                 case CANDIDATE_MB_TYPE_INTER:
3397                     s->mv_dir = MV_DIR_FORWARD;
3399                     motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3400                     motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3402                 case CANDIDATE_MB_TYPE_INTER_I:
3403                     s->mv_dir = MV_DIR_FORWARD;
3404                     s->mv_type = MV_TYPE_FIELD;
3407                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3408                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3409                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3412                 case CANDIDATE_MB_TYPE_INTER4V:
3413                     s->mv_dir = MV_DIR_FORWARD;
3414                     s->mv_type = MV_TYPE_8X8;
3417                         s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3418                         s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3421                 case CANDIDATE_MB_TYPE_DIRECT:
3422                     if (CONFIG_MPEG4_ENCODER) {
3423                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3425                         motion_x=s->b_direct_mv_table[xy][0];
3426                         motion_y=s->b_direct_mv_table[xy][1];
3427                         ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3430                 case CANDIDATE_MB_TYPE_DIRECT0:
3431                     if (CONFIG_MPEG4_ENCODER) {
3432                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3434                         ff_mpeg4_set_direct_mv(s, 0, 0);
3437                 case CANDIDATE_MB_TYPE_BIDIR:
3438                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3440                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3441                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3442                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3443                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3445                 case CANDIDATE_MB_TYPE_BACKWARD:
3446                     s->mv_dir = MV_DIR_BACKWARD;
3448                     motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3449                     motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3451                 case CANDIDATE_MB_TYPE_FORWARD:
3452                     s->mv_dir = MV_DIR_FORWARD;
3454                     motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3455                     motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3457                 case CANDIDATE_MB_TYPE_FORWARD_I:
3458                     s->mv_dir = MV_DIR_FORWARD;
3459                     s->mv_type = MV_TYPE_FIELD;
3462                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3463                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3464                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3467                 case CANDIDATE_MB_TYPE_BACKWARD_I:
3468                     s->mv_dir = MV_DIR_BACKWARD;
3469                     s->mv_type = MV_TYPE_FIELD;
3472                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3473                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3474                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3477                 case CANDIDATE_MB_TYPE_BIDIR_I:
3478                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3479                     s->mv_type = MV_TYPE_FIELD;
3481                     for(dir=0; dir<2; dir++){
3483                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3484                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3485                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3490                     av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3493                 encode_mb(s, motion_x, motion_y);
3495                 // RAL: Update last macroblock type
3496                 s->last_mv_dir = s->mv_dir;
3498                 if (CONFIG_H263_ENCODER &&
3499                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3500                     ff_h263_update_motion_val(s);
3502                 ff_mpv_reconstruct_mb(s, s->block);
3505             /* clean the MV table in IPS frames for direct mode in B-frames */
3506             if(s->mb_intra /* && I,P,S_TYPE */){
3507                 s->p_mv_table[xy][0]=0;
3508                 s->p_mv_table[xy][1]=0;
            /* accumulate per-plane SSE for PSNR reporting (border-clipped) */
3511             if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3515                 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3516                 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3518                 s->current_picture.encoding_error[0] += sse(
3519                     s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3520                     s->dest[0], w, h, s->linesize);
3521                 s->current_picture.encoding_error[1] += sse(
3522                     s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
3523                     s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3524                 s->current_picture.encoding_error[2] += sse(
3525                     s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
3526                     s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3529             if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3530                 ff_h263_loop_filter(s);
3532             ff_dlog(s->avctx, "MB %d %d bits\n",
3533                     s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3537     //not beautiful here but we must write it before flushing so it has to be here
3538     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3539         ff_msmpeg4_encode_ext_header(s);
3543 #if FF_API_RTP_CALLBACK
3544 FF_DISABLE_DEPRECATION_WARNINGS
3545     /* Send the last GOB if RTP */
3546     if (s->avctx->rtp_callback) {
3547         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3548         int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3549         /* Call the RTP callback to send the last GOB */
3551         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3553 FF_ENABLE_DEPRECATION_WARNINGS
3559 #define MERGE(field) dst->field += src->field; src->field=0
3560 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3561 MERGE(me.scene_change_score);
3562 MERGE(me.mc_mb_var_sum_temp);
3563 MERGE(me.mb_var_sum_temp);
3566 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3569 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3570 MERGE(dct_count[1]);
3579 MERGE(er.error_count);
3580 MERGE(padding_bug_score);
3581 MERGE(current_picture.encoding_error[0]);
3582 MERGE(current_picture.encoding_error[1]);
3583 MERGE(current_picture.encoding_error[2]);
3585 if (dst->noise_reduction){
3586 for(i=0; i<64; i++){
3587 MERGE(dct_error_sum[0][i]);
3588 MERGE(dct_error_sum[1][i]);
3592 av_assert1(put_bits_count(&src->pb) % 8 ==0);
3593 av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3594 ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3595 flush_put_bits(&dst->pb);
3598 static int estimate_qp(MpegEncContext *s, int dry_run){
3599 if (s->next_lambda){
3600 s->current_picture_ptr->f->quality =
3601 s->current_picture.f->quality = s->next_lambda;
3602 if(!dry_run) s->next_lambda= 0;
3603 } else if (!s->fixed_qscale) {
3604 int quality = ff_rate_estimate_qscale(s, dry_run);
3605 s->current_picture_ptr->f->quality =
3606 s->current_picture.f->quality = quality;
3607 if (s->current_picture.f->quality < 0)
3611 if(s->adaptive_quant){
3612 switch(s->codec_id){
3613 case AV_CODEC_ID_MPEG4:
3614 if (CONFIG_MPEG4_ENCODER)
3615 ff_clean_mpeg4_qscales(s);
3617 case AV_CODEC_ID_H263:
3618 case AV_CODEC_ID_H263P:
3619 case AV_CODEC_ID_FLV1:
3620 if (CONFIG_H263_ENCODER)
3621 ff_clean_h263_qscales(s);
3624 ff_init_qscale_tab(s);
3627 s->lambda= s->lambda_table[0];
3630 s->lambda = s->current_picture.f->quality;
3635 /* must be called before writing the header */
3636 static void set_frame_distances(MpegEncContext * s){
3637 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3638 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3640 if(s->pict_type==AV_PICTURE_TYPE_B){
3641 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3642 av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
3644 s->pp_time= s->time - s->last_non_b_time;
3645 s->last_non_b_time= s->time;
3646 av_assert1(s->picture_number==0 || s->pp_time > 0);
3650 static int encode_picture(MpegEncContext *s, int picture_number)
3654 int context_count = s->slice_context_count;
3656 s->picture_number = picture_number;
3658 /* Reset the average MB variance */
3659 s->me.mb_var_sum_temp =
3660 s->me.mc_mb_var_sum_temp = 0;
3662 /* we need to initialize some time vars before we can encode B-frames */
3663 // RAL: Condition added for MPEG1VIDEO
3664 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3665 set_frame_distances(s);
3666 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3667 ff_set_mpeg4_time(s);
3669 s->me.scene_change_score=0;
3671 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3673 if(s->pict_type==AV_PICTURE_TYPE_I){
3674 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3675 else s->no_rounding=0;
3676 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3677 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3678 s->no_rounding ^= 1;
3681 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3682 if (estimate_qp(s,1) < 0)
3684 ff_get_2pass_fcode(s);
3685 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3686 if(s->pict_type==AV_PICTURE_TYPE_B)
3687 s->lambda= s->last_lambda_for[s->pict_type];
3689 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3693 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3694 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3695 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3696 s->q_chroma_intra_matrix = s->q_intra_matrix;
3697 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3700 s->mb_intra=0; //for the rate distortion & bit compare functions
3701 for(i=1; i<context_count; i++){
3702 ret = ff_update_duplicate_context(s->thread_context[i], s);
3710 /* Estimate motion for every MB */
3711 if(s->pict_type != AV_PICTURE_TYPE_I){
3712 s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3713 s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3714 if (s->pict_type != AV_PICTURE_TYPE_B) {
3715 if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3717 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3721 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3722 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3724 for(i=0; i<s->mb_stride*s->mb_height; i++)
3725 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3727 if(!s->fixed_qscale){
3728 /* finding spatial complexity for I-frame rate control */
3729 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3732 for(i=1; i<context_count; i++){
3733 merge_context_after_me(s, s->thread_context[i]);
3735 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3736 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
3739 if (s->me.scene_change_score > s->scenechange_threshold &&
3740 s->pict_type == AV_PICTURE_TYPE_P) {
3741 s->pict_type= AV_PICTURE_TYPE_I;
3742 for(i=0; i<s->mb_stride*s->mb_height; i++)
3743 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3744 if(s->msmpeg4_version >= 3)
3746 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3747 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
3751 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3752 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3754 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3756 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3757 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3758 s->f_code= FFMAX3(s->f_code, a, b);
3761 ff_fix_long_p_mvs(s, s->intra_penalty ? CANDIDATE_MB_TYPE_INTER : CANDIDATE_MB_TYPE_INTRA);
3762 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3763 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3767 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3768 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
3773 if(s->pict_type==AV_PICTURE_TYPE_B){
3776 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3777 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3778 s->f_code = FFMAX(a, b);
3780 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3781 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3782 s->b_code = FFMAX(a, b);
3784 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3785 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3786 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3787 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3788 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3790 for(dir=0; dir<2; dir++){
3793 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3794 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3795 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3796 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3804 if (estimate_qp(s, 0) < 0)
3807 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3808 s->pict_type == AV_PICTURE_TYPE_I &&
3809 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3810 s->qscale= 3; //reduce clipping problems
3812 if (s->out_format == FMT_MJPEG) {
3813 const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3814 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3816 if (s->avctx->intra_matrix) {
3818 luma_matrix = s->avctx->intra_matrix;
3820 if (s->avctx->chroma_intra_matrix)
3821 chroma_matrix = s->avctx->chroma_intra_matrix;
3823 /* for mjpeg, we do include qscale in the matrix */
3825 int j = s->idsp.idct_permutation[i];
3827 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3828 s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3830 s->y_dc_scale_table=
3831 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3832 s->chroma_intra_matrix[0] =
3833 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3834 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3835 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3836 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3837 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3840 if(s->codec_id == AV_CODEC_ID_AMV){
3841 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3842 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3844 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3846 s->intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3847 s->chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3849 s->y_dc_scale_table= y;
3850 s->c_dc_scale_table= c;
3851 s->intra_matrix[0] = 13;
3852 s->chroma_intra_matrix[0] = 14;
3853 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3854 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3855 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3856 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3860 if (s->out_format == FMT_SPEEDHQ) {
3861 s->y_dc_scale_table=
3862 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[3];
3865 //FIXME var duplication
3866 s->current_picture_ptr->f->key_frame =
3867 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3868 s->current_picture_ptr->f->pict_type =
3869 s->current_picture.f->pict_type = s->pict_type;
3871 if (s->current_picture.f->key_frame)
3872 s->picture_in_gop_number=0;
3874 s->mb_x = s->mb_y = 0;
3875 s->last_bits= put_bits_count(&s->pb);
3876 switch(s->out_format) {
3877 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
3879 /* s->huffman == HUFFMAN_TABLE_OPTIMAL can only be true for MJPEG. */
3880 if (!CONFIG_MJPEG_ENCODER || s->huffman != HUFFMAN_TABLE_OPTIMAL)
3881 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3882 s->pred, s->intra_matrix, s->chroma_intra_matrix);
3886 if (CONFIG_SPEEDHQ_ENCODER)
3887 ff_speedhq_encode_picture_header(s);
3890 if (CONFIG_H261_ENCODER)
3891 ff_h261_encode_picture_header(s, picture_number);
3894 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3895 ff_wmv2_encode_picture_header(s, picture_number);
3896 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3897 ff_msmpeg4_encode_picture_header(s, picture_number);
3898 else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3899 ret = ff_mpeg4_encode_picture_header(s, picture_number);
3902 } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3903 ret = ff_rv10_encode_picture_header(s, picture_number);
3907 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3908 ff_rv20_encode_picture_header(s, picture_number);
3909 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3910 ff_flv_encode_picture_header(s, picture_number);
3911 else if (CONFIG_H263_ENCODER)
3912 ff_h263_encode_picture_header(s, picture_number);
3915 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3916 ff_mpeg1_encode_picture_header(s, picture_number);
3921 bits= put_bits_count(&s->pb);
3922 s->header_bits= bits - s->last_bits;
3924 for(i=1; i<context_count; i++){
3925 update_duplicate_context_after_me(s->thread_context[i], s);
3927 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3928 for(i=1; i<context_count; i++){
3929 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3930 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3931 merge_context_after_encode(s, s->thread_context[i]);
3937 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3938 const int intra= s->mb_intra;
3941 s->dct_count[intra]++;
3943 for(i=0; i<64; i++){
3944 int level= block[i];
3948 s->dct_error_sum[intra][i] += level;
3949 level -= s->dct_offset[intra][i];
3950 if(level<0) level=0;
3952 s->dct_error_sum[intra][i] -= level;
3953 level += s->dct_offset[intra][i];
3954 if(level>0) level=0;
3961 static int dct_quantize_trellis_c(MpegEncContext *s,
3962 int16_t *block, int n,
3963 int qscale, int *overflow){
3965 const uint16_t *matrix;
3966 const uint8_t *scantable;
3967 const uint8_t *perm_scantable;
3969 unsigned int threshold1, threshold2;
3981 int coeff_count[64];
3982 int qmul, qadd, start_i, last_non_zero, i, dc;
3983 const int esc_length= s->ac_esc_length;
3985 uint8_t * last_length;
3986 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3989 s->fdsp.fdct(block);
3991 if(s->dct_error_sum)
3992 s->denoise_dct(s, block);
3994 qadd= ((qscale-1)|1)*8;
3996 if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3997 else mpeg2_qscale = qscale << 1;
4001 scantable= s->intra_scantable.scantable;
4002 perm_scantable= s->intra_scantable.permutated;
4010 /* For AIC we skip quant/dequant of INTRADC */
4015 /* note: block[0] is assumed to be positive */
4016 block[0] = (block[0] + (q >> 1)) / q;
4019 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4020 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4021 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4022 bias= 1<<(QMAT_SHIFT-1);
4024 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4025 length = s->intra_chroma_ac_vlc_length;
4026 last_length= s->intra_chroma_ac_vlc_last_length;
4028 length = s->intra_ac_vlc_length;
4029 last_length= s->intra_ac_vlc_last_length;
4032 scantable= s->inter_scantable.scantable;
4033 perm_scantable= s->inter_scantable.permutated;
4036 qmat = s->q_inter_matrix[qscale];
4037 matrix = s->inter_matrix;
4038 length = s->inter_ac_vlc_length;
4039 last_length= s->inter_ac_vlc_last_length;
4043 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4044 threshold2= (threshold1<<1);
4046 for(i=63; i>=start_i; i--) {
4047 const int j = scantable[i];
4048 int level = block[j] * qmat[j];
4050 if(((unsigned)(level+threshold1))>threshold2){
4056 for(i=start_i; i<=last_non_zero; i++) {
4057 const int j = scantable[i];
4058 int level = block[j] * qmat[j];
4060 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4061 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4062 if(((unsigned)(level+threshold1))>threshold2){
4064 level= (bias + level)>>QMAT_SHIFT;
4066 coeff[1][i]= level-1;
4067 // coeff[2][k]= level-2;
4069 level= (bias - level)>>QMAT_SHIFT;
4070 coeff[0][i]= -level;
4071 coeff[1][i]= -level+1;
4072 // coeff[2][k]= -level+2;
4074 coeff_count[i]= FFMIN(level, 2);
4075 av_assert2(coeff_count[i]);
4078 coeff[0][i]= (level>>31)|1;
4083 *overflow= s->max_qcoeff < max; //overflow might have happened
4085 if(last_non_zero < start_i){
4086 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4087 return last_non_zero;
4090 score_tab[start_i]= 0;
4091 survivor[0]= start_i;
4094 for(i=start_i; i<=last_non_zero; i++){
4095 int level_index, j, zero_distortion;
4096 int dct_coeff= FFABS(block[ scantable[i] ]);
4097 int best_score=256*256*256*120;
4099 if (s->fdsp.fdct == ff_fdct_ifast)
4100 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4101 zero_distortion= dct_coeff*dct_coeff;
4103 for(level_index=0; level_index < coeff_count[i]; level_index++){
4105 int level= coeff[level_index][i];
4106 const int alevel= FFABS(level);
4111 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4112 unquant_coeff= alevel*qmul + qadd;
4113 } else if(s->out_format == FMT_MJPEG) {
4114 j = s->idsp.idct_permutation[scantable[i]];
4115 unquant_coeff = alevel * matrix[j] * 8;
4117 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4119 unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4120 unquant_coeff = (unquant_coeff - 1) | 1;
4122 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4123 unquant_coeff = (unquant_coeff - 1) | 1;
4128 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4130 if((level&(~127)) == 0){
4131 for(j=survivor_count-1; j>=0; j--){
4132 int run= i - survivor[j];
4133 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4134 score += score_tab[i-run];
4136 if(score < best_score){
4139 level_tab[i+1]= level-64;
4143 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4144 for(j=survivor_count-1; j>=0; j--){
4145 int run= i - survivor[j];
4146 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4147 score += score_tab[i-run];
4148 if(score < last_score){
4151 last_level= level-64;
4157 distortion += esc_length*lambda;
4158 for(j=survivor_count-1; j>=0; j--){
4159 int run= i - survivor[j];
4160 int score= distortion + score_tab[i-run];
4162 if(score < best_score){
4165 level_tab[i+1]= level-64;
4169 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4170 for(j=survivor_count-1; j>=0; j--){
4171 int run= i - survivor[j];
4172 int score= distortion + score_tab[i-run];
4173 if(score < last_score){
4176 last_level= level-64;
4184 score_tab[i+1]= best_score;
4186 // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4187 if(last_non_zero <= 27){
4188 for(; survivor_count; survivor_count--){
4189 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4193 for(; survivor_count; survivor_count--){
4194 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4199 survivor[ survivor_count++ ]= i+1;
4202 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4203 last_score= 256*256*256*120;
4204 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4205 int score= score_tab[i];
4207 score += lambda * 2; // FIXME more exact?
4209 if(score < last_score){
4212 last_level= level_tab[i];
4213 last_run= run_tab[i];
4218 s->coded_score[n] = last_score;
4220 dc= FFABS(block[0]);
4221 last_non_zero= last_i - 1;
4222 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4224 if(last_non_zero < start_i)
4225 return last_non_zero;
4227 if(last_non_zero == 0 && start_i == 0){
4229 int best_score= dc * dc;
4231 for(i=0; i<coeff_count[0]; i++){
4232 int level= coeff[i][0];
4233 int alevel= FFABS(level);
4234 int unquant_coeff, score, distortion;
4236 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4237 unquant_coeff= (alevel*qmul + qadd)>>3;
4239 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4240 unquant_coeff = (unquant_coeff - 1) | 1;
4242 unquant_coeff = (unquant_coeff + 4) >> 3;
4243 unquant_coeff<<= 3 + 3;
4245 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4247 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4248 else score= distortion + esc_length*lambda;
4250 if(score < best_score){
4252 best_level= level - 64;
4255 block[0]= best_level;
4256 s->coded_score[n] = best_score - dc*dc;
4257 if(best_level == 0) return -1;
4258 else return last_non_zero;
4262 av_assert2(last_level);
4264 block[ perm_scantable[last_non_zero] ]= last_level;
4267 for(; i>start_i; i -= run_tab[i] + 1){
4268 block[ perm_scantable[i-1] ]= level_tab[i];
4271 return last_non_zero;
4274 static int16_t basis[64][64];
4276 static void build_basis(uint8_t *perm){
4283 double s= 0.25*(1<<BASIS_SHIFT);
4285 int perm_index= perm[index];
4286 if(i==0) s*= sqrt(0.5);
4287 if(j==0) s*= sqrt(0.5);
4288 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4295 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4296 int16_t *block, int16_t *weight, int16_t *orig,
4299 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4300 const uint8_t *scantable;
4301 const uint8_t *perm_scantable;
4302 // unsigned int threshold1, threshold2;
4307 int qmul, qadd, start_i, last_non_zero, i, dc;
4309 uint8_t * last_length;
4311 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
4313 if(basis[0][0] == 0)
4314 build_basis(s->idsp.idct_permutation);
4319 scantable= s->intra_scantable.scantable;
4320 perm_scantable= s->intra_scantable.permutated;
4327 /* For AIC we skip quant/dequant of INTRADC */
4331 q <<= RECON_SHIFT-3;
4332 /* note: block[0] is assumed to be positive */
4334 // block[0] = (block[0] + (q >> 1)) / q;
4336 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4337 // bias= 1<<(QMAT_SHIFT-1);
4338 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4339 length = s->intra_chroma_ac_vlc_length;
4340 last_length= s->intra_chroma_ac_vlc_last_length;
4342 length = s->intra_ac_vlc_length;
4343 last_length= s->intra_ac_vlc_last_length;
4346 scantable= s->inter_scantable.scantable;
4347 perm_scantable= s->inter_scantable.permutated;
4350 length = s->inter_ac_vlc_length;
4351 last_length= s->inter_ac_vlc_last_length;
4353 last_non_zero = s->block_last_index[n];
4355 dc += (1<<(RECON_SHIFT-1));
4356 for(i=0; i<64; i++){
4357 rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
4361 for(i=0; i<64; i++){
4366 w= FFABS(weight[i]) + qns*one;
4367 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4370 // w=weight[i] = (63*qns + (w/2)) / w;
4373 av_assert2(w<(1<<6));
4376 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
4380 for(i=start_i; i<=last_non_zero; i++){
4381 int j= perm_scantable[i];
4382 const int level= block[j];
4386 if(level<0) coeff= qmul*level - qadd;
4387 else coeff= qmul*level + qadd;
4388 run_tab[rle_index++]=run;
4391 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
4398 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4401 int run2, best_unquant_change=0, analyze_gradient;
4402 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
4404 if(analyze_gradient){
4405 for(i=0; i<64; i++){
4408 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
4414 const int level= block[0];
4415 int change, old_coeff;
4417 av_assert2(s->mb_intra);
4421 for(change=-1; change<=1; change+=2){
4422 int new_level= level + change;
4423 int score, new_coeff;
4425 new_coeff= q*new_level;
4426 if(new_coeff >= 2048 || new_coeff < 0)
4429 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4430 new_coeff - old_coeff);
4431 if(score<best_score){
4434 best_change= change;
4435 best_unquant_change= new_coeff - old_coeff;
4442 run2= run_tab[rle_index++];
4446 for(i=start_i; i<64; i++){
4447 int j= perm_scantable[i];
4448 const int level= block[j];
4449 int change, old_coeff;
4451 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4455 if(level<0) old_coeff= qmul*level - qadd;
4456 else old_coeff= qmul*level + qadd;
4457 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4461 av_assert2(run2>=0 || i >= last_non_zero );
4464 for(change=-1; change<=1; change+=2){
4465 int new_level= level + change;
4466 int score, new_coeff, unquant_change;
4469 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4473 if(new_level<0) new_coeff= qmul*new_level - qadd;
4474 else new_coeff= qmul*new_level + qadd;
4475 if(new_coeff >= 2048 || new_coeff <= -2048)
4477 //FIXME check for overflow
4480 if(level < 63 && level > -63){
4481 if(i < last_non_zero)
4482 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4483 - length[UNI_AC_ENC_INDEX(run, level+64)];
4485 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4486 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
4489 av_assert2(FFABS(new_level)==1);
4491 if(analyze_gradient){
4492 int g= d1[ scantable[i] ];
4493 if(g && (g^new_level) >= 0)
4497 if(i < last_non_zero){
4498 int next_i= i + run2 + 1;
4499 int next_level= block[ perm_scantable[next_i] ] + 64;
4501 if(next_level&(~127))
4504 if(next_i < last_non_zero)
4505 score += length[UNI_AC_ENC_INDEX(run, 65)]
4506 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4507 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4509 score += length[UNI_AC_ENC_INDEX(run, 65)]
4510 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4511 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4513 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4515 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4516 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4522 av_assert2(FFABS(level)==1);
4524 if(i < last_non_zero){
4525 int next_i= i + run2 + 1;
4526 int next_level= block[ perm_scantable[next_i] ] + 64;
4528 if(next_level&(~127))
4531 if(next_i < last_non_zero)
4532 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4533 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4534 - length[UNI_AC_ENC_INDEX(run, 65)];
4536 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4537 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4538 - length[UNI_AC_ENC_INDEX(run, 65)];
4540 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4542 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4543 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4550 unquant_change= new_coeff - old_coeff;
4551 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4553 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4555 if(score<best_score){
4558 best_change= change;
4559 best_unquant_change= unquant_change;
4563 prev_level= level + 64;
4564 if(prev_level&(~127))
4574 int j= perm_scantable[ best_coeff ];
4576 block[j] += best_change;
4578 if(best_coeff > last_non_zero){
4579 last_non_zero= best_coeff;
4580 av_assert2(block[j]);
4582 for(; last_non_zero>=start_i; last_non_zero--){
4583 if(block[perm_scantable[last_non_zero]])
4590 for(i=start_i; i<=last_non_zero; i++){
4591 int j= perm_scantable[i];
4592 const int level= block[j];
4595 run_tab[rle_index++]=run;
4602 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4608 return last_non_zero;
4612 * Permute an 8x8 block according to permutation.
4613 * @param block the block which will be permuted according to
4614 * the given permutation vector
4615 * @param permutation the permutation vector
4616 * @param last the last non zero coefficient in scantable order, used to
4617 * speed the permutation up
4618 * @param scantable the used scantable, this is only used to speed the
4619 * permutation up, the block is not (inverse) permutated
4620 * to scantable order!
4622 void ff_block_permute(int16_t *block, uint8_t *permutation,
4623 const uint8_t *scantable, int last)
4630 //FIXME it is ok but not clean and might fail for some permutations
4631 // if (permutation[1] == 1)
4634 for (i = 0; i <= last; i++) {
4635 const int j = scantable[i];
4640 for (i = 0; i <= last; i++) {
4641 const int j = scantable[i];
4642 const int perm_j = permutation[j];
4643 block[perm_j] = temp[j];
4647 int ff_dct_quantize_c(MpegEncContext *s,
4648 int16_t *block, int n,
4649 int qscale, int *overflow)
4651 int i, j, level, last_non_zero, q, start_i;
4653 const uint8_t *scantable;
4656 unsigned int threshold1, threshold2;
4658 s->fdsp.fdct(block);
4660 if(s->dct_error_sum)
4661 s->denoise_dct(s, block);
4664 scantable= s->intra_scantable.scantable;
4672 /* For AIC we skip quant/dequant of INTRADC */
4675 /* note: block[0] is assumed to be positive */
4676 block[0] = (block[0] + (q >> 1)) / q;
4679 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4680 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4682 scantable= s->inter_scantable.scantable;
4685 qmat = s->q_inter_matrix[qscale];
4686 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4688 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4689 threshold2= (threshold1<<1);
4690 for(i=63;i>=start_i;i--) {
4692 level = block[j] * qmat[j];
4694 if(((unsigned)(level+threshold1))>threshold2){
4701 for(i=start_i; i<=last_non_zero; i++) {
4703 level = block[j] * qmat[j];
4705 // if( bias+level >= (1<<QMAT_SHIFT)
4706 // || bias-level >= (1<<QMAT_SHIFT)){
4707 if(((unsigned)(level+threshold1))>threshold2){
4709 level= (bias + level)>>QMAT_SHIFT;
4712 level= (bias - level)>>QMAT_SHIFT;
4720 *overflow= s->max_qcoeff < max; //overflow might have happened
4722 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4723 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4724 ff_block_permute(block, s->idsp.idct_permutation,
4725 scantable, last_non_zero);
4727 return last_non_zero;
4730 #define OFFSET(x) offsetof(MpegEncContext, x)
4731 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
4732 static const AVOption h263_options[] = {
4733 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4734 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4739 static const AVClass h263_class = {
4740 .class_name = "H.263 encoder",
4741 .item_name = av_default_item_name,
4742 .option = h263_options,
4743 .version = LIBAVUTIL_VERSION_INT,
4746 AVCodec ff_h263_encoder = {
4748 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4749 .type = AVMEDIA_TYPE_VIDEO,
4750 .id = AV_CODEC_ID_H263,
4751 .priv_data_size = sizeof(MpegEncContext),
4752 .init = ff_mpv_encode_init,
4753 .encode2 = ff_mpv_encode_picture,
4754 .close = ff_mpv_encode_end,
4755 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4756 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4757 .priv_class = &h263_class,
4760 static const AVOption h263p_options[] = {
4761 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4762 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4763 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4764 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
4768 static const AVClass h263p_class = {
4769 .class_name = "H.263p encoder",
4770 .item_name = av_default_item_name,
4771 .option = h263p_options,
4772 .version = LIBAVUTIL_VERSION_INT,
4775 AVCodec ff_h263p_encoder = {
4777 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4778 .type = AVMEDIA_TYPE_VIDEO,
4779 .id = AV_CODEC_ID_H263P,
4780 .priv_data_size = sizeof(MpegEncContext),
4781 .init = ff_mpv_encode_init,
4782 .encode2 = ff_mpv_encode_picture,
4783 .close = ff_mpv_encode_end,
4784 .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4785 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4786 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4787 .priv_class = &h263p_class,
4790 static const AVClass msmpeg4v2_class = {
4791 .class_name = "msmpeg4v2 encoder",
4792 .item_name = av_default_item_name,
4793 .option = ff_mpv_generic_options,
4794 .version = LIBAVUTIL_VERSION_INT,
4797 AVCodec ff_msmpeg4v2_encoder = {
4798 .name = "msmpeg4v2",
4799 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4800 .type = AVMEDIA_TYPE_VIDEO,
4801 .id = AV_CODEC_ID_MSMPEG4V2,
4802 .priv_data_size = sizeof(MpegEncContext),
4803 .init = ff_mpv_encode_init,
4804 .encode2 = ff_mpv_encode_picture,
4805 .close = ff_mpv_encode_end,
4806 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4807 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4808 .priv_class = &msmpeg4v2_class,
4811 static const AVClass msmpeg4v3_class = {
4812 .class_name = "msmpeg4v3 encoder",
4813 .item_name = av_default_item_name,
4814 .option = ff_mpv_generic_options,
4815 .version = LIBAVUTIL_VERSION_INT,
4818 AVCodec ff_msmpeg4v3_encoder = {
4820 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4821 .type = AVMEDIA_TYPE_VIDEO,
4822 .id = AV_CODEC_ID_MSMPEG4V3,
4823 .priv_data_size = sizeof(MpegEncContext),
4824 .init = ff_mpv_encode_init,
4825 .encode2 = ff_mpv_encode_picture,
4826 .close = ff_mpv_encode_end,
4827 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4828 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4829 .priv_class = &msmpeg4v3_class,
4832 static const AVClass wmv1_class = {
4833 .class_name = "wmv1 encoder",
4834 .item_name = av_default_item_name,
4835 .option = ff_mpv_generic_options,
4836 .version = LIBAVUTIL_VERSION_INT,
4839 AVCodec ff_wmv1_encoder = {
4841 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4842 .type = AVMEDIA_TYPE_VIDEO,
4843 .id = AV_CODEC_ID_WMV1,
4844 .priv_data_size = sizeof(MpegEncContext),
4845 .init = ff_mpv_encode_init,
4846 .encode2 = ff_mpv_encode_picture,
4847 .close = ff_mpv_encode_end,
4848 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4849 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4850 .priv_class = &wmv1_class,