2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/mem_internal.h"
40 #include "libavutil/pixdesc.h"
41 #include "libavutil/opt.h"
42 #include "libavutil/thread.h"
47 #include "mpegvideo.h"
48 #include "mpegvideodata.h"
52 #include "mjpegenc_common.h"
54 #include "mpegutils.h"
56 #include "speedhqenc.h"
58 #include "pixblockdsp.h"
62 #include "aandcttab.h"
64 #include "mpeg4video.h"
66 #include "bytestream.h"
69 #include "packet_internal.h"
73 #define QUANT_BIAS_SHIFT 8
75 #define QMAT_SHIFT_MMX 16
78 static int encode_picture(MpegEncContext *s, int picture_number);
79 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
80 static int sse_mb(MpegEncContext *s);
81 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
82 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
84 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
85 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
87 const AVOption ff_mpv_generic_options[] = {
/**
 * Build, for each qscale in [qmin, qmax], the integer reciprocal tables used
 * by the quantizers: qmat (32-bit entries, for the C/trellis quantizer) and
 * qmat16 (16-bit multiplier/bias pairs, for the SIMD-style quantizer).
 *
 * @param quant_matrix 8x8 quantization matrix in natural (un-permuted) order
 * @param bias         rounding bias in units of 1 << QUANT_BIAS_SHIFT
 * @param qmin, qmax   inclusive qscale range to fill
 * @param intra        nonzero for intra matrices; used below to skip the DC
 *                     coefficient in the overflow check loop
 *
 * NOTE(review): several interior source lines are missing from this view
 * (declarations of qscale/qscale2/i, some braces, the tail of the function);
 * the visible code is annotated as-is.
 */
void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
                       uint16_t (*qmat16)[2][64],
                       const uint16_t *quant_matrix,
                       int bias, int qmin, int qmax, int intra)
    FDCTDSPContext *fdsp = &s->fdsp;

    for (qscale = qmin; qscale <= qmax; qscale++) {
        /* MPEG-2 non-linear quantizer maps qscale through a LUT; the linear
         * case uses 2*qscale directly. */
        if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
        else qscale2 = qscale << 1;

        /* Branch on which forward DCT is in use: the "islow"/faandct DCTs
         * produce unscaled output, so a plain reciprocal suffices. */
        if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
            fdsp->fdct == ff_faandct ||
#endif /* CONFIG_FAANDCT */
            fdsp->fdct == ff_jpeg_fdct_islow_10) {
            for (i = 0; i < 64; i++) {
                /* j: coefficient index after the IDCT permutation */
                const int j = s->idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale2 * quant_matrix[j];
                /* 16 <= qscale * quant_matrix[i] <= 7905
                 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
                 * 19952 <= x <= 249205026
                 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
                 * 3444240 >= (1 << 36) / (x) >= 275 */
                qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
        } else if (fdsp->fdct == ff_fdct_ifast) {
            /* The AAN fast DCT leaves per-coefficient scale factors in its
             * output, so ff_aanscales is folded into the denominator and the
             * shift is widened by 14 bits to compensate. */
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
                /* 16 <= qscale * quant_matrix[i] <= 7905
                 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
                 * 19952 <= x <= 249205026
                 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
                 * 3444240 >= (1 << 36) / (x) >= 275 */
                qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
            /* Default path (presumably for the remaining DCTs — the `else`
             * line itself is not visible here): fill both the 32-bit table
             * and the 16-bit multiplier/bias pair table. */
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale2 * quant_matrix[j];
                /* We can safely suppose that 16 <= quant_matrix[i] <= 255
                 * Assume x = qscale * quant_matrix[i]
                 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
                 * so 32768 >= (1 << 19) / (x) >= 67 */
                qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
                //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
                //                   (qscale * quant_matrix[i]);
                qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;

                /* Keep the 16-bit multiplier strictly inside (0, 0x8000) so
                 * the SIMD quantizer's signed 16-bit math cannot overflow. */
                if (qmat16[qscale][0][i] == 0 ||
                    qmat16[qscale][0][i] == 128 * 256)
                    qmat16[qscale][0][i] = 128 * 256 - 1;
                qmat16[qscale][1][i] =
                    ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
                                qmat16[qscale][0][i]);

        /* Overflow check: starting at `intra` skips the DC coefficient for
         * intra matrices. If max * qmat can exceed INT_MAX the shift is
         * reduced (reduction code not visible here) and a warning logged. */
        for (i = intra; i < 64; i++) {
            if (fdsp->fdct == ff_fdct_ifast) {
                max = (8191LL * ff_aanscales[i]) >> 14;
            while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
                av_log(s->avctx, AV_LOG_INFO,
                       "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/* Derive s->qscale (and lambda2) from the current rate-control lambda.
 * NOTE(review): interior lines are missing from this view (declarations,
 * braces, `continue`, best-index bookkeeping, final shift amount). */
static inline void update_qscale(MpegEncContext *s)
    /* NOTE: the `&& 0` makes this non-linear-qscale search branch dead code
     * by construction — it is deliberately disabled, not a typo to "fix". */
    if (s->q_scale_type == 1 && 0) {
        int bestdiff=INT_MAX;

        /* Exhaustive search for the LUT entry whose scaled value best
         * matches lambda * 139 (the lambda->qscale conversion constant). */
        for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
            int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
            /* Skip entries outside [qmin, qmax]; qmax is ignored while VBV
             * emergency mode (vbv_ignore_qmax) is active. */
            if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
                (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
            if (diff < bestdiff) {
        /* Linear case: qscale ~= lambda * 139 / 2^(FF_LAMBDA_SHIFT+7),
         * rounded, then clamped. In VBV emergency mode the upper clamp is
         * the hard codec limit 31 instead of the user qmax. */
        s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
                    (FF_LAMBDA_SHIFT + 7);
    s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);

    /* Keep lambda2 (= lambda^2 in FF_LAMBDA units) consistent with lambda. */
    s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/* Write an 8x8 quantization matrix to the bitstream, 8 bits per entry,
 * in zigzag scan order. (The surrounding braces and any handling of a
 * NULL matrix are not visible in this view.) */
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
        for (i = 0; i < 64; i++) {
            put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
 * init s->current_picture.qscale_table from s->lambda_table
 */
/* Converts each macroblock's lambda into a qscale value (lambda * 139
 * rounded, same constant as update_qscale) and stores it per-MB, clamped
 * to at least avctx->qmin (the upper clamp line is not visible here). */
void ff_init_qscale_tab(MpegEncContext *s)
    int8_t * const qscale_table = s->current_picture.qscale_table;

    for (i = 0; i < s->mb_num; i++) {
        /* mb_index2xy maps the linear MB index to the stride-based index
         * used by both lambda_table and qscale_table. */
        unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
        int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
        qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/* Copy the fields that motion estimation may have changed from the master
 * context back into a slice-thread duplicate context, so all slice contexts
 * agree before encoding. (The `src` parameter line and the #undef are not
 * visible in this view.) */
static void update_duplicate_context_after_me(MpegEncContext *dst,
#define COPY(a) dst->a= src->a
    COPY(current_picture);
    COPY(picture_in_gop_number);
    COPY(gop_picture_number);
    COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
    COPY(progressive_frame);    // FIXME don't set in encode_header
    COPY(partitioned_frame);    // FIXME don't set in encode_header
/* One-time (ff_thread_once) initialization of the static encoder tables.
 * Marks fcodes for small motion vectors (|mv| < 16) as 1; other table
 * initialization, if any, is not visible in this view. */
static void mpv_encode_init_static(void)
    for (int i = -16; i < 16; i++)
        default_fcode_tab[i + MAX_MV] = 1;
 * Set the given MpegEncContext to defaults for encoding.
 * the changed fields will not depend upon the prior state of the MpegEncContext.
 */
static void mpv_encode_defaults(MpegEncContext *s)
    static AVOnce init_static_once = AV_ONCE_INIT;

    /* Common (decode+encode) defaults first, then encoder-specific ones. */
    ff_mpv_common_defaults(s);

    /* Thread-safe one-shot init of the shared static tables above. */
    ff_thread_once(&init_static_once, mpv_encode_init_static);

    s->me.mv_penalty = default_mv_penalty;
    s->fcode_tab     = default_fcode_tab;

    /* Reset picture counters so encoding starts from a clean state. */
    s->input_picture_number  = 0;
    s->picture_in_gop_number = 0;
/* Select the DCT quantizer implementations. Architecture-specific setup
 * runs first (guard/#if around the x86 call is not visible here); the C
 * fallback is installed only if nothing else claimed dct_quantize. The
 * trellis quantizer replaces the default one but fast_dct_quantize keeps
 * the non-trellis version for speed-critical call sites. */
av_cold int ff_dct_encode_init(MpegEncContext *s)
        ff_dct_encode_init_x86(s);

    if (CONFIG_H263_ENCODER)
        ff_h263dsp_init(&s->h263dsp);
    if (!s->dct_quantize)
        s->dct_quantize = ff_dct_quantize_c;
        s->denoise_dct = denoise_dct_c;
    s->fast_dct_quantize = s->dct_quantize;
    if (s->avctx->trellis)
        s->dct_quantize = dct_quantize_trellis_c;
/* init video encoder */
/* Validate the AVCodecContext options, configure the MpegEncContext for the
 * selected codec, allocate encoder-side tables and initialize rate control.
 * Returns 0 on success or a negative AVERROR code.
 * NOTE(review): many interior lines (breaks, braces, some assignments and
 * error paths) are missing from this view; the visible code is annotated
 * as-is. */
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
    MpegEncContext *s = avctx->priv_data;
    AVCPBProperties *cpb_props;

    mpv_encode_defaults(s);

    /* Map the pixel format onto the internal chroma subsampling mode
     * (break statements between cases are not visible in this view). */
    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUVJ444P:
    case AV_PIX_FMT_YUV444P:
        s->chroma_format = CHROMA_444;
    case AV_PIX_FMT_YUVJ422P:
    case AV_PIX_FMT_YUV422P:
        s->chroma_format = CHROMA_422;
    case AV_PIX_FMT_YUVJ420P:
    case AV_PIX_FMT_YUV420P:
        s->chroma_format = CHROMA_420;

    avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);

    /* Import deprecated public options into the private context. */
#if FF_API_PRIVATE_OPT
FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->rtp_payload_size)
        s->rtp_payload_size = avctx->rtp_payload_size;
    if (avctx->me_penalty_compensation)
        s->me_penalty_compensation = avctx->me_penalty_compensation;
        s->me_pre = avctx->pre_me;
FF_ENABLE_DEPRECATION_WARNINGS

    s->bit_rate = avctx->bit_rate;
    s->width    = avctx->width;
    s->height   = avctx->height;
    /* Cap the GOP length unless experimental compliance is requested. */
    if (avctx->gop_size > 600 &&
        avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
        av_log(avctx, AV_LOG_WARNING,
               "keyframe interval too large!, reducing it from %d to %d\n",
               avctx->gop_size, 600);
        avctx->gop_size = 600;
    s->gop_size     = avctx->gop_size;

    if (avctx->max_b_frames > MAX_B_FRAMES) {
        av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
               "is %d.\n", MAX_B_FRAMES);
        avctx->max_b_frames = MAX_B_FRAMES;
    s->max_b_frames = avctx->max_b_frames;
    s->codec_id     = avctx->codec->id;
    s->strict_std_compliance = avctx->strict_std_compliance;
    s->quarter_sample     = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
    s->rtp_mode           = !!s->rtp_payload_size;
    s->intra_dc_precision = avctx->intra_dc_precision;

    // workaround some differences between how applications specify dc precision
    if (s->intra_dc_precision < 0) {
        s->intra_dc_precision += 8;
    } else if (s->intra_dc_precision >= 8)
        s->intra_dc_precision -= 8;

    if (s->intra_dc_precision < 0) {
        av_log(avctx, AV_LOG_ERROR,
                "intra dc precision must be positive, note some applications use"
                " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
        return AVERROR(EINVAL);

    if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))

    /* Only MPEG-2 supports nonzero intra DC precision (up to 3, i.e. 11 bit). */
    if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
        av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
        return AVERROR(EINVAL);
    s->user_specified_pts = AV_NOPTS_VALUE;

    if (s->gop_size <= 1) {

    s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);

    /* Adaptive quantization is enabled when any masking option or QP-RD is
     * in use (the trailing condition line is not visible in this view). */
    s->adaptive_quant = (avctx->lumi_masking ||
                         avctx->dark_masking ||
                         avctx->temporal_cplx_masking ||
                         avctx->spatial_cplx_masking ||
                         (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&

    s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);

    /* Auto-size the VBV buffer when only a max rate was given. The constants
     * follow the respective standards' VBV/VBV-like buffer models. */
    if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
        switch(avctx->codec_id) {
        case AV_CODEC_ID_MPEG1VIDEO:
        case AV_CODEC_ID_MPEG2VIDEO:
            avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
        case AV_CODEC_ID_MPEG4:
        case AV_CODEC_ID_MSMPEG4V1:
        case AV_CODEC_ID_MSMPEG4V2:
        case AV_CODEC_ID_MSMPEG4V3:
            if       (avctx->rc_max_rate >= 15000000) {
                avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
            } else if(avctx->rc_max_rate >=  2000000) {
                avctx->rc_buffer_size =  80 + (avctx->rc_max_rate -  2000000LL) * (320- 80) / (15000000 -  2000000);
            } else if(avctx->rc_max_rate >=   384000) {
                avctx->rc_buffer_size =  40 + (avctx->rc_max_rate -   384000LL) * ( 80- 40) / ( 2000000 -   384000);
                avctx->rc_buffer_size = 40;
            avctx->rc_buffer_size *= 16384;
        if (avctx->rc_buffer_size) {
            av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);

    /* Rate-control option sanity checks. */
    if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
        av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
        return AVERROR(EINVAL);

    if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
        av_log(avctx, AV_LOG_INFO,
               "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");

    if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
        av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
        return AVERROR(EINVAL);

    if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
        av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
        return AVERROR(EINVAL);

    /* CBR (max == target) with a different min rate cannot be satisfied. */
    if (avctx->rc_max_rate &&
        avctx->rc_max_rate == avctx->bit_rate &&
        avctx->rc_max_rate != avctx->rc_min_rate) {
        av_log(avctx, AV_LOG_INFO,
               "impossible bitrate constraints, this will fail\n");

    /* The VBV buffer must hold at least one frame at the target bitrate. */
    if (avctx->rc_buffer_size &&
        avctx->bit_rate * (int64_t)avctx->time_base.num >
            avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
        av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
        return AVERROR(EINVAL);

    if (!s->fixed_qscale &&
        avctx->bit_rate * av_q2d(avctx->time_base) >
            avctx->bit_rate_tolerance) {
        av_log(avctx, AV_LOG_WARNING,
               "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
        avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);

    /* MPEG-1/2 vbv_delay is a 16-bit field in 90 kHz units; warn when the
     * buffer is too large to be expressed and VBR signalling will be used. */
    if (avctx->rc_max_rate &&
        avctx->rc_min_rate == avctx->rc_max_rate &&
        (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
         s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
        90000LL * (avctx->rc_buffer_size - 1) >
            avctx->rc_max_rate * 0xFFFFLL) {
        av_log(avctx, AV_LOG_INFO,
               "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
               "specified vbv buffer is too large for the given bitrate!\n");

    /* Per-codec feature gating. */
    if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
        s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
        s->codec_id != AV_CODEC_ID_FLV1) {
        av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
        return AVERROR(EINVAL);

    if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
        av_log(avctx, AV_LOG_ERROR,
               "OBMC is only supported with simple mb decision\n");
        return AVERROR(EINVAL);

    if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
        av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
        return AVERROR(EINVAL);

    if (s->max_b_frames &&
        s->codec_id != AV_CODEC_ID_MPEG4 &&
        s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
        s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
        av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
        return AVERROR(EINVAL);
    if (s->max_b_frames < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "max b frames must be 0 or positive for mpegvideo based encoders\n");
        return AVERROR(EINVAL);

    /* These codecs store the pixel aspect ratio in 8-bit fields. */
    if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
         s->codec_id == AV_CODEC_ID_H263  ||
         s->codec_id == AV_CODEC_ID_H263P) &&
        (avctx->sample_aspect_ratio.num > 255 ||
         avctx->sample_aspect_ratio.den > 255)) {
        av_log(avctx, AV_LOG_WARNING,
               "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
               avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
        av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
                   avctx->sample_aspect_ratio.num,  avctx->sample_aspect_ratio.den, 255);

    /* Per-codec resolution constraints. */
    if ((s->codec_id == AV_CODEC_ID_H263  ||
         s->codec_id == AV_CODEC_ID_H263P) &&
        (avctx->width  > 2048 ||
         avctx->height > 1152 )) {
        av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
        return AVERROR(EINVAL);
    if ((s->codec_id == AV_CODEC_ID_H263  ||
         s->codec_id == AV_CODEC_ID_H263P ||
         s->codec_id == AV_CODEC_ID_RV20) &&
        ((avctx->width &3) ||
         (avctx->height&3) )) {
        av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
        return AVERROR(EINVAL);

    if (s->codec_id == AV_CODEC_ID_RV10 &&
         avctx->height&15 )) {
        av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
        return AVERROR(EINVAL);

    if ((s->codec_id == AV_CODEC_ID_WMV1 ||
         s->codec_id == AV_CODEC_ID_WMV2) &&
        av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
        return AVERROR(EINVAL);

    if ((avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
        s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
        av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
        return AVERROR(EINVAL);

#if FF_API_PRIVATE_OPT
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->mpeg_quant)
    FF_ENABLE_DEPRECATION_WARNINGS

    // FIXME mpeg2 uses that too
    if (s->mpeg_quant && (   s->codec_id != AV_CODEC_ID_MPEG4
                          && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
        av_log(avctx, AV_LOG_ERROR,
               "mpeg2 style quantization not supported by codec\n");
        return AVERROR(EINVAL);

    if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
        av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
        return AVERROR(EINVAL);

    if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
        avctx->mb_decision != FF_MB_DECISION_RD) {
        av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
        return AVERROR(EINVAL);

    if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
        (s->codec_id == AV_CODEC_ID_AMV ||
         s->codec_id == AV_CODEC_ID_MJPEG)) {
        // Used to produce garbage with MJPEG.
        av_log(avctx, AV_LOG_ERROR,
               "QP RD is no longer compatible with MJPEG or AMV\n");
        return AVERROR(EINVAL);

#if FF_API_PRIVATE_OPT
FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->scenechange_threshold)
        s->scenechange_threshold = avctx->scenechange_threshold;
FF_ENABLE_DEPRECATION_WARNINGS

    if (s->scenechange_threshold < 1000000000 &&
        (avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
        av_log(avctx, AV_LOG_ERROR,
               "closed gop with scene change detection are not supported yet, "
               "set threshold to 1000000000\n");
        return AVERROR_PATCHWELCOME;

    if (avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
        if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
            s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
            av_log(avctx, AV_LOG_ERROR,
                   "low delay forcing is only available for mpeg2, "
                   "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
            return AVERROR(EINVAL);
        if (s->max_b_frames != 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "B-frames cannot be used with low delay\n");
            return AVERROR(EINVAL);

    if (s->q_scale_type == 1) {
        if (avctx->qmax > 28) {
            av_log(avctx, AV_LOG_ERROR,
                   "non linear quant only supports qmax <= 28 currently\n");
            return AVERROR_PATCHWELCOME;

    if (avctx->slices > 1 &&
        (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
        av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
        return AVERROR(EINVAL);

    if (avctx->thread_count > 1 &&
        s->codec_id != AV_CODEC_ID_MPEG4      &&
        s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
        s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
        s->codec_id != AV_CODEC_ID_MJPEG      &&
        (s->codec_id != AV_CODEC_ID_H263P)) {
        av_log(avctx, AV_LOG_ERROR,
               "multi threaded encoding not supported by codec\n");
        return AVERROR_PATCHWELCOME;

    if (avctx->thread_count < 1) {
        av_log(avctx, AV_LOG_ERROR,
               "automatic thread number detection not supported by codec, "
        return AVERROR_PATCHWELCOME;

#if FF_API_PRIVATE_OPT
FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->b_frame_strategy)
        s->b_frame_strategy = avctx->b_frame_strategy;
    if (avctx->b_sensitivity != 40)
        s->b_sensitivity = avctx->b_sensitivity;
FF_ENABLE_DEPRECATION_WARNINGS

    if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
        av_log(avctx, AV_LOG_INFO,
               "notice: b_frame_strategy only affects the first pass\n");
        s->b_frame_strategy = 0;

    /* Normalize the time base to lowest terms. */
    i = av_gcd(avctx->time_base.den, avctx->time_base.num);
        av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
        avctx->time_base.den /= i;
        avctx->time_base.num /= i;

    /* Default quantizer rounding biases: MPEG-style codecs round toward
     * +3/8, H.263-style codecs truncate intra and bias inter negative. */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
        // (a + x * 3 / 8) / x
        s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
        s->inter_quant_bias = 0;
        s->intra_quant_bias = 0;
        s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));

    if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
        av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
        return AVERROR(EINVAL);

    av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);

    /* MPEG-4 stores the timebase denominator in a 16-bit field. */
    if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
        avctx->time_base.den > (1 << 16) - 1) {
        av_log(avctx, AV_LOG_ERROR,
               "timebase %d/%d not supported by MPEG 4 standard, "
               "the maximum admitted value for the timebase denominator "
               "is %d\n", avctx->time_base.num, avctx->time_base.den,
        return AVERROR(EINVAL);
    s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;

    /* Per-codec output format / feature configuration (break statements and
     * some assignments are not visible in this view). */
    switch (avctx->codec->id) {
    case AV_CODEC_ID_MPEG1VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay  = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
        avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
    case AV_CODEC_ID_MPEG2VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay  = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
        avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
#if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
    case AV_CODEC_ID_MJPEG:
    case AV_CODEC_ID_AMV:
        s->out_format = FMT_MJPEG;
        s->intra_only = 1; /* force intra only for jpeg */
        if ((ret = ff_mjpeg_encode_init(s)) < 0)
    case AV_CODEC_ID_SPEEDHQ:
        s->out_format = FMT_SPEEDHQ;
        s->intra_only = 1; /* force intra only for SHQ */
        if (!CONFIG_SPEEDHQ_ENCODER)
            return AVERROR_ENCODER_NOT_FOUND;
        if ((ret = ff_speedhq_encode_init(s)) < 0)
    case AV_CODEC_ID_H261:
        if (!CONFIG_H261_ENCODER)
            return AVERROR_ENCODER_NOT_FOUND;
        if (ff_h261_get_picture_format(s->width, s->height) < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "The specified picture size of %dx%d is not valid for the "
                   "H.261 codec.\nValid sizes are 176x144, 352x288\n",
                    s->width, s->height);
            return AVERROR(EINVAL);
        s->out_format = FMT_H261;
        s->rtp_mode   = 0; /* Sliced encoding not supported */
    case AV_CODEC_ID_H263:
        if (!CONFIG_H263_ENCODER)
            return AVERROR_ENCODER_NOT_FOUND;
        if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
                             s->width, s->height) == 8) {
            av_log(avctx, AV_LOG_ERROR,
                   "The specified picture size of %dx%d is not valid for "
                   "the H.263 codec.\nValid sizes are 128x96, 176x144, "
                   "352x288, 704x576, and 1408x1152. "
                   "Try H.263+.\n", s->width, s->height);
            return AVERROR(EINVAL);
        s->out_format = FMT_H263;
    case AV_CODEC_ID_H263P:
        s->out_format = FMT_H263;
        /* H.263+ advanced intra coding doubles as the modified-quant flag. */
        s->h263_aic        = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
        s->modified_quant  = s->h263_aic;
        s->loop_filter     = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
        s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
        /* These are just to be sure */
    case AV_CODEC_ID_FLV1:
        s->out_format      = FMT_H263;
        s->h263_flv        = 2; /* format = 1; 11-bit codes */
        s->unrestricted_mv = 1;
        s->rtp_mode  = 0; /* don't allow GOB */
    case AV_CODEC_ID_RV10:
        s->out_format = FMT_H263;
    case AV_CODEC_ID_RV20:
        s->out_format      = FMT_H263;
        s->modified_quant  = 1;
        s->unrestricted_mv = 0;
    case AV_CODEC_ID_MPEG4:
        s->out_format      = FMT_H263;
        s->unrestricted_mv = 1;
        s->low_delay       = s->max_b_frames ? 0 : 1;
        avctx->delay       = s->low_delay ? 0 : (s->max_b_frames + 1);
    case AV_CODEC_ID_MSMPEG4V2:
        s->out_format      = FMT_H263;
        s->unrestricted_mv = 1;
        s->msmpeg4_version = 2;
    case AV_CODEC_ID_MSMPEG4V3:
        s->out_format        = FMT_H263;
        s->unrestricted_mv   = 1;
        s->msmpeg4_version   = 3;
        s->flipflop_rounding = 1;
    case AV_CODEC_ID_WMV1:
        s->out_format        = FMT_H263;
        s->unrestricted_mv   = 1;
        s->msmpeg4_version   = 4;
        s->flipflop_rounding = 1;
    case AV_CODEC_ID_WMV2:
        s->out_format        = FMT_H263;
        s->unrestricted_mv   = 1;
        s->msmpeg4_version   = 5;
        s->flipflop_rounding = 1;
        return AVERROR(EINVAL);

#if FF_API_PRIVATE_OPT
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->noise_reduction)
        s->noise_reduction = avctx->noise_reduction;
    FF_ENABLE_DEPRECATION_WARNINGS

    avctx->has_b_frames = !s->low_delay;

    s->progressive_frame    =
    s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
                                                AV_CODEC_FLAG_INTERLACED_ME) ||

    /* Core context and DSP initialization. */
    if ((ret = ff_mpv_common_init(s)) < 0)

    ff_fdctdsp_init(&s->fdsp, avctx);
    ff_me_cmp_init(&s->mecc, avctx);
    ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
    ff_pixblockdsp_init(&s->pdsp, avctx);
    ff_qpeldsp_init(&s->qdsp);

    if (s->msmpeg4_version) {
        int ac_stats_size = 2 * 2 * (MAX_LEVEL + 1) * (MAX_RUN + 1) * 2 * sizeof(int);
        if (!(s->ac_stats = av_mallocz(ac_stats_size)))
            return AVERROR(ENOMEM);

    /* Encoder-side table allocations; any failure aborts with ENOMEM
     * (cleanup presumably happens via the caller's close path). */
    if (!(avctx->stats_out = av_mallocz(256))               ||
        !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix,          32) ||
        !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix,   32) ||
        !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix,          32) ||
        !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16,        32) ||
        !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
        !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16,        32) ||
        !FF_ALLOCZ_TYPED_ARRAY(s->input_picture,           MAX_PICTURE_COUNT) ||
        !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_PICTURE_COUNT))
        return AVERROR(ENOMEM);

    if (s->noise_reduction) {
        if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
            return AVERROR(ENOMEM);

    ff_dct_encode_init(s);

    if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
        s->chroma_qscale_table = ff_h263_chroma_qscale_table;

    if (s->slice_context_count > 1) {
        if (avctx->codec_id == AV_CODEC_ID_H263P)
            s->h263_slice_structured = 1;

    s->quant_precision = 5;

#if FF_API_PRIVATE_OPT
FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->frame_skip_threshold)
        s->frame_skip_threshold = avctx->frame_skip_threshold;
    if (avctx->frame_skip_factor)
        s->frame_skip_factor = avctx->frame_skip_factor;
    if (avctx->frame_skip_exp)
        s->frame_skip_exp = avctx->frame_skip_exp;
    if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
        s->frame_skip_cmp = avctx->frame_skip_cmp;
FF_ENABLE_DEPRECATION_WARNINGS

    ff_set_cmp(&s->mecc, s->mecc.ildct_cmp,      avctx->ildct_cmp);
    ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);

    /* Codec-family specific encoder setup. */
    if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
        ff_h261_encode_init(s);
    if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
        ff_h263_encode_init(s);
    if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
        ff_msmpeg4_encode_init(s);
    if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
        && s->out_format == FMT_MPEG1)
        ff_mpeg1_encode_init(s);

    /* Select default quantization matrices per codec, permuted for the IDCT;
     * user-supplied matrices override the defaults. */
    for (i = 0; i < 64; i++) {
        int j = s->idsp.idct_permutation[i];
        if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
            s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
        } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
        } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
            s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
            s->chroma_intra_matrix[j] =
            s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
        if (avctx->intra_matrix)
            s->intra_matrix[j] = avctx->intra_matrix[i];
        if (avctx->inter_matrix)
            s->inter_matrix[j] = avctx->inter_matrix[i];

    /* precompute matrix */
    /* for mjpeg, we do include qscale in the matrix */
    if (s->out_format != FMT_MJPEG) {
        ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
                          s->intra_matrix, s->intra_quant_bias, avctx->qmin,
        ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
                          s->inter_matrix, s->inter_quant_bias, avctx->qmin,

    if ((ret = ff_rate_control_init(s)) < 0)

#if FF_API_PRIVATE_OPT
FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->brd_scale)
        s->brd_scale = avctx->brd_scale;

    if (avctx->prediction_method)
        s->pred = avctx->prediction_method + 1;
FF_ENABLE_DEPRECATION_WARNINGS

    /* B-frame strategy 2 needs scratch frames (downscaled by brd_scale)
     * for its look-ahead decision. */
    if (s->b_frame_strategy == 2) {
        for (i = 0; i < s->max_b_frames + 2; i++) {
            s->tmp_frames[i] = av_frame_alloc();
            if (!s->tmp_frames[i])
                return AVERROR(ENOMEM);

            s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
            s->tmp_frames[i]->width  = s->width  >> s->brd_scale;
            s->tmp_frames[i]->height = s->height >> s->brd_scale;

            ret = av_frame_get_buffer(s->tmp_frames[i], 0);

    /* Export the coded-picture-buffer properties as packet side data. */
    cpb_props = ff_add_cpb_side_data(avctx);
        return AVERROR(ENOMEM);
    cpb_props->max_bitrate = avctx->rc_max_rate;
    cpb_props->min_bitrate = avctx->rc_min_rate;
    cpb_props->avg_bitrate = avctx->bit_rate;
    cpb_props->buffer_size = avctx->rc_buffer_size;
/* Free everything allocated by ff_mpv_encode_init. Safe to call on a
 * partially-initialized context: av_freep(NULL-field) is a no-op and the
 * chroma matrix pointers are only freed when they do not alias the luma
 * ones (they may point at the same allocation). */
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
    MpegEncContext *s = avctx->priv_data;

    ff_rate_control_uninit(s);

    ff_mpv_common_end(s);
    if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
        s->out_format == FMT_MJPEG)
        ff_mjpeg_encode_close(s);

    av_freep(&avctx->extradata);

    for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
        av_frame_free(&s->tmp_frames[i]);

    ff_free_picture_tables(&s->new_picture);
    ff_mpeg_unref_picture(avctx, &s->new_picture);

    av_freep(&avctx->stats_out);
    av_freep(&s->ac_stats);

    /* The chroma matrices may share storage with the intra matrices; only
     * free them when they are distinct allocations, then NULL them so the
     * av_freep calls below cannot double-free. */
    if(s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
    if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
    s->q_chroma_intra_matrix=   NULL;
    s->q_chroma_intra_matrix16= NULL;
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);
/* Sum of absolute errors of a 16x16 block against a constant reference
 * value `ref` (typically the block mean — see get_intra_count below).
 * The accumulator declaration and return are not visible in this view. */
static int get_sae(uint8_t *src, int ref, int stride)
    for (y = 0; y < 16; y++) {
        for (x = 0; x < 16; x++) {
            acc += FFABS(src[x + y * stride] - ref);
/* Count the 16x16 blocks for which intra coding looks cheaper than inter:
 * a block votes "intra" when its deviation from its own mean (sae) plus a
 * fixed margin of 500 is still below the SAD against the reference frame.
 * Width/height are rounded down to whole macroblocks. */
static int get_intra_count(MpegEncContext *s, uint8_t *src,
                           uint8_t *ref, int stride)
    h = s->height & ~15;

    for (y = 0; y < h; y += 16) {
        for (x = 0; x < w; x += 16) {
            int offset = x + y * stride;
            int sad  = s->mecc.sad[0](NULL, src + offset, ref + offset,
            /* pix_sum over 256 pixels, +128 for rounding, >>8 = block mean */
            int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
            int sae  = get_sae(src + offset, mean, stride);
            /* 500 is an empirical bias toward inter coding. */
            acc += sae + 500 < sad;
/* Thin wrapper around ff_alloc_picture with the encoder context's geometry;
 * `shared` selects whether the picture wraps caller-owned buffers instead
 * of allocating its own. Returns ff_alloc_picture's error code. */
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
    return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
                            s->chroma_x_shift, s->chroma_y_shift, s->out_format,
                            s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
                            &s->linesize, &s->uvlinesize);
/* Take one user-supplied input frame into the encoder's reorder queue
 * (s->input_picture[]). Validates/derives the pts, either references the
 * frame directly ("direct"/shared mode, when strides and alignment match)
 * or copies it into an internal Picture with edge padding, then shifts the
 * queue so that s->input_picture[0] is always the oldest pending frame.
 * Returns 0 on success, a negative AVERROR on failure. */
1101 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1103 Picture *pic = NULL;
1105 int i, display_picture_number = 0, ret;
     // delay between input and output: B-frame count, or 1 reorder slot
     // unless low_delay is set
1106 int encoding_delay = s->max_b_frames ? s->max_b_frames
1107 : (s->low_delay ? 0 : 1);
1108 int flush_offset = 1;
1113 display_picture_number = s->input_picture_number++;
     /* pts validation: must be strictly increasing vs. the last user pts */
1115 if (pts != AV_NOPTS_VALUE) {
1116 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1117 int64_t last = s->user_specified_pts;
1120 av_log(s->avctx, AV_LOG_ERROR,
1121 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1123 return AVERROR(EINVAL);
     // remember pts-dts offset from the second frame for dts generation
1126 if (!s->low_delay && display_picture_number == 1)
1127 s->dts_delta = pts - last;
1129 s->user_specified_pts = pts;
     /* no pts given: guess last+1, or fall back to the display number */
1131 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1132 s->user_specified_pts =
1133 pts = s->user_specified_pts + 1;
1134 av_log(s->avctx, AV_LOG_INFO,
1135 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1138 pts = display_picture_number;
     /* "direct" (zero-copy) mode requires matching buffers, strides,
      * dimensions divisible by 16 and STRIDE_ALIGN-aligned data/stride */
1142 if (!pic_arg->buf[0] ||
1143 pic_arg->linesize[0] != s->linesize ||
1144 pic_arg->linesize[1] != s->uvlinesize ||
1145 pic_arg->linesize[2] != s->uvlinesize)
1147 if ((s->width & 15) || (s->height & 15))
1149 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1151 if (s->linesize & (STRIDE_ALIGN-1))
1154 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1155 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1157 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1161 pic = &s->picture[i];
     // direct mode: just reference the caller's frame
1165 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1168 ret = alloc_picture(s, pic, direct);
     // detect the case where the caller handed back our own in-place buffer
1173 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1174 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1175 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
     /* copy path: per-plane copy into the internal picture */
1178 int h_chroma_shift, v_chroma_shift;
1179 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1183 for (i = 0; i < 3; i++) {
1184 int src_stride = pic_arg->linesize[i];
1185 int dst_stride = i ? s->uvlinesize : s->linesize;
1186 int h_shift = i ? h_chroma_shift : 0;
1187 int v_shift = i ? v_chroma_shift : 0;
1188 int w = s->width >> h_shift;
1189 int h = s->height >> v_shift;
1190 uint8_t *src = pic_arg->data[i];
1191 uint8_t *dst = pic->f->data[i];
     // interlaced MPEG-2 needs taller bottom padding (32-alignment)
1194 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1195 && !s->progressive_sequence
1196 && FFALIGN(s->height, 32) - s->height > 16)
1199 if (!s->avctx->rc_buffer_size)
1200 dst += INPLACE_OFFSET;
1202 if (src_stride == dst_stride)
1203 memcpy(dst, src, src_stride * h);
1206 uint8_t *dst2 = dst;
1208 memcpy(dst2, src, w);
     // replicate border pixels when dimensions aren't MB-aligned
1213 if ((s->width & 15) || (s->height & (vpad-1))) {
1214 s->mpvencdsp.draw_edges(dst, dst_stride,
1224 ret = av_frame_copy_props(pic->f, pic_arg);
1228 pic->f->display_picture_number = display_picture_number;
1229 pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1231 /* Flushing: When we have not received enough input frames,
1232 * ensure s->input_picture[0] contains the first picture */
1233 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1234 if (s->input_picture[flush_offset])
1237 if (flush_offset <= 1)
1240 encoding_delay = encoding_delay - flush_offset + 1;
1243 /* shift buffer entries */
1244 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1245 s->input_picture[i - flush_offset] = s->input_picture[i];
1247 s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether frame 'p' is similar enough to reference 'ref' to be
 * skipped entirely. Accumulates a per-8x8-block comparison metric over all
 * three planes; |frame_skip_exp| selects the accumulation mode (max, L1,
 * L2, L3, L4), a negative exponent normalizes via pow() afterwards.
 * Returns nonzero (skip) when the score is below both the skip threshold
 * and a lambda-scaled skip factor. */
1252 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1256 int64_t score64 = 0;
1258 for (plane = 0; plane < 3; plane++) {
1259 const int stride = p->f->linesize[plane];
     // luma is 2x2 blocks of 8 per MB, chroma 1x1 (4:2:0 layout)
1260 const int bw = plane ? 1 : 2;
1261 for (y = 0; y < s->mb_height * bw; y++) {
1262 for (x = 0; x < s->mb_width * bw; x++) {
     // non-shared pictures carry an INPLACE-style offset of 16
1263 int off = p->shared ? 0 : 16;
1264 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1265 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1266 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1268 switch (FFABS(s->frame_skip_exp)) {
1269 case 0: score = FFMAX(score, v); break;
1270 case 1: score += FFABS(v); break;
1271 case 2: score64 += v * (int64_t)v; break;
1272 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1273 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
     // negative exponent: convert the sum to a per-MB root-mean metric
1282 if (s->frame_skip_exp < 0)
1283 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1284 -1.0/s->frame_skip_exp);
1286 if (score64 < s->frame_skip_threshold)
1288 if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
/* Encode one frame with a helper AVCodecContext via the send/receive API
 * and report the resulting packet size (used by estimate_best_b_count()).
 * EAGAIN/EOF from receive are tolerated; other errors propagate.
 * NOTE(review): the size-accumulation/return lines are elided here. */
1293 static int encode_frame(AVCodecContext *c, AVFrame *frame, AVPacket *pkt)
1298 ret = avcodec_send_frame(c, frame);
1303 ret = avcodec_receive_packet(c, pkt);
1306 av_packet_unref(pkt);
1307 } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
/* b_frame_strategy 2: pick the number of B-frames by brute force.
 * Downscales the queued input frames by 'brd_scale', then for each
 * candidate B-count j encodes the mini-GOP with a throwaway encoder and
 * accumulates a rate-distortion cost (bits * lambda2 plus SSE error);
 * returns the j with the lowest cost, or a negative AVERROR. */
1314 static int estimate_best_b_count(MpegEncContext *s)
1316 const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1318 const int scale = s->brd_scale;
1319 int width = s->width >> scale;
1320 int height = s->height >> scale;
1321 int i, j, out_size, p_lambda, b_lambda, lambda2;
1322 int64_t best_rd = INT64_MAX;
1323 int best_b_count = -1;
1326 av_assert0(scale >= 0 && scale <= 3);
1328 pkt = av_packet_alloc();
1330 return AVERROR(ENOMEM);
1333 //s->next_picture_ptr->quality;
1334 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1335 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1336 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1337 if (!b_lambda) // FIXME we should do this somewhere else
1338 b_lambda = p_lambda;
1339 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
     /* build downscaled copies: slot 0 is the previous reference,
      * slots 1..max_b_frames+1 are the queued input frames */
1342 for (i = 0; i < s->max_b_frames + 2; i++) {
1343 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1344 s->next_picture_ptr;
1347 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1348 pre_input = *pre_input_ptr;
1349 memcpy(data, pre_input_ptr->f->data, sizeof(data));
     // non-shared internal pictures carry the in-place offset
1351 if (!pre_input.shared && i) {
1352 data[0] += INPLACE_OFFSET;
1353 data[1] += INPLACE_OFFSET;
1354 data[2] += INPLACE_OFFSET;
1357 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1358 s->tmp_frames[i]->linesize[0],
1360 pre_input.f->linesize[0],
1362 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1363 s->tmp_frames[i]->linesize[1],
1365 pre_input.f->linesize[1],
1366 width >> 1, height >> 1);
1367 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1368 s->tmp_frames[i]->linesize[2],
1370 pre_input.f->linesize[2],
1371 width >> 1, height >> 1);
     /* try each candidate B-frame count j */
1375 for (j = 0; j < s->max_b_frames + 1; j++) {
1379 if (!s->input_picture[j])
     // throwaway encoder context mirroring the relevant user settings
1382 c = avcodec_alloc_context3(NULL);
1384 ret = AVERROR(ENOMEM);
1390 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1391 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1392 c->mb_decision = s->avctx->mb_decision;
1393 c->me_cmp = s->avctx->me_cmp;
1394 c->mb_cmp = s->avctx->mb_cmp;
1395 c->me_sub_cmp = s->avctx->me_sub_cmp;
1396 c->pix_fmt = AV_PIX_FMT_YUV420P;
1397 c->time_base = s->avctx->time_base;
1398 c->max_b_frames = s->max_b_frames;
1400 ret = avcodec_open2(c, codec, NULL);
     // frame 0 is always coded as I; its bits are not counted in rd
1405 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1406 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1408 out_size = encode_frame(c, s->tmp_frames[0], pkt);
1414 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1416 for (i = 0; i < s->max_b_frames + 1; i++) {
     // every (j+1)-th frame (and the last) is a P, the rest are Bs
1417 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1419 s->tmp_frames[i + 1]->pict_type = is_p ?
1420 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1421 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1423 out_size = encode_frame(c, s->tmp_frames[i + 1], pkt);
1429 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1432 /* get the delayed frames */
1433 out_size = encode_frame(c, NULL, pkt);
1438 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
     // add the distortion (per-plane SSE) reported by the helper encoder
1440 rd += c->error[0] + c->error[1] + c->error[2];
1448 avcodec_free_context(&c);
1449 av_packet_unref(pkt);
1456 av_packet_free(&pkt);
1458 return best_b_count;
/* Choose the next picture to encode and its coding type.
 * Pops the reorder queue, optionally skips the frame (frame_skip_*),
 * decides the number of B-frames via the configured b_frame_strategy,
 * fills s->reordered_input_picture[] in coding order, and references the
 * chosen picture into s->new_picture / s->current_picture.
 * Returns 0 on success or a negative AVERROR. */
1461 static int select_input_picture(MpegEncContext *s)
1465 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1466 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1467 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1469 /* set next picture type & ordering */
1470 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
     /* frame skipping: drop the input if it is close enough to the
      * previous reference */
1471 if (s->frame_skip_threshold || s->frame_skip_factor) {
1472 if (s->picture_in_gop_number < s->gop_size &&
1473 s->next_picture_ptr &&
1474 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1475 // FIXME check that the gop check above is +-1 correct
1476 av_frame_unref(s->input_picture[0]->f);
1478 ff_vbv_update(s, 0);
     /* no reference yet (or intra-only): code as I */
1484 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1485 !s->next_picture_ptr || s->intra_only) {
1486 s->reordered_input_picture[0] = s->input_picture[0];
1487 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1488 s->reordered_input_picture[0]->f->coded_picture_number =
1489 s->coded_picture_number++;
     /* two-pass mode: picture types come from the pass-1 log */
1493 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1494 for (i = 0; i < s->max_b_frames + 1; i++) {
1495 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1497 if (pict_num >= s->rc_context.num_entries)
1499 if (!s->input_picture[i]) {
1500 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1504 s->input_picture[i]->f->pict_type =
1505 s->rc_context.entry[pict_num].new_pict_type;
     /* B-frame count selection per strategy 0/1/2 */
1509 if (s->b_frame_strategy == 0) {
1510 b_frames = s->max_b_frames;
1511 while (b_frames && !s->input_picture[b_frames])
1513 } else if (s->b_frame_strategy == 1) {
     // strategy 1: score frames by intra-cheap MB count vs. predecessor
1514 for (i = 1; i < s->max_b_frames + 1; i++) {
1515 if (s->input_picture[i] &&
1516 s->input_picture[i]->b_frame_score == 0) {
1517 s->input_picture[i]->b_frame_score =
1519 s->input_picture[i ]->f->data[0],
1520 s->input_picture[i - 1]->f->data[0],
1524 for (i = 0; i < s->max_b_frames + 1; i++) {
1525 if (!s->input_picture[i] ||
1526 s->input_picture[i]->b_frame_score - 1 >
1527 s->mb_num / s->b_sensitivity)
1531 b_frames = FFMAX(0, i - 1);
     // reset scores so the sliding window is re-evaluated next time
1534 for (i = 0; i < b_frames + 1; i++) {
1535 s->input_picture[i]->b_frame_score = 0;
1537 } else if (s->b_frame_strategy == 2) {
1538 b_frames = estimate_best_b_count(s);
     // honor user-forced picture types from the input frames
1545 for (i = b_frames - 1; i >= 0; i--) {
1546 int type = s->input_picture[i]->f->pict_type;
1547 if (type && type != AV_PICTURE_TYPE_B)
1550 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1551 b_frames == s->max_b_frames) {
1552 av_log(s->avctx, AV_LOG_ERROR,
1553 "warning, too many B-frames in a row\n");
     /* GOP boundary handling */
1556 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1557 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1558 s->gop_size > s->picture_in_gop_number) {
1559 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1561 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1563 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1567 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1568 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
     /* emit the reference frame first, then the B-frames, in coding order */
1571 s->reordered_input_picture[0] = s->input_picture[b_frames];
1572 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1573 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1574 s->reordered_input_picture[0]->f->coded_picture_number =
1575 s->coded_picture_number++;
1576 for (i = 0; i < b_frames; i++) {
1577 s->reordered_input_picture[i + 1] = s->input_picture[i];
1578 s->reordered_input_picture[i + 1]->f->pict_type =
1580 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1581 s->coded_picture_number++;
1586 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1588 if (s->reordered_input_picture[0]) {
     // reference=3 marks both fields referenced; B-frames are not refs
1589 s->reordered_input_picture[0]->reference =
1590 s->reordered_input_picture[0]->f->pict_type !=
1591 AV_PICTURE_TYPE_B ? 3 : 0;
1593 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1596 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1597 // input is a shared pix, so we can't modify it -> allocate a new
1598 // one & ensure that the shared one is reuseable
1601 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1604 pic = &s->picture[i];
1606 pic->reference = s->reordered_input_picture[0]->reference;
1607 if (alloc_picture(s, pic, 0) < 0) {
1611 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1615 /* mark us unused / free shared pic */
1616 av_frame_unref(s->reordered_input_picture[0]->f);
1617 s->reordered_input_picture[0]->shared = 0;
1619 s->current_picture_ptr = pic;
1621 // input is not a shared pix -> reuse buffer for current_pix
1622 s->current_picture_ptr = s->reordered_input_picture[0];
1623 for (i = 0; i < 4; i++) {
1624 if (s->new_picture.f->data[i])
1625 s->new_picture.f->data[i] += INPLACE_OFFSET;
1628 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1629 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1630 s->current_picture_ptr)) < 0)
1633 s->picture_number = s->new_picture.f->display_picture_number;
/* Per-frame encoder cleanup: pad reference-frame edges for unrestricted
 * motion vectors, record last picture type / lambda for rate control, and
 * mirror stats into the deprecated coded_frame/error fields. */
1638 static void frame_end(MpegEncContext *s)
1640 if (s->unrestricted_mv &&
1641 s->current_picture.reference &&
1643 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1644 int hshift = desc->log2_chroma_w;
1645 int vshift = desc->log2_chroma_h;
     // replicate border pixels so MC can reference outside the frame
1646 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1647 s->current_picture.f->linesize[0],
1648 s->h_edge_pos, s->v_edge_pos,
1649 EDGE_WIDTH, EDGE_WIDTH,
1650 EDGE_TOP | EDGE_BOTTOM);
1651 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1652 s->current_picture.f->linesize[1],
1653 s->h_edge_pos >> hshift,
1654 s->v_edge_pos >> vshift,
1655 EDGE_WIDTH >> hshift,
1656 EDGE_WIDTH >> vshift,
1657 EDGE_TOP | EDGE_BOTTOM);
1658 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1659 s->current_picture.f->linesize[2],
1660 s->h_edge_pos >> hshift,
1661 s->v_edge_pos >> vshift,
1662 EDGE_WIDTH >> hshift,
1663 EDGE_WIDTH >> vshift,
1664 EDGE_TOP | EDGE_BOTTOM);
     // remember per-type lambda for the next frame's rate control
1669 s->last_pict_type = s->pict_type;
1670 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1671 if (s->pict_type!= AV_PICTURE_TYPE_B)
1672 s->last_non_b_pict_type = s->pict_type;
     /* legacy API mirrors, compiled only while the deprecations last */
1674 #if FF_API_CODED_FRAME
1675 FF_DISABLE_DEPRECATION_WARNINGS
1676 av_frame_unref(s->avctx->coded_frame)
1677 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1678 FF_ENABLE_DEPRECATION_WARNINGS
1680 #if FF_API_ERROR_FRAME
1681 FF_DISABLE_DEPRECATION_WARNINGS
1682 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1683 sizeof(s->current_picture.encoding_error));
1684 FF_ENABLE_DEPRECATION_WARNINGS
/* Refresh the per-coefficient DCT noise-reduction offsets from the
 * running error sums, separately for intra and inter blocks. Counters are
 * halved once they exceed 2^16 to keep a decaying average. */
1688 static void update_noise_reduction(MpegEncContext *s)
1692 for (intra = 0; intra < 2; intra++) {
1693 if (s->dct_count[intra] > (1 << 16)) {
1694 for (i = 0; i < 64; i++) {
1695 s->dct_error_sum[intra][i] >>= 1;
1697 s->dct_count[intra] >>= 1;
     // offset ~= noise_reduction * count / error_sum (rounded)
1700 for (i = 0; i < 64; i++) {
1701 s->dct_offset[intra][i] = (s->noise_reduction *
1702 s->dct_count[intra] +
1703 s->dct_error_sum[intra][i] / 2) /
1704 (s->dct_error_sum[intra][i] + 1);
/* Per-frame encoder setup: rotate last/next/current reference pictures,
 * adjust data pointers and strides for field pictures, select the
 * unquantizer functions for the output format, and refresh noise
 * reduction tables. Returns 0 or a negative AVERROR. */
1709 static int frame_start(MpegEncContext *s)
1713 /* mark & release old frames */
1714 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1715 s->last_picture_ptr != s->next_picture_ptr &&
1716 s->last_picture_ptr->f->buf[0]) {
1717 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1720 s->current_picture_ptr->f->pict_type = s->pict_type;
1721 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1723 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1724 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1725 s->current_picture_ptr)) < 0)
     // non-B frames become the new forward reference
1728 if (s->pict_type != AV_PICTURE_TYPE_B) {
1729 s->last_picture_ptr = s->next_picture_ptr;
1731 s->next_picture_ptr = s->current_picture_ptr;
1734 if (s->last_picture_ptr) {
1735 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1736 if (s->last_picture_ptr->f->buf[0] &&
1737 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1738 s->last_picture_ptr)) < 0)
1741 if (s->next_picture_ptr) {
1742 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1743 if (s->next_picture_ptr->f->buf[0] &&
1744 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1745 s->next_picture_ptr)) < 0)
     /* field pictures: double strides, bottom field starts one line in */
1749 if (s->picture_structure!= PICT_FRAME) {
1751 for (i = 0; i < 4; i++) {
1752 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1753 s->current_picture.f->data[i] +=
1754 s->current_picture.f->linesize[i];
1756 s->current_picture.f->linesize[i] *= 2;
1757 s->last_picture.f->linesize[i] *= 2;
1758 s->next_picture.f->linesize[i] *= 2;
     /* pick unquantizer implementations matching the output bitstream */
1762 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1763 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1764 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1765 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1766 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1767 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1769 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1770 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1773 if (s->dct_error_sum) {
1774 av_assert2(s->noise_reduction && s->encoding);
1775 update_noise_reduction(s);
/* Public entry point: encode one input frame into one output packet.
 * Loads/reorders the input, allocates the packet, runs the per-slice
 * bitstream encoders, re-encodes at a higher lambda when the VBV would
 * overflow, writes stuffing and (for CBR MPEG-1/2) the vbv_delay field,
 * and fills pkt pts/dts/flags. Returns 0 or a negative AVERROR; sets
 * *got_packet when a packet was produced. */
1781 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1782 const AVFrame *pic_arg, int *got_packet)
1784 MpegEncContext *s = avctx->priv_data;
1785 int i, stuffing_count, ret;
1786 int context_count = s->slice_context_count;
1788 s->vbv_ignore_qmax = 0;
1790 s->picture_in_gop_number++;
1792 if (load_input_picture(s, pic_arg) < 0)
1795 if (select_input_picture(s) < 0) {
1800 if (s->new_picture.f->data[0]) {
     /* output buffer: single-context encoders can grow the internal
      * byte buffer; otherwise allocate the worst-case size up front */
1801 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1802 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1804 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1805 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1808 s->mb_info_ptr = av_packet_new_side_data(pkt,
1809 AV_PKT_DATA_H263_MB_INFO,
1810 s->mb_width*s->mb_height*12);
1811 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
     /* carve the packet into per-slice-context output regions,
      * proportional to each context's macroblock-row share */
1814 for (i = 0; i < context_count; i++) {
1815 int start_y = s->thread_context[i]->start_mb_y;
1816 int end_y = s->thread_context[i]-> end_mb_y;
1817 int h = s->mb_height;
1818 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1819 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1821 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1824 s->pict_type = s->new_picture.f->pict_type;
1826 ret = frame_start(s);
1830 ret = encode_picture(s, s->picture_number);
1831 if (growing_buffer) {
1832 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1833 pkt->data = s->pb.buf;
1834 pkt->size = avctx->internal->byte_buffer_size;
     /* mirror bit statistics into the deprecated avctx fields */
1839 #if FF_API_STAT_BITS
1840 FF_DISABLE_DEPRECATION_WARNINGS
1841 avctx->header_bits = s->header_bits;
1842 avctx->mv_bits = s->mv_bits;
1843 avctx->misc_bits = s->misc_bits;
1844 avctx->i_tex_bits = s->i_tex_bits;
1845 avctx->p_tex_bits = s->p_tex_bits;
1846 avctx->i_count = s->i_count;
1847 // FIXME f/b_count in avctx
1848 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1849 avctx->skip_count = s->skip_count;
1850 FF_ENABLE_DEPRECATION_WARNINGS
1855 if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->out_format == FMT_MJPEG)
1856 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
     /* VBV check: if the frame is too large, raise lambda and re-encode */
1858 if (avctx->rc_buffer_size) {
1859 RateControlContext *rcc = &s->rc_context;
1860 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1861 int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
1862 int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1864 if (put_bits_count(&s->pb) > max_size &&
1865 s->lambda < s->lmax) {
1866 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1867 (s->qscale + 1) / s->qscale);
1868 if (s->adaptive_quant) {
1870 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1871 s->lambda_table[i] =
1872 FFMAX(s->lambda_table[i] + min_step,
1873 s->lambda_table[i] * (s->qscale + 1) /
1876 s->mb_skipped = 0; // done in frame_start()
1877 // done in encode_picture() so we must undo it
1878 if (s->pict_type == AV_PICTURE_TYPE_P) {
1879 if (s->flipflop_rounding ||
1880 s->codec_id == AV_CODEC_ID_H263P ||
1881 s->codec_id == AV_CODEC_ID_MPEG4)
1882 s->no_rounding ^= 1;
1884 if (s->pict_type != AV_PICTURE_TYPE_B) {
1885 s->time_base = s->last_time_base;
1886 s->last_non_b_time = s->time - s->pp_time;
     // reset each context's bit writer before the retry
1888 for (i = 0; i < context_count; i++) {
1889 PutBitContext *pb = &s->thread_context[i]->pb;
1890 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1892 s->vbv_ignore_qmax = 1;
1893 av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1897 av_assert0(avctx->rc_max_rate);
1900 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1901 ff_write_pass1_stats(s);
     /* accumulate per-plane encoding error and attach encoder stats */
1903 for (i = 0; i < 4; i++) {
1904 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1905 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1907 ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1908 s->current_picture_ptr->encoding_error,
1909 (avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1912 if (avctx->flags & AV_CODEC_FLAG_PASS1)
1913 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1914 s->misc_bits + s->i_tex_bits +
1916 flush_put_bits(&s->pb);
1917 s->frame_bits = put_bits_count(&s->pb);
     /* rate control: append stuffing bytes to keep the VBV in range */
1919 stuffing_count = ff_vbv_update(s, s->frame_bits);
1920 s->stuffing_bits = 8*stuffing_count;
1921 if (stuffing_count) {
1922 if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
1923 av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
1927 switch (s->codec_id) {
1928 case AV_CODEC_ID_MPEG1VIDEO:
1929 case AV_CODEC_ID_MPEG2VIDEO:
1930 while (stuffing_count--) {
1931 put_bits(&s->pb, 8, 0);
1934 case AV_CODEC_ID_MPEG4:
     // MPEG-4 stuffing: a stuffing startcode followed by 0xFF bytes
1935 put_bits(&s->pb, 16, 0);
1936 put_bits(&s->pb, 16, 0x1C3);
1937 stuffing_count -= 4;
1938 while (stuffing_count--) {
1939 put_bits(&s->pb, 8, 0xFF);
1943 av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1945 flush_put_bits(&s->pb);
1946 s->frame_bits = put_bits_count(&s->pb);
1949 /* update MPEG-1/2 vbv_delay for CBR */
1950 if (avctx->rc_max_rate &&
1951 avctx->rc_min_rate == avctx->rc_max_rate &&
1952 s->out_format == FMT_MPEG1 &&
1953 90000LL * (avctx->rc_buffer_size - 1) <=
1954 avctx->rc_max_rate * 0xFFFFLL) {
1955 AVCPBProperties *props;
1958 int vbv_delay, min_delay;
1959 double inbits = avctx->rc_max_rate *
1960 av_q2d(avctx->time_base);
1961 int minbits = s->frame_bits - 8 *
1962 (s->vbv_delay_ptr - s->pb.buf - 1);
1963 double bits = s->rc_context.buffer_index + minbits - inbits;
1966 av_log(avctx, AV_LOG_ERROR,
1967 "Internal error, negative bits\n");
1969 av_assert1(s->repeat_first_field == 0);
     // vbv_delay is expressed in 90 kHz clock ticks
1971 vbv_delay = bits * 90000 / avctx->rc_max_rate;
1972 min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
1975 vbv_delay = FFMAX(vbv_delay, min_delay);
1977 av_assert0(vbv_delay < 0xFFFF);
     /* patch the 16-bit vbv_delay field already written in the header */
1979 s->vbv_delay_ptr[0] &= 0xF8;
1980 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1981 s->vbv_delay_ptr[1] = vbv_delay >> 5;
1982 s->vbv_delay_ptr[2] &= 0x07;
1983 s->vbv_delay_ptr[2] |= vbv_delay << 3;
1985 props = av_cpb_properties_alloc(&props_size);
1987 return AVERROR(ENOMEM);
     // 27 MHz units for CPB side data (90 kHz * 300)
1988 props->vbv_delay = vbv_delay * 300;
1990 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
1991 (uint8_t*)props, props_size);
1997 #if FF_API_VBV_DELAY
1998 FF_DISABLE_DEPRECATION_WARNINGS
1999 avctx->vbv_delay = vbv_delay * 300;
2000 FF_ENABLE_DEPRECATION_WARNINGS
2003 s->total_bits += s->frame_bits;
2004 #if FF_API_STAT_BITS
2005 FF_DISABLE_DEPRECATION_WARNINGS
2006 avctx->frame_bits = s->frame_bits;
2007 FF_ENABLE_DEPRECATION_WARNINGS
     /* pts/dts: reordered streams delay dts by one non-B frame */
2011 pkt->pts = s->current_picture.f->pts;
2012 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2013 if (!s->current_picture.f->coded_picture_number)
2014 pkt->dts = pkt->pts - s->dts_delta;
2016 pkt->dts = s->reordered_pts;
2017 s->reordered_pts = pkt->pts;
2019 pkt->dts = pkt->pts;
2020 if (s->current_picture.f->key_frame)
2021 pkt->flags |= AV_PKT_FLAG_KEY;
2023 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2028 /* release non-reference frames */
2029 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2030 if (!s->picture[i].reference)
2031 ff_mpeg_unref_picture(avctx, &s->picture[i]);
2034 av_assert1((s->frame_bits & 7) == 0);
2036 pkt->size = s->frame_bits / 8;
2037 *got_packet = !!pkt->size;
/* Zero out block 'n' when it contains only a few small coefficients whose
 * weighted score (tab[] by scan position, higher weight near DC) stays
 * below 'threshold'. A negative threshold additionally preserves the DC
 * coefficient (skip_dc). Clearing is done in scan order; the block's
 * last_index is reset to 0 (DC kept) or -1 (fully cleared). */
2041 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2042 int n, int threshold)
     // per-scan-position score weights: positions near DC count more
2044 static const char tab[64] = {
2045 3, 2, 2, 1, 1, 1, 1, 1,
2046 1, 1, 1, 1, 1, 1, 1, 1,
2047 1, 1, 1, 1, 1, 1, 1, 1,
2048 0, 0, 0, 0, 0, 0, 0, 0,
2049 0, 0, 0, 0, 0, 0, 0, 0,
2050 0, 0, 0, 0, 0, 0, 0, 0,
2051 0, 0, 0, 0, 0, 0, 0, 0,
2052 0, 0, 0, 0, 0, 0, 0, 0
2057 int16_t *block = s->block[n];
2058 const int last_index = s->block_last_index[n];
     // negative threshold encodes "keep DC" mode
2061 if (threshold < 0) {
2063 threshold = -threshold;
2067 /* Are all we could set to zero already zero? */
2068 if (last_index <= skip_dc - 1)
2071 for (i = 0; i <= last_index; i++) {
2072 const int j = s->intra_scantable.permutated[i];
2073 const int level = FFABS(block[j]);
2075 if (skip_dc && i == 0)
2079 } else if (level > 1) {
     // a coefficient with |level| > 1 makes the block worth keeping
2085 if (score >= threshold)
2087 for (i = skip_dc; i <= last_index; i++) {
2088 const int j = s->intra_scantable.permutated[i];
2092 s->block_last_index[n] = 0;
2094 s->block_last_index[n] = -1;
/* Clamp all quantized coefficients of 'block' into the codec's legal
 * [min_qcoeff, max_qcoeff] range (intra DC is exempt), warning once per
 * block when clipping occurred in simple MB-decision mode. */
2097 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2101 const int maxlevel = s->max_qcoeff;
2102 const int minlevel = s->min_qcoeff;
2106 i = 1; // skip clipping of intra dc
2110 for (; i <= last_index; i++) {
2111 const int j = s->intra_scantable.permutated[i];
2112 int level = block[j];
2114 if (level > maxlevel) {
2117 } else if (level < minlevel) {
     // only warn in simple mode; RD modes handle overflow themselves
2125 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2126 av_log(s->avctx, AV_LOG_INFO,
2127 "warning, clipping %d dct coefficients to %d..%d\n",
2128 overflow, minlevel, maxlevel);
/* Compute a perceptual weight for each pixel of an 8x8 block from the
 * local variance of its 3x3 neighbourhood (clamped at block borders):
 * flat areas get high weights, textured areas low ones. Used by the
 * quantizer noise shaping (dct_quantize_refine). */
2131 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2135 for (y = 0; y < 8; y++) {
2136 for (x = 0; x < 8; x++) {
     // 3x3 window clamped to the block: accumulate sum and sum of squares
2142 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2143 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2144 int v = ptr[x2 + y2 * stride];
     // 36 * sqrt(count*sumsq - sum^2) / count ~ scaled local std deviation
2150 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2155 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2156 int motion_x, int motion_y,
2157 int mb_block_height,
2161 int16_t weight[12][64];
2162 int16_t orig[12][64];
2163 const int mb_x = s->mb_x;
2164 const int mb_y = s->mb_y;
2167 int dct_offset = s->linesize * 8; // default for progressive frames
2168 int uv_dct_offset = s->uvlinesize * 8;
2169 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2170 ptrdiff_t wrap_y, wrap_c;
2172 for (i = 0; i < mb_block_count; i++)
2173 skip_dct[i] = s->skipdct;
2175 if (s->adaptive_quant) {
2176 const int last_qp = s->qscale;
2177 const int mb_xy = mb_x + mb_y * s->mb_stride;
2179 s->lambda = s->lambda_table[mb_xy];
2182 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2183 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2184 s->dquant = s->qscale - last_qp;
2186 if (s->out_format == FMT_H263) {
2187 s->dquant = av_clip(s->dquant, -2, 2);
2189 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2191 if (s->pict_type == AV_PICTURE_TYPE_B) {
2192 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2195 if (s->mv_type == MV_TYPE_8X8)
2201 ff_set_qscale(s, last_qp + s->dquant);
2202 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2203 ff_set_qscale(s, s->qscale + s->dquant);
2205 wrap_y = s->linesize;
2206 wrap_c = s->uvlinesize;
2207 ptr_y = s->new_picture.f->data[0] +
2208 (mb_y * 16 * wrap_y) + mb_x * 16;
2209 ptr_cb = s->new_picture.f->data[1] +
2210 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2211 ptr_cr = s->new_picture.f->data[2] +
2212 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2214 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2215 uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2216 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2217 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2218 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2220 16, 16, mb_x * 16, mb_y * 16,
2221 s->width, s->height);
2223 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2225 mb_block_width, mb_block_height,
2226 mb_x * mb_block_width, mb_y * mb_block_height,
2228 ptr_cb = ebuf + 16 * wrap_y;
2229 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2231 mb_block_width, mb_block_height,
2232 mb_x * mb_block_width, mb_y * mb_block_height,
2234 ptr_cr = ebuf + 16 * wrap_y + 16;
2238 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2239 int progressive_score, interlaced_score;
2241 s->interlaced_dct = 0;
2242 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2243 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2244 NULL, wrap_y, 8) - 400;
2246 if (progressive_score > 0) {
2247 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2248 NULL, wrap_y * 2, 8) +
2249 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2250 NULL, wrap_y * 2, 8);
2251 if (progressive_score > interlaced_score) {
2252 s->interlaced_dct = 1;
2254 dct_offset = wrap_y;
2255 uv_dct_offset = wrap_c;
2257 if (s->chroma_format == CHROMA_422 ||
2258 s->chroma_format == CHROMA_444)
2264 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2265 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2266 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2267 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2269 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2273 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2274 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2275 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2276 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2277 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2278 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2279 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2280 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2281 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2282 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2283 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2284 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2288 op_pixels_func (*op_pix)[4];
2289 qpel_mc_func (*op_qpix)[16];
2290 uint8_t *dest_y, *dest_cb, *dest_cr;
2292 dest_y = s->dest[0];
2293 dest_cb = s->dest[1];
2294 dest_cr = s->dest[2];
2296 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2297 op_pix = s->hdsp.put_pixels_tab;
2298 op_qpix = s->qdsp.put_qpel_pixels_tab;
2300 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2301 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2304 if (s->mv_dir & MV_DIR_FORWARD) {
2305 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2306 s->last_picture.f->data,
2308 op_pix = s->hdsp.avg_pixels_tab;
2309 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2311 if (s->mv_dir & MV_DIR_BACKWARD) {
2312 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2313 s->next_picture.f->data,
2317 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2318 int progressive_score, interlaced_score;
2320 s->interlaced_dct = 0;
2321 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2322 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2326 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2327 progressive_score -= 400;
2329 if (progressive_score > 0) {
2330 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2332 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2336 if (progressive_score > interlaced_score) {
2337 s->interlaced_dct = 1;
2339 dct_offset = wrap_y;
2340 uv_dct_offset = wrap_c;
2342 if (s->chroma_format == CHROMA_422)
2348 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2349 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2350 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2351 dest_y + dct_offset, wrap_y);
2352 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2353 dest_y + dct_offset + 8, wrap_y);
2355 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2359 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2360 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2361 if (!s->chroma_y_shift) { /* 422 */
2362 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2363 dest_cb + uv_dct_offset, wrap_c);
2364 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2365 dest_cr + uv_dct_offset, wrap_c);
2368 /* pre quantization */
2369 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2370 2 * s->qscale * s->qscale) {
2372 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2374 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2376 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2377 wrap_y, 8) < 20 * s->qscale)
2379 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2380 wrap_y, 8) < 20 * s->qscale)
2382 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2384 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2386 if (!s->chroma_y_shift) { /* 422 */
2387 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2388 dest_cb + uv_dct_offset,
2389 wrap_c, 8) < 20 * s->qscale)
2391 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2392 dest_cr + uv_dct_offset,
2393 wrap_c, 8) < 20 * s->qscale)
2399 if (s->quantizer_noise_shaping) {
2401 get_visual_weight(weight[0], ptr_y , wrap_y);
2403 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2405 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2407 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2409 get_visual_weight(weight[4], ptr_cb , wrap_c);
2411 get_visual_weight(weight[5], ptr_cr , wrap_c);
2412 if (!s->chroma_y_shift) { /* 422 */
2414 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2417 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2420 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2423 /* DCT & quantize */
2424 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2426 for (i = 0; i < mb_block_count; i++) {
2429 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2430 // FIXME we could decide to change to quantizer instead of
2432 // JS: I don't think that would be a good idea it could lower
2433 // quality instead of improve it. Just INTRADC clipping
2434 // deserves changes in quantizer
2436 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2438 s->block_last_index[i] = -1;
2440 if (s->quantizer_noise_shaping) {
2441 for (i = 0; i < mb_block_count; i++) {
2443 s->block_last_index[i] =
2444 dct_quantize_refine(s, s->block[i], weight[i],
2445 orig[i], i, s->qscale);
2450 if (s->luma_elim_threshold && !s->mb_intra)
2451 for (i = 0; i < 4; i++)
2452 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2453 if (s->chroma_elim_threshold && !s->mb_intra)
2454 for (i = 4; i < mb_block_count; i++)
2455 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2457 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2458 for (i = 0; i < mb_block_count; i++) {
2459 if (s->block_last_index[i] == -1)
2460 s->coded_score[i] = INT_MAX / 256;
2465 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2466 s->block_last_index[4] =
2467 s->block_last_index[5] = 0;
2469 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2470 if (!s->chroma_y_shift) { /* 422 / 444 */
2471 for (i=6; i<12; i++) {
2472 s->block_last_index[i] = 0;
2473 s->block[i][0] = s->block[4][0];
2478 // non c quantize code returns incorrect block_last_index FIXME
2479 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2480 for (i = 0; i < mb_block_count; i++) {
2482 if (s->block_last_index[i] > 0) {
2483 for (j = 63; j > 0; j--) {
2484 if (s->block[i][s->intra_scantable.permutated[j]])
2487 s->block_last_index[i] = j;
2492 /* huffman encode */
2493 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2494 case AV_CODEC_ID_MPEG1VIDEO:
2495 case AV_CODEC_ID_MPEG2VIDEO:
2496 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2497 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2499 case AV_CODEC_ID_MPEG4:
2500 if (CONFIG_MPEG4_ENCODER)
2501 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2503 case AV_CODEC_ID_MSMPEG4V2:
2504 case AV_CODEC_ID_MSMPEG4V3:
2505 case AV_CODEC_ID_WMV1:
2506 if (CONFIG_MSMPEG4_ENCODER)
2507 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2509 case AV_CODEC_ID_WMV2:
2510 if (CONFIG_WMV2_ENCODER)
2511 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2513 case AV_CODEC_ID_H261:
2514 if (CONFIG_H261_ENCODER)
2515 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2517 case AV_CODEC_ID_H263:
2518 case AV_CODEC_ID_H263P:
2519 case AV_CODEC_ID_FLV1:
2520 case AV_CODEC_ID_RV10:
2521 case AV_CODEC_ID_RV20:
2522 if (CONFIG_H263_ENCODER)
2523 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2525 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
2526 case AV_CODEC_ID_MJPEG:
2527 case AV_CODEC_ID_AMV:
2528 ff_mjpeg_encode_mb(s, s->block);
2531 case AV_CODEC_ID_SPEEDHQ:
2532 if (CONFIG_SPEEDHQ_ENCODER)
2533 ff_speedhq_encode_mb(s, s->block);
/* Dispatch macroblock encoding to encode_mb_internal() with block geometry
 * matching the chroma subsampling:
 *   4:2:0 ->  8x8  chroma,  6 blocks per MB
 *   4:2:2 -> 16x8  chroma,  8 blocks per MB
 *   else (4:4:4) -> 16x16 chroma, 12 blocks per MB */
2540 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2542 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2543 else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2544 else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/* Snapshot the encoder state that a trial encode can modify, so the caller
 * can restore it before trying another macroblock mode (used by encode_mb_hq).
 * Copies predictors (last MVs, last DC), bit-accounting counters and the
 * quantizer state from s into d.
 * NOTE(review): this chunk is decimated — some statements of the original
 * function are not visible here. */
2547 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2550 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2553 d->mb_skip_run= s->mb_skip_run;
/* DC predictors per component */
2555 d->last_dc[i] = s->last_dc[i];
/* bit-usage statistics */
2558 d->mv_bits= s->mv_bits;
2559 d->i_tex_bits= s->i_tex_bits;
2560 d->p_tex_bits= s->p_tex_bits;
2561 d->i_count= s->i_count;
2562 d->f_count= s->f_count;
2563 d->b_count= s->b_count;
2564 d->skip_count= s->skip_count;
2565 d->misc_bits= s->misc_bits;
/* quantizer state */
2569 d->qscale= s->qscale;
2570 d->dquant= s->dquant;
2572 d->esc3_level_length= s->esc3_level_length;
/* Counterpart of copy_context_before_encode(): after a trial encode, copy the
 * resulting state (MVs, predictors, counters, mode decisions, per-block
 * last-index, PutBitContexts for data partitioning) from s into d.  Used to
 * record the best candidate found so far during RD macroblock-mode decision.
 * NOTE(review): decimated chunk — some original statements are not visible. */
2575 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2578 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2579 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2582 d->mb_skip_run= s->mb_skip_run;
/* DC predictors per component */
2584 d->last_dc[i] = s->last_dc[i];
/* bit-usage statistics */
2587 d->mv_bits= s->mv_bits;
2588 d->i_tex_bits= s->i_tex_bits;
2589 d->p_tex_bits= s->p_tex_bits;
2590 d->i_count= s->i_count;
2591 d->f_count= s->f_count;
2592 d->b_count= s->b_count;
2593 d->skip_count= s->skip_count;
2594 d->misc_bits= s->misc_bits;
/* macroblock mode decision results */
2596 d->mb_intra= s->mb_intra;
2597 d->mb_skipped= s->mb_skipped;
2598 d->mv_type= s->mv_type;
2599 d->mv_dir= s->mv_dir;
/* with data partitioning the texture bits live in a separate PutBitContext */
2601 if(s->data_partitioning){
2603 d->tex_pb= s->tex_pb;
2607 d->block_last_index[i]= s->block_last_index[i];
2608 d->interlaced_dct= s->interlaced_dct;
2609 d->qscale= s->qscale;
2611 d->esc3_level_length= s->esc3_level_length;
/* Trial-encode the current macroblock with one candidate mode ('type') and
 * keep it if it beats the best score so far (*dmin).
 *
 * The MB is encoded into one of two scratch bit buffers (pb/pb2/tex_pb,
 * selected by *next_block) and, for full RD decision, reconstructed into the
 * rd_scratchpad so the distortion (SSE) can be measured.  Score is either the
 * plain bit count (FF_MB_DECISION_BITS) or bits*lambda2 + SSE<<FF_LAMBDA_SHIFT
 * (FF_MB_DECISION_RD).  On a win, the state is saved into 'best' via
 * copy_context_after_encode().
 * NOTE(review): decimated chunk — the score comparison / *dmin update and some
 * surrounding lines are not visible here. */
2614 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2615 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2616 int *dmin, int *next_block, int motion_x, int motion_y)
2619 uint8_t *dest_backup[3];
/* restore pristine pre-encode state for this trial */
2621 copy_context_before_encode(s, backup, type);
2623 s->block= s->blocks[*next_block];
2624 s->pb= pb[*next_block];
2625 if(s->data_partitioning){
2626 s->pb2 = pb2 [*next_block];
2627 s->tex_pb= tex_pb[*next_block];
/* redirect reconstruction into the RD scratchpad (Y, then Cb/Cr packed below it) */
2631 memcpy(dest_backup, s->dest, sizeof(s->dest));
2632 s->dest[0] = s->sc.rd_scratchpad;
2633 s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2634 s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2635 av_assert0(s->linesize >= 32); //FIXME
2638 encode_mb(s, motion_x, motion_y);
/* cost in bits across all active PutBitContexts */
2640 score= put_bits_count(&s->pb);
2641 if(s->data_partitioning){
2642 score+= put_bits_count(&s->pb2);
2643 score+= put_bits_count(&s->tex_pb);
2646 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2647 ff_mpv_reconstruct_mb(s, s->block);
/* rate-distortion: bits * lambda2 + SSE shifted into lambda scale */
2649 score *= s->lambda2;
2650 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2654 memcpy(s->dest, dest_backup, sizeof(s->dest));
/* this candidate is the new best: remember its complete state */
2661 copy_context_after_encode(best, s, type);
/* Sum of squared errors between two w x h pixel regions.
 * Uses the optimized mecc.sse kernels for the common 16x16 and 8x8 sizes and
 * falls back to a scalar loop (via the squared-difference lookup table
 * ff_square_tab, biased by 256 so negative differences index correctly) for
 * odd sizes such as picture-edge partial blocks.
 * NOTE(review): decimated chunk — the 16x16 size check and the fallback loop
 * header/accumulator declaration are not visible here. */
2665 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2666 const uint32_t *sq = ff_square_tab + 256;
2671 return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2672 else if(w==8 && h==8)
2673 return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2677 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/* Distortion (SSE, or NSSE if mb_cmp selects it) of the current macroblock:
 * reconstructed pixels in s->dest vs. the source frame s->new_picture, summed
 * over Y (16x16) and both chroma planes (8x8, 4:2:0 layout).  Full-size MBs
 * use the optimized mecc kernels; MBs clipped at the right/bottom picture
 * edge fall back to the scalar sse() with the clipped w/h. */
2686 static int sse_mb(MpegEncContext *s){
/* clip MB dimensions at the picture border */
2690 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2691 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2694 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2695 return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2696 s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2697 s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2699 return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2700 s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2701 s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
/* edge macroblock: scalar path with clipped sizes (chroma halved for 4:2:0) */
2704 return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2705 +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2706 +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/* Slice-thread worker: motion-estimation pre-pass over this context's MB rows.
 * Iterates macroblocks bottom-up / right-to-left and calls
 * ff_pre_estimate_p_frame_motion() for each, using the pre-pass diamond size
 * (avctx->pre_dia_size).  first_slice_line gates edge handling for the first
 * processed row. */
2709 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2710 MpegEncContext *s= *(void**)arg;
2714 s->me.dia_size= s->avctx->pre_dia_size;
2715 s->first_slice_line=1;
/* reverse scan order for the pre-pass */
2716 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2717 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2718 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2720 s->first_slice_line=0;
/* Slice-thread worker: main motion-estimation pass over this context's MB
 * rows.  For each macroblock it runs B-frame or P-frame motion estimation
 * (storing MVs and mb_type in the context) with the configured diamond size
 * (avctx->dia_size).  block_index[0..3] track the four luma block positions
 * and are advanced by 2 per macroblock after ff_init_block_index(). */
2728 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2729 MpegEncContext *s= *(void**)arg;
2731 s->me.dia_size= s->avctx->dia_size;
2732 s->first_slice_line=1;
2733 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2734 s->mb_x=0; //for block init below
2735 ff_init_block_index(s);
2736 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
/* advance luma block indices by one MB (2 blocks horizontally) */
2737 s->block_index[0]+=2;
2738 s->block_index[1]+=2;
2739 s->block_index[2]+=2;
2740 s->block_index[3]+=2;
2742 /* compute motion vector & mb_type and store in context */
2743 if(s->pict_type==AV_PICTURE_TYPE_B)
2744 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2746 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2748 s->first_slice_line=0;
/* Slice-thread worker: per-macroblock luma statistics for rate control /
 * scene-change detection.  For each 16x16 luma block of the source frame it
 * computes the pixel sum and variance (pix_norm1 - sum^2/256, with rounding
 * bias), stores variance and mean in the current picture's mb_var / mb_mean
 * tables, and accumulates the variance into me.mb_var_sum_temp. */
2753 static int mb_var_thread(AVCodecContext *c, void *arg){
2754 MpegEncContext *s= *(void**)arg;
2757 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2758 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2761 uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2763 int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
/* variance = E[x^2] - E[x]^2, scaled by 256 pixels, rounded (+128, +500 bias) */
2765 varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2766 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2768 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2769 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2770 s->me.mb_var_sum_temp += varc;
/* Finish the current slice: perform codec-specific trailer work (merge MPEG-4
 * data partitions and add stuffing, MJPEG stuffing, or SpeedHQ end-of-slice),
 * then byte-align the bitstream with flush_put_bits().  In two-pass mode the
 * alignment/stuffing bits are charged to misc_bits (except for partitioned
 * frames, where accounting is handled elsewhere). */
2776 static void write_slice_end(MpegEncContext *s){
2777 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2778 if(s->partitioned_frame){
2779 ff_mpeg4_merge_partitions(s);
2782 ff_mpeg4_stuffing(&s->pb);
2783 } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
2784 s->out_format == FMT_MJPEG) {
2785 ff_mjpeg_encode_stuffing(s);
2786 } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
2787 ff_speedhq_end_slice(s);
/* byte-align the slice */
2790 flush_put_bits(&s->pb);
2792 if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2793 s->misc_bits+= get_bits_diff(s);
/* Fill in the last 12-byte record of the AV_PKT_DATA_H263_MB_INFO side-data
 * buffer for the current macroblock: bit offset of the MB in the stream,
 * quantizer, GOB number, macroblock address within the GOB, and the predicted
 * MV (hmv1/vmv1 from ff_h263_pred_motion).  hmv2/vmv2 are written as 0 since
 * 4MV is not implemented. */
2796 static void write_mb_info(MpegEncContext *s)
2798 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2799 int offset = put_bits_count(&s->pb);
2800 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2801 int gobn = s->mb_y / s->gob_index;
2803 if (CONFIG_H263_ENCODER)
2804 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2805 bytestream_put_le32(&ptr, offset);
2806 bytestream_put_byte(&ptr, s->qscale);
2807 bytestream_put_byte(&ptr, gobn);
2808 bytestream_put_le16(&ptr, mba);
2809 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2810 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2811 /* 4MV not implemented */
2812 bytestream_put_byte(&ptr, 0); /* hmv2 */
2813 bytestream_put_byte(&ptr, 0); /* vmv2 */
/* Maintain the mb_info side-data bookkeeping.  Called per macroblock (and at
 * start codes, startcode != 0): when at least s->mb_info bytes have been
 * written since the last recorded position, reserve a new 12-byte record slot
 * (mb_info_size += 12) to be filled in by write_mb_info() later.
 * NOTE(review): decimated chunk — the startcode branch structure and the
 * write_mb_info() call site are not fully visible here. */
2816 static void update_mb_info(MpegEncContext *s, int startcode)
2820 if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
2821 s->mb_info_size += 12;
2822 s->prev_mb_info = s->last_mb_info;
2825 s->prev_mb_info = put_bytes_count(&s->pb, 0);
2826 /* This might have incremented mb_info_size above, and we return without
2827 * actually writing any info into that slot yet. But in that case,
2828 * this will be called again at the start of the after writing the
2829 * start code, actually writing the mb info. */
/* remember the position of this macroblock for the next interval check */
2833 s->last_mb_info = put_bytes_count(&s->pb, 0);
2834 if (!s->mb_info_size)
2835 s->mb_info_size += 12;
/* Grow the shared output bit buffer when fewer than 'threshold' bytes remain.
 * Only applies when there is a single slice context and the PutBitContext is
 * backed by avctx->internal->byte_buffer.  The buffer is enlarged by
 * size_increase (capped below INT_MAX/8), the PutBitContext is rebased onto
 * the new buffer, and the derived pointers ptr_lastgob / vbv_delay_ptr are
 * re-anchored via their saved offsets.
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure, or
 * AVERROR(EINVAL) if the space is still below the threshold afterwards. */
2839 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2841 if (put_bytes_left(&s->pb, 0) < threshold
2842 && s->slice_context_count == 1
2843 && s->pb.buf == s->avctx->internal->byte_buffer) {
/* save positions as offsets so they survive the realloc */
2844 int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2845 int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2847 uint8_t *new_buffer = NULL;
2848 int new_buffer_size = 0;
2850 if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2851 av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2852 return AVERROR(ENOMEM);
2857 av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2858 s->avctx->internal->byte_buffer_size + size_increase);
2860 return AVERROR(ENOMEM);
/* move already-written data and switch the contexts over to the new buffer */
2862 memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2863 av_free(s->avctx->internal->byte_buffer);
2864 s->avctx->internal->byte_buffer = new_buffer;
2865 s->avctx->internal->byte_buffer_size = new_buffer_size;
2866 rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2867 s->ptr_lastgob = s->pb.buf + lastgob_pos;
2868 s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2870 if (put_bytes_left(&s->pb, 0) < threshold)
2871 return AVERROR(EINVAL);
/* Slice-thread worker: encode all macroblocks of this context's row range.
 *
 * Per MB it either (a) runs the high-quality RD mode decision — trial-encode
 * every candidate mb_type via encode_mb_hq() into double-buffered scratch
 * PutBitContexts, optionally with QP-RD (dquant search) and skip-RD, then copy
 * the winning bitstream/state back — or (b) directly encodes the single
 * possible mb_type with encode_mb().  It also emits resync/GOB/slice headers
 * when needed, maintains DC/MV predictors, collects PSNR error statistics and
 * runs the H.263 loop filter.
 * NOTE(review): this chunk is decimated — loop headers, some braces and
 * statements of the original are not visible; code below is kept verbatim. */
2875 static int encode_thread(AVCodecContext *c, void *arg){
2876 MpegEncContext *s= *(void**)arg;
2877 int mb_x, mb_y, mb_y_order;
2878 int chr_h= 16>>s->chroma_y_shift;
2880 MpegEncContext best_s = { 0 }, backup_s;
/* double-buffered scratch bitstreams for RD trial encodes (main / partition2 / texture) */
2881 uint8_t bit_buf[2][MAX_MB_BYTES];
2882 uint8_t bit_buf2[2][MAX_MB_BYTES];
2883 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2884 PutBitContext pb[2], pb2[2], tex_pb[2];
2887 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2888 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2889 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2892 s->last_bits= put_bits_count(&s->pb);
2903 /* init last dc values */
2904 /* note: quant matrix value (8) is implied here */
2905 s->last_dc[i] = 128 << s->intra_dc_precision;
2907 s->current_picture.encoding_error[i] = 0;
2909 if(s->codec_id==AV_CODEC_ID_AMV){
/* AMV uses different DC predictor resets per component */
2910 s->last_dc[0] = 128*8/13;
2911 s->last_dc[1] = 128*8/14;
2912 s->last_dc[2] = 128*8/14;
2915 memset(s->last_mv, 0, sizeof(s->last_mv));
/* codec-specific per-slice setup */
2919 switch(s->codec_id){
2920 case AV_CODEC_ID_H263:
2921 case AV_CODEC_ID_H263P:
2922 case AV_CODEC_ID_FLV1:
2923 if (CONFIG_H263_ENCODER)
2924 s->gob_index = H263_GOB_HEIGHT(s->height);
2926 case AV_CODEC_ID_MPEG4:
2927 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2928 ff_mpeg4_init_partitions(s);
2934 s->first_slice_line = 1;
2935 s->ptr_lastgob = s->pb.buf;
2936 for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
/* SpeedHQ encodes MB rows in a permuted order with its own slice breaks */
2937 if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
2939 mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
2940 if (first_in_slice && mb_y_order != s->start_mb_y)
2941 ff_speedhq_end_slice(s);
2942 s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
2949 ff_set_qscale(s, s->qscale);
2950 ff_init_block_index(s);
2952 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2953 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2954 int mb_type= s->mb_type[xy];
/* make sure the output buffer has room for a worst-case macroblock */
2958 int size_increase = s->avctx->internal->byte_buffer_size/4
2959 + s->mb_width*MAX_MB_BYTES;
2961 ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
2962 if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
2963 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2966 if(s->data_partitioning){
2967 if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
2968 put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
2969 av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2975 s->mb_y = mb_y; // moved into loop, can get changed by H.261
2976 ff_update_block_index(s);
2978 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2979 ff_h261_reorder_mb_index(s);
2980 xy= s->mb_y*s->mb_stride + s->mb_x;
2981 mb_type= s->mb_type[xy];
2984 /* write gob / video packet header */
2986 int current_packet_size, is_gob_start;
2988 current_packet_size = put_bytes_count(&s->pb, 1)
2989 - (s->ptr_lastgob - s->pb.buf);
2991 is_gob_start = s->rtp_payload_size &&
2992 current_packet_size >= s->rtp_payload_size &&
2995 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
/* codec-specific restrictions on where a resync point may start */
2997 switch(s->codec_id){
2998 case AV_CODEC_ID_H263:
2999 case AV_CODEC_ID_H263P:
3000 if(!s->h263_slice_structured)
3001 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3003 case AV_CODEC_ID_MPEG2VIDEO:
3004 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3005 case AV_CODEC_ID_MPEG1VIDEO:
3006 if(s->mb_skip_run) is_gob_start=0;
3008 case AV_CODEC_ID_MJPEG:
3009 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3014 if(s->start_mb_y != mb_y || mb_x!=0){
3017 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3018 ff_mpeg4_init_partitions(s);
3022 av_assert2((put_bits_count(&s->pb)&7) == 0);
3023 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
/* error_rate option: randomly drop packets for robustness testing */
3025 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3026 int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->mb_x + s->mb_y;
3027 int d = 100 / s->error_rate;
3029 current_packet_size=0;
3030 s->pb.buf_ptr= s->ptr_lastgob;
3031 av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3035 #if FF_API_RTP_CALLBACK
3036 FF_DISABLE_DEPRECATION_WARNINGS
3037 if (s->avctx->rtp_callback){
3038 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3039 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3041 FF_ENABLE_DEPRECATION_WARNINGS
3043 update_mb_info(s, 1);
/* emit the codec-specific resync/slice/GOB header */
3045 switch(s->codec_id){
3046 case AV_CODEC_ID_MPEG4:
3047 if (CONFIG_MPEG4_ENCODER) {
3048 ff_mpeg4_encode_video_packet_header(s);
3049 ff_mpeg4_clean_buffers(s);
3052 case AV_CODEC_ID_MPEG1VIDEO:
3053 case AV_CODEC_ID_MPEG2VIDEO:
3054 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3055 ff_mpeg1_encode_slice_header(s);
3056 ff_mpeg1_clean_buffers(s);
3059 case AV_CODEC_ID_H263:
3060 case AV_CODEC_ID_H263P:
3061 if (CONFIG_H263_ENCODER)
3062 ff_h263_encode_gob_header(s, mb_y);
3066 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3067 int bits= put_bits_count(&s->pb);
3068 s->misc_bits+= bits - s->last_bits;
3072 s->ptr_lastgob += current_packet_size;
3073 s->first_slice_line=1;
3074 s->resync_mb_x=mb_x;
3075 s->resync_mb_y=mb_y;
3079 if( (s->resync_mb_x == s->mb_x)
3080 && s->resync_mb_y+1 == s->mb_y){
3081 s->first_slice_line=0;
3085 s->dquant=0; //only for QP_RD
3087 update_mb_info(s, 0);
3089 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
/* --- RD mode decision: trial-encode each candidate mb_type --- */
3091 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3093 copy_context_before_encode(&backup_s, s, -1);
3095 best_s.data_partitioning= s->data_partitioning;
3096 best_s.partitioned_frame= s->partitioned_frame;
3097 if(s->data_partitioning){
3098 backup_s.pb2= s->pb2;
3099 backup_s.tex_pb= s->tex_pb;
3102 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3103 s->mv_dir = MV_DIR_FORWARD;
3104 s->mv_type = MV_TYPE_16X16;
3106 s->mv[0][0][0] = s->p_mv_table[xy][0];
3107 s->mv[0][0][1] = s->p_mv_table[xy][1];
3108 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3109 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3111 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3112 s->mv_dir = MV_DIR_FORWARD;
3113 s->mv_type = MV_TYPE_FIELD;
3116 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3117 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3118 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3120 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3121 &dmin, &next_block, 0, 0);
3123 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3124 s->mv_dir = MV_DIR_FORWARD;
3125 s->mv_type = MV_TYPE_16X16;
3129 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3130 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3132 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3133 s->mv_dir = MV_DIR_FORWARD;
3134 s->mv_type = MV_TYPE_8X8;
3137 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3138 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3140 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3141 &dmin, &next_block, 0, 0);
3143 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3144 s->mv_dir = MV_DIR_FORWARD;
3145 s->mv_type = MV_TYPE_16X16;
3147 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3148 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3149 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3150 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3152 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3153 s->mv_dir = MV_DIR_BACKWARD;
3154 s->mv_type = MV_TYPE_16X16;
3156 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3157 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3158 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3159 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3161 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3162 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3163 s->mv_type = MV_TYPE_16X16;
3165 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3166 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3167 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3168 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3169 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3170 &dmin, &next_block, 0, 0);
3172 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3173 s->mv_dir = MV_DIR_FORWARD;
3174 s->mv_type = MV_TYPE_FIELD;
3177 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3178 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3179 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3181 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3182 &dmin, &next_block, 0, 0);
3184 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3185 s->mv_dir = MV_DIR_BACKWARD;
3186 s->mv_type = MV_TYPE_FIELD;
3189 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3190 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3191 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3193 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3194 &dmin, &next_block, 0, 0);
3196 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3197 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3198 s->mv_type = MV_TYPE_FIELD;
3200 for(dir=0; dir<2; dir++){
3202 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3203 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3204 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3207 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3208 &dmin, &next_block, 0, 0);
3210 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3212 s->mv_type = MV_TYPE_16X16;
3216 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3217 &dmin, &next_block, 0, 0);
3218 if(s->h263_pred || s->h263_aic){
3220 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3222 ff_clean_intra_table_entries(s); //old mode?
/* --- QP-RD: retry the winning 16x16 mode with nearby quantizers --- */
3226 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3227 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3228 const int last_qp= backup_s.qscale;
3231 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3232 static const int dquant_tab[4]={-1,1,-2,2};
3233 int storecoefs = s->mb_intra && s->dc_val[0];
3235 av_assert2(backup_s.dquant == 0);
3238 s->mv_dir= best_s.mv_dir;
3239 s->mv_type = MV_TYPE_16X16;
3240 s->mb_intra= best_s.mb_intra;
3241 s->mv[0][0][0] = best_s.mv[0][0][0];
3242 s->mv[0][0][1] = best_s.mv[0][0][1];
3243 s->mv[1][0][0] = best_s.mv[1][0][0];
3244 s->mv[1][0][1] = best_s.mv[1][0][1];
3246 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3247 for(; qpi<4; qpi++){
3248 int dquant= dquant_tab[qpi];
3249 qp= last_qp + dquant;
3250 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3252 backup_s.dquant= dquant;
/* save DC/AC predictors: a failed trial must not corrupt them */
3255 dc[i]= s->dc_val[0][ s->block_index[i] ];
3256 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3260 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3261 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3262 if(best_s.qscale != qp){
3265 s->dc_val[0][ s->block_index[i] ]= dc[i];
3266 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3273 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3274 int mx= s->b_direct_mv_table[xy][0];
3275 int my= s->b_direct_mv_table[xy][1];
3277 backup_s.dquant = 0;
3278 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3280 ff_mpeg4_set_direct_mv(s, mx, my);
3281 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3282 &dmin, &next_block, mx, my);
3284 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3285 backup_s.dquant = 0;
3286 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3288 ff_mpeg4_set_direct_mv(s, 0, 0);
3289 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3290 &dmin, &next_block, 0, 0);
/* --- skip-RD: try coding the best non-intra mode with no coefficients --- */
3292 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3295 coded |= s->block_last_index[i];
3298 memcpy(s->mv, best_s.mv, sizeof(s->mv));
3299 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3300 mx=my=0; //FIXME find the one we actually used
3301 ff_mpeg4_set_direct_mv(s, mx, my);
3302 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3310 s->mv_dir= best_s.mv_dir;
3311 s->mv_type = best_s.mv_type;
3313 /*                 s->mv[0][0][0] = best_s.mv[0][0][0];
3314 s->mv[0][0][1] = best_s.mv[0][0][1];
3315 s->mv[1][0][0] = best_s.mv[1][0][0];
3316 s->mv[1][0][1] = best_s.mv[1][0][1];*/
3319 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3320 &dmin, &next_block, mx, my);
3325 s->current_picture.qscale_table[xy] = best_s.qscale;
/* adopt the winning candidate's state and copy its bits into the real stream */
3327 copy_context_after_encode(s, &best_s, -1);
3329 pb_bits_count= put_bits_count(&s->pb);
3330 flush_put_bits(&s->pb);
3331 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3334 if(s->data_partitioning){
3335 pb2_bits_count= put_bits_count(&s->pb2);
3336 flush_put_bits(&s->pb2);
3337 ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3338 s->pb2= backup_s.pb2;
3340 tex_pb_bits_count= put_bits_count(&s->tex_pb);
3341 flush_put_bits(&s->tex_pb);
3342 ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3343 s->tex_pb= backup_s.tex_pb;
3345 s->last_bits= put_bits_count(&s->pb);
3347 if (CONFIG_H263_ENCODER &&
3348 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3349 ff_h263_update_motion_val(s);
/* if the winner was reconstructed into the scratchpad, copy it to s->dest */
3351 if(next_block==0){ //FIXME 16 vs linesize16
3352 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3353 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3354 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3357 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3358 ff_mpv_reconstruct_mb(s, s->block);
/* --- single candidate mb_type: set up MVs and encode directly --- */
3360 int motion_x = 0, motion_y = 0;
3361 s->mv_type=MV_TYPE_16X16;
3362 // only one MB-Type possible
3365 case CANDIDATE_MB_TYPE_INTRA:
3368 motion_x= s->mv[0][0][0] = 0;
3369 motion_y= s->mv[0][0][1] = 0;
3371 case CANDIDATE_MB_TYPE_INTER:
3372 s->mv_dir = MV_DIR_FORWARD;
3374 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3375 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3377 case CANDIDATE_MB_TYPE_INTER_I:
3378 s->mv_dir = MV_DIR_FORWARD;
3379 s->mv_type = MV_TYPE_FIELD;
3382 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3383 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3384 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3387 case CANDIDATE_MB_TYPE_INTER4V:
3388 s->mv_dir = MV_DIR_FORWARD;
3389 s->mv_type = MV_TYPE_8X8;
3392 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3393 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3396 case CANDIDATE_MB_TYPE_DIRECT:
3397 if (CONFIG_MPEG4_ENCODER) {
3398 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3400 motion_x=s->b_direct_mv_table[xy][0];
3401 motion_y=s->b_direct_mv_table[xy][1];
3402 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3405 case CANDIDATE_MB_TYPE_DIRECT0:
3406 if (CONFIG_MPEG4_ENCODER) {
3407 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3409 ff_mpeg4_set_direct_mv(s, 0, 0);
3412 case CANDIDATE_MB_TYPE_BIDIR:
3413 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3415 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3416 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3417 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3418 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3420 case CANDIDATE_MB_TYPE_BACKWARD:
3421 s->mv_dir = MV_DIR_BACKWARD;
3423 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3424 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3426 case CANDIDATE_MB_TYPE_FORWARD:
3427 s->mv_dir = MV_DIR_FORWARD;
3429 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3430 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3432 case CANDIDATE_MB_TYPE_FORWARD_I:
3433 s->mv_dir = MV_DIR_FORWARD;
3434 s->mv_type = MV_TYPE_FIELD;
3437 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3438 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3439 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3442 case CANDIDATE_MB_TYPE_BACKWARD_I:
3443 s->mv_dir = MV_DIR_BACKWARD;
3444 s->mv_type = MV_TYPE_FIELD;
3447 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3448 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3449 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3452 case CANDIDATE_MB_TYPE_BIDIR_I:
3453 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3454 s->mv_type = MV_TYPE_FIELD;
3456 for(dir=0; dir<2; dir++){
3458 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3459 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3460 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3465 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3468 encode_mb(s, motion_x, motion_y);
3470 // RAL: Update last macroblock type
3471 s->last_mv_dir = s->mv_dir;
3473 if (CONFIG_H263_ENCODER &&
3474 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3475 ff_h263_update_motion_val(s);
3477 ff_mpv_reconstruct_mb(s, s->block);
3480 /* clean the MV table in IPS frames for direct mode in B-frames */
3481 if(s->mb_intra /* && I,P,S_TYPE */){
3482 s->p_mv_table[xy][0]=0;
3483 s->p_mv_table[xy][1]=0;
/* PSNR statistics: accumulate per-plane SSE with border clipping */
3486 if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3490 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3491 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3493 s->current_picture.encoding_error[0] += sse(
3494 s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3495 s->dest[0], w, h, s->linesize);
3496 s->current_picture.encoding_error[1] += sse(
3497 s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3498 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3499 s->current_picture.encoding_error[2] += sse(
3500 s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3501 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3504 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3505 ff_h263_loop_filter(s);
3507 ff_dlog(s->avctx, "MB %d %d bits\n",
3508 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3512 //not beautiful here but we must write it before flushing so it has to be here
3513 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3514 ff_msmpeg4_encode_ext_header(s)
3518 #if FF_API_RTP_CALLBACK
3519 FF_DISABLE_DEPRECATION_WARNINGS
3520 /* Send the last GOB if RTP */
3521 if (s->avctx->rtp_callback) {
3522 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3523 int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3524 /* Call the RTP callback to send the last GOB */
3526 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3528 FF_ENABLE_DEPRECATION_WARNINGS
3534 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold motion-estimation statistics gathered by a slice-worker context
 * (src) into the main context (dst); MERGE() zeroes src's copy so a
 * later re-merge cannot double count.
 * NOTE(review): MERGE expands to two statements without do{}while(0),
 * so it is only safe as a full standalone statement, as used here. */
3535 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3536 MERGE(me.scene_change_score);
3537 MERGE(me.mc_mb_var_sum_temp);
3538 MERGE(me.mb_var_sum_temp);
/* Merge encode-side statistics and the bitstream produced by a
 * slice-worker context (src) into the main context (dst). */
3541 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3544 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3545 MERGE(dct_count[1]);
3554 MERGE(er.error_count);
3555 MERGE(padding_bug_score);
3556 MERGE(current_picture.encoding_error[0]);
3557 MERGE(current_picture.encoding_error[1]);
3558 MERGE(current_picture.encoding_error[2]);
/* DCT noise-reduction error sums exist only when noise_reduction is on */
3560 if (dst->noise_reduction){
3561 for(i=0; i<64; i++){
3562 MERGE(dct_error_sum[0][i]);
3563 MERGE(dct_error_sum[1][i]);
/* slices must end byte-aligned before their bits can be concatenated */
3567 av_assert1(put_bits_count(&src->pb) % 8 ==0);
3568 av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3569 ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3570 flush_put_bits(&dst->pb);
/* Choose the quality (lambda/qscale) for the current picture:
 * a pending next_lambda takes precedence, otherwise ask the rate
 * controller (unless fixed_qscale). With adaptive quantization the
 * per-MB qscale tables are cleaned up per-codec afterwards.
 * dry_run: when nonzero, do not consume next_lambda (probe only).
 * Returns < 0 on rate-control failure (error path not visible here). */
3573 static int estimate_qp(MpegEncContext *s, int dry_run){
3574 if (s->next_lambda){
3575 s->current_picture_ptr->f->quality =
3576 s->current_picture.f->quality = s->next_lambda;
3577 if(!dry_run) s->next_lambda= 0;
3578 } else if (!s->fixed_qscale) {
3579 int quality = ff_rate_estimate_qscale(s, dry_run);
3580 s->current_picture_ptr->f->quality =
3581 s->current_picture.f->quality = quality;
3582 if (s->current_picture.f->quality < 0)
3586 if(s->adaptive_quant){
3587 switch(s->codec_id){
3588 case AV_CODEC_ID_MPEG4:
3589 if (CONFIG_MPEG4_ENCODER)
3590 ff_clean_mpeg4_qscales(s);
3592 case AV_CODEC_ID_H263:
3593 case AV_CODEC_ID_H263P:
3594 case AV_CODEC_ID_FLV1:
3595 if (CONFIG_H263_ENCODER)
3596 ff_clean_h263_qscales(s);
3599 ff_init_qscale_tab(s);
/* adaptive quant: lambda comes from the per-MB table */
3602 s->lambda= s->lambda_table[0];
/* otherwise: one lambda for the whole picture */
3605 s->lambda = s->current_picture.f->quality;
3610 /* must be called before writing the header */
/* Update the temporal distances used for B-frame prediction:
 * pp_time = distance between the two surrounding non-B pictures,
 * pb_time = distance from the previous non-B picture to this B picture. */
3611 static void set_frame_distances(MpegEncContext * s){
3612 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3613 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3615 if(s->pict_type==AV_PICTURE_TYPE_B){
3616 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3617 av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
3619 s->pp_time= s->time - s->last_non_b_time;
3620 s->last_non_b_time= s->time;
3621 av_assert1(s->picture_number==0 || s->pp_time > 0);
/* Encode one complete picture: set up timing and lambdas, run motion
 * estimation across all slice contexts, pick f_code/b_code, build the
 * quantization matrices where needed, write the per-format picture
 * header and finally run/merge the slice encoding threads. */
3625 static int encode_picture(MpegEncContext *s, int picture_number)
3629 int context_count = s->slice_context_count;
3631 s->picture_number = picture_number;
3633 /* Reset the average MB variance */
3634 s->me.mb_var_sum_temp =
3635 s->me.mc_mb_var_sum_temp = 0;
3637 /* we need to initialize some time vars before we can encode B-frames */
3638 // RAL: Condition added for MPEG1VIDEO
3639 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3640 set_frame_distances(s);
3641 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3642 ff_set_mpeg4_time(s);
3644 s->me.scene_change_score=0;
3646 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* rounding mode: I frames reset it; P/S frames may flip-flop it */
3648 if(s->pict_type==AV_PICTURE_TYPE_I){
3649 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3650 else s->no_rounding=0;
3651 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3652 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3653 s->no_rounding ^= 1;
3656 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3657 if (estimate_qp(s,1) < 0)
3659 ff_get_2pass_fcode(s);
3660 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3661 if(s->pict_type==AV_PICTURE_TYPE_B)
3662 s->lambda= s->last_lambda_for[s->pict_type];
3664 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* for non-(A)MJPEG the chroma matrices alias the luma ones */
3668 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3669 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3670 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3671 s->q_chroma_intra_matrix = s->q_intra_matrix;
3672 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3675 s->mb_intra=0; //for the rate distortion & bit compare functions
3676 for(i=1; i<context_count; i++){
3677 ret = ff_update_duplicate_context(s->thread_context[i], s);
3685 /* Estimate motion for every MB */
3686 if(s->pict_type != AV_PICTURE_TYPE_I){
3687 s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3688 s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3689 if (s->pict_type != AV_PICTURE_TYPE_B) {
3690 if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3692 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3696 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3697 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3699 for(i=0; i<s->mb_stride*s->mb_height; i++)
3700 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3702 if(!s->fixed_qscale){
3703 /* finding spatial complexity for I-frame rate control */
3704 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3707 for(i=1; i<context_count; i++){
3708 merge_context_after_me(s, s->thread_context[i]);
3710 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3711 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* scene-change detection: promote the P frame to an all-intra I frame */
3714 if (s->me.scene_change_score > s->scenechange_threshold &&
3715 s->pict_type == AV_PICTURE_TYPE_P) {
3716 s->pict_type= AV_PICTURE_TYPE_I;
3717 for(i=0; i<s->mb_stride*s->mb_height; i++)
3718 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3719 if(s->msmpeg4_version >= 3)
3721 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3722 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* pick f_code from the P motion vectors and clamp out-of-range MVs */
3726 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3727 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3729 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3731 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3732 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3733 s->f_code= FFMAX3(s->f_code, a, b);
3736 ff_fix_long_p_mvs(s, s->intra_penalty ? CANDIDATE_MB_TYPE_INTER : CANDIDATE_MB_TYPE_INTRA);
3737 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3738 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3742 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3743 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
/* pick f_code/b_code for B frames from forward/backward MV tables */
3748 if(s->pict_type==AV_PICTURE_TYPE_B){
3751 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3752 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3753 s->f_code = FFMAX(a, b);
3755 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3756 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3757 s->b_code = FFMAX(a, b);
3759 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3760 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3761 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3762 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3763 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3765 for(dir=0; dir<2; dir++){
3768 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3769 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3770 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3771 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3779 if (estimate_qp(s, 0) < 0)
3782 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3783 s->pict_type == AV_PICTURE_TYPE_I &&
3784 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3785 s->qscale= 3; //reduce clipping problems
3787 if (s->out_format == FMT_MJPEG) {
3788 const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3789 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3791 if (s->avctx->intra_matrix) {
3793 luma_matrix = s->avctx->intra_matrix;
3795 if (s->avctx->chroma_intra_matrix)
3796 chroma_matrix = s->avctx->chroma_intra_matrix;
3798 /* for mjpeg, we do include qscale in the matrix */
3800 int j = s->idsp.idct_permutation[i];
3802 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3803 s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3805 s->y_dc_scale_table=
3806 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3807 s->chroma_intra_matrix[0] =
3808 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3809 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3810 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3811 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3812 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV uses fixed DC scale tables and fixed sp5x quant matrices */
3815 if(s->codec_id == AV_CODEC_ID_AMV){
3816 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3817 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3819 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3821 s->intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3822 s->chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3824 s->y_dc_scale_table= y;
3825 s->c_dc_scale_table= c;
3826 s->intra_matrix[0] = 13;
3827 s->chroma_intra_matrix[0] = 14;
3828 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3829 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3830 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3831 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3835 if (s->out_format == FMT_SPEEDHQ) {
3836 s->y_dc_scale_table=
3837 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[3];
3840 //FIXME var duplication
3841 s->current_picture_ptr->f->key_frame =
3842 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3843 s->current_picture_ptr->f->pict_type =
3844 s->current_picture.f->pict_type = s->pict_type;
3846 if (s->current_picture.f->key_frame)
3847 s->picture_in_gop_number=0;
3849 s->mb_x = s->mb_y = 0;
3850 s->last_bits= put_bits_count(&s->pb);
/* write the per-format picture header */
3851 switch(s->out_format) {
3852 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
3854 /* s->huffman == HUFFMAN_TABLE_OPTIMAL can only be true for MJPEG. */
3855 if (!CONFIG_MJPEG_ENCODER || s->huffman != HUFFMAN_TABLE_OPTIMAL)
3856 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3857 s->pred, s->intra_matrix, s->chroma_intra_matrix);
3861 if (CONFIG_SPEEDHQ_ENCODER)
3862 ff_speedhq_encode_picture_header(s);
3865 if (CONFIG_H261_ENCODER)
3866 ff_h261_encode_picture_header(s, picture_number);
3869 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3870 ff_wmv2_encode_picture_header(s, picture_number);
3871 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3872 ff_msmpeg4_encode_picture_header(s, picture_number);
3873 else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3874 ret = ff_mpeg4_encode_picture_header(s, picture_number);
3877 } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3878 ret = ff_rv10_encode_picture_header(s, picture_number);
3882 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3883 ff_rv20_encode_picture_header(s, picture_number);
3884 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3885 ff_flv_encode_picture_header(s, picture_number);
3886 else if (CONFIG_H263_ENCODER)
3887 ff_h263_encode_picture_header(s, picture_number);
3890 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3891 ff_mpeg1_encode_picture_header(s, picture_number);
3896 bits= put_bits_count(&s->pb);
3897 s->header_bits= bits - s->last_bits;
/* run slice encoding threads, then merge their stats and bitstreams */
3899 for(i=1; i<context_count; i++){
3900 update_duplicate_context_after_me(s->thread_context[i], s);
3902 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3903 for(i=1; i<context_count; i++){
3904 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3905 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3906 merge_context_after_encode(s, s->thread_context[i]);
/* DCT-domain noise reduction (C reference): accumulate per-coefficient
 * error statistics in dct_error_sum[intra] and pull each coefficient
 * toward zero by the corresponding dct_offset, clamping at zero so a
 * coefficient never changes sign. */
3912 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3913 const int intra= s->mb_intra;
3916 s->dct_count[intra]++;
3918 for(i=0; i<64; i++){
3919 int level= block[i];
/* positive branch: record level, subtract offset, clamp at 0 */
3923 s->dct_error_sum[intra][i] += level;
3924 level -= s->dct_offset[intra][i];
3925 if(level<0) level=0;
/* negative branch: record -level, add offset, clamp at 0 */
3927 s->dct_error_sum[intra][i] -= level;
3928 level += s->dct_offset[intra][i];
3929 if(level>0) level=0;
/* Trellis (rate-distortion optimal) quantization of one 8x8 block.
 * Runs the forward DCT, builds up to two candidate levels per scan
 * position, then searches run/level combinations with a Viterbi-style
 * survivor list, scoring distortion + lambda * VLC bits.
 * n selects luma (n < 4) vs chroma matrices; *overflow is set when a
 * coefficient may exceed s->max_qcoeff. Returns the last nonzero
 * coefficient index (scan order), or < start_i if the block is empty. */
3936 static int dct_quantize_trellis_c(MpegEncContext *s,
3937 int16_t *block, int n,
3938 int qscale, int *overflow){
3940 const uint16_t *matrix;
3941 const uint8_t *scantable;
3942 const uint8_t *perm_scantable;
3944 unsigned int threshold1, threshold2;
3956 int coeff_count[64];
3957 int qmul, qadd, start_i, last_non_zero, i, dc;
3958 const int esc_length= s->ac_esc_length;
3960 uint8_t * last_length;
3961 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3964 s->fdsp.fdct(block);
3966 if(s->dct_error_sum)
3967 s->denoise_dct(s, block);
3969 qadd= ((qscale-1)|1)*8;
3971 if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3972 else mpeg2_qscale = qscale << 1;
/* intra path: select scan tables, quantize the DC separately */
3976 scantable= s->intra_scantable.scantable;
3977 perm_scantable= s->intra_scantable.permutated;
3985 /* For AIC we skip quant/dequant of INTRADC */
3990 /* note: block[0] is assumed to be positive */
3991 block[0] = (block[0] + (q >> 1)) / q;
3994 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
3995 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
3996 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
3997 bias= 1<<(QMAT_SHIFT-1);
3999 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4000 length = s->intra_chroma_ac_vlc_length;
4001 last_length= s->intra_chroma_ac_vlc_last_length;
4003 length = s->intra_ac_vlc_length;
4004 last_length= s->intra_ac_vlc_last_length;
/* inter path */
4007 scantable= s->inter_scantable.scantable;
4008 perm_scantable= s->inter_scantable.permutated;
4011 qmat = s->q_inter_matrix[qscale];
4012 matrix = s->inter_matrix;
4013 length = s->inter_ac_vlc_length;
4014 last_length= s->inter_ac_vlc_last_length;
4018 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4019 threshold2= (threshold1<<1);
/* find the last coefficient that survives the quantization threshold */
4021 for(i=63; i>=start_i; i--) {
4022 const int j = scantable[i];
4023 int level = block[j] * qmat[j];
4025 if(((unsigned)(level+threshold1))>threshold2){
/* build candidate levels (level and level-1 toward zero) per position */
4031 for(i=start_i; i<=last_non_zero; i++) {
4032 const int j = scantable[i];
4033 int level = block[j] * qmat[j];
4035 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4036 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4037 if(((unsigned)(level+threshold1))>threshold2){
4039 level= (bias + level)>>QMAT_SHIFT;
4041 coeff[1][i]= level-1;
4042 // coeff[2][k]= level-2;
4044 level= (bias - level)>>QMAT_SHIFT;
4045 coeff[0][i]= -level;
4046 coeff[1][i]= -level+1;
4047 // coeff[2][k]= -level+2;
4049 coeff_count[i]= FFMIN(level, 2);
4050 av_assert2(coeff_count[i]);
4053 coeff[0][i]= (level>>31)|1;
4058 *overflow= s->max_qcoeff < max; //overflow might have happened
4060 if(last_non_zero < start_i){
4061 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4062 return last_non_zero;
4065 score_tab[start_i]= 0;
4066 survivor[0]= start_i;
/* Viterbi search over scan positions, pruning to a survivor list */
4069 for(i=start_i; i<=last_non_zero; i++){
4070 int level_index, j, zero_distortion;
4071 int dct_coeff= FFABS(block[ scantable[i] ]);
4072 int best_score=256*256*256*120;
/* ifast FDCT outputs are AAN-scaled; undo before measuring distortion */
4074 if (s->fdsp.fdct == ff_fdct_ifast)
4075 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4076 zero_distortion= dct_coeff*dct_coeff;
4078 for(level_index=0; level_index < coeff_count[i]; level_index++){
4080 int level= coeff[level_index][i];
4081 const int alevel= FFABS(level);
/* reconstruct the coefficient the decoder would see, per format */
4086 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4087 unquant_coeff= alevel*qmul + qadd;
4088 } else if(s->out_format == FMT_MJPEG) {
4089 j = s->idsp.idct_permutation[scantable[i]];
4090 unquant_coeff = alevel * matrix[j] * 8;
4092 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4094 unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4095 unquant_coeff = (unquant_coeff - 1) | 1;
4097 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4098 unquant_coeff = (unquant_coeff - 1) | 1;
4103 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* |level| < 128: regular VLC; otherwise costed as an escape code */
4105 if((level&(~127)) == 0){
4106 for(j=survivor_count-1; j>=0; j--){
4107 int run= i - survivor[j];
4108 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4109 score += score_tab[i-run];
4111 if(score < best_score){
4114 level_tab[i+1]= level-64;
4118 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4119 for(j=survivor_count-1; j>=0; j--){
4120 int run= i - survivor[j];
4121 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4122 score += score_tab[i-run];
4123 if(score < last_score){
4126 last_level= level-64;
4132 distortion += esc_length*lambda;
4133 for(j=survivor_count-1; j>=0; j--){
4134 int run= i - survivor[j];
4135 int score= distortion + score_tab[i-run];
4137 if(score < best_score){
4140 level_tab[i+1]= level-64;
4144 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4145 for(j=survivor_count-1; j>=0; j--){
4146 int run= i - survivor[j];
4147 int score= distortion + score_tab[i-run];
4148 if(score < last_score){
4151 last_level= level-64;
4159 score_tab[i+1]= best_score;
4161 // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4162 if(last_non_zero <= 27){
4163 for(; survivor_count; survivor_count--){
4164 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4168 for(; survivor_count; survivor_count--){
4169 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4174 survivor[ survivor_count++ ]= i+1;
/* non-H.263/H.261: choose where to end the block (EOB cost = 2*lambda) */
4177 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4178 last_score= 256*256*256*120;
4179 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4180 int score= score_tab[i];
4182 score += lambda * 2; // FIXME more exact?
4184 if(score < last_score){
4187 last_level= level_tab[i];
4188 last_run= run_tab[i];
4193 s->coded_score[n] = last_score;
4195 dc= FFABS(block[0]);
4196 last_non_zero= last_i - 1;
4197 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4199 if(last_non_zero < start_i)
4200 return last_non_zero;
/* special case: only the first coefficient survived — re-score it */
4202 if(last_non_zero == 0 && start_i == 0){
4204 int best_score= dc * dc;
4206 for(i=0; i<coeff_count[0]; i++){
4207 int level= coeff[i][0];
4208 int alevel= FFABS(level);
4209 int unquant_coeff, score, distortion;
4211 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4212 unquant_coeff= (alevel*qmul + qadd)>>3;
4214 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4215 unquant_coeff = (unquant_coeff - 1) | 1;
4217 unquant_coeff = (unquant_coeff + 4) >> 3;
4218 unquant_coeff<<= 3 + 3;
4220 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4222 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4223 else score= distortion + esc_length*lambda;
4225 if(score < best_score){
4227 best_level= level - 64;
4230 block[0]= best_level;
4231 s->coded_score[n] = best_score - dc*dc;
4232 if(best_level == 0) return -1;
4233 else return last_non_zero;
/* backtrack the winning path and write levels into the block */
4237 av_assert2(last_level);
4239 block[ perm_scantable[last_non_zero] ]= last_level;
4242 for(; i>start_i; i -= run_tab[i] + 1){
4243 block[ perm_scantable[i-1] ]= level_tab[i];
4246 return last_non_zero;
/* 8x8 DCT basis functions in fixed point (BASIS_SHIFT), indexed by
 * the IDCT permutation; built lazily by dct_quantize_refine(). */
4249 static int16_t basis[64][64];
/* Fill basis[] with the 2-D DCT basis, permuted by 'perm', including
 * the orthonormalization factor sqrt(0.5) for the DC row/column. */
4251 static void build_basis(uint8_t *perm){
4258 double s= 0.25*(1<<BASIS_SHIFT);
4260 int perm_index= perm[index];
4261 if(i==0) s*= sqrt(0.5);
4262 if(j==0) s*= sqrt(0.5);
4263 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Iterative refinement of an already-quantized 8x8 block (quantizer
 * noise shaping): repeatedly try +/-1 changes to individual levels and
 * keep a change when its distortion delta (measured against the basis[]
 * reconstruction in rem[]) plus lambda * VLC-bit delta improves the
 * total score. Returns the possibly updated last nonzero index. */
4270 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4271 int16_t *block, int16_t *weight, int16_t *orig,
4274 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4275 const uint8_t *scantable;
4276 const uint8_t *perm_scantable;
4277 // unsigned int threshold1, threshold2;
4282 int qmul, qadd, start_i, last_non_zero, i, dc;
4284 uint8_t * last_length;
4286 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* basis[] is filled on first use */
4288 if(basis[0][0] == 0)
4289 build_basis(s->idsp.idct_permutation);
4294 scantable= s->intra_scantable.scantable;
4295 perm_scantable= s->intra_scantable.permutated;
4302 /* For AIC we skip quant/dequant of INTRADC */
4306 q <<= RECON_SHIFT-3;
4307 /* note: block[0] is assumed to be positive */
4309 // block[0] = (block[0] + (q >> 1)) / q;
4311 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4312 // bias= 1<<(QMAT_SHIFT-1);
4313 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4314 length = s->intra_chroma_ac_vlc_length;
4315 last_length= s->intra_chroma_ac_vlc_last_length;
4317 length = s->intra_ac_vlc_length;
4318 last_length= s->intra_ac_vlc_last_length;
4321 scantable= s->inter_scantable.scantable;
4322 perm_scantable= s->inter_scantable.permutated;
4325 length = s->inter_ac_vlc_length;
4326 last_length= s->inter_ac_vlc_last_length;
4328 last_non_zero = s->block_last_index[n];
/* rem[] = residual between the original and current reconstruction */
4330 dc += (1<<(RECON_SHIFT-1));
4331 for(i=0; i<64; i++){
4332 rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
/* derive the per-coefficient weights used for distortion */
4336 for(i=0; i<64; i++){
4341 w= FFABS(weight[i]) + qns*one;
4342 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4345 // w=weight[i] = (63*qns + (w/2)) / w;
4348 av_assert2(w<(1<<6));
4351 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* subtract the current reconstruction and build the run-length index */
4355 for(i=start_i; i<=last_non_zero; i++){
4356 int j= perm_scantable[i];
4357 const int level= block[j];
4361 if(level<0) coeff= qmul*level - qadd;
4362 else coeff= qmul*level + qadd;
4363 run_tab[rle_index++]=run;
4366 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
/* main refinement loop: find the single best +/-1 change per pass */
4373 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4376 int run2, best_unquant_change=0, analyze_gradient;
4377 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
4379 if(analyze_gradient){
4380 for(i=0; i<64; i++){
4383 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
/* DC (intra only): try nudging block[0] by +/-1 */
4389 const int level= block[0];
4390 int change, old_coeff;
4392 av_assert2(s->mb_intra);
4396 for(change=-1; change<=1; change+=2){
4397 int new_level= level + change;
4398 int score, new_coeff;
4400 new_coeff= q*new_level;
4401 if(new_coeff >= 2048 || new_coeff < 0)
4404 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4405 new_coeff - old_coeff);
4406 if(score<best_score){
4409 best_change= change;
4410 best_unquant_change= new_coeff - old_coeff;
4417 run2= run_tab[rle_index++];
/* AC coefficients: try +/-1 on each, accounting for VLC bit deltas */
4421 for(i=start_i; i<64; i++){
4422 int j= perm_scantable[i];
4423 const int level= block[j];
4424 int change, old_coeff;
4426 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4430 if(level<0) old_coeff= qmul*level - qadd;
4431 else old_coeff= qmul*level + qadd;
4432 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4436 av_assert2(run2>=0 || i >= last_non_zero );
4439 for(change=-1; change<=1; change+=2){
4440 int new_level= level + change;
4441 int score, new_coeff, unquant_change;
4444 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4448 if(new_level<0) new_coeff= qmul*new_level - qadd;
4449 else new_coeff= qmul*new_level + qadd;
4450 if(new_coeff >= 2048 || new_coeff <= -2048)
4452 //FIXME check for overflow
/* existing coefficient changes magnitude: re-cost its VLC */
4455 if(level < 63 && level > -63){
4456 if(i < last_non_zero)
4457 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4458 - length[UNI_AC_ENC_INDEX(run, level+64)];
4460 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4461 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* zero coefficient becomes +/-1: a run is split in two */
4464 av_assert2(FFABS(new_level)==1);
4466 if(analyze_gradient){
4467 int g= d1[ scantable[i] ];
4468 if(g && (g^new_level) >= 0)
4472 if(i < last_non_zero){
4473 int next_i= i + run2 + 1;
4474 int next_level= block[ perm_scantable[next_i] ] + 64;
4476 if(next_level&(~127))
4479 if(next_i < last_non_zero)
4480 score += length[UNI_AC_ENC_INDEX(run, 65)]
4481 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4482 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4484 score += length[UNI_AC_ENC_INDEX(run, 65)]
4485 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4486 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4488 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4490 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4491 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* +/-1 coefficient becomes zero: two runs merge into one */
4497 av_assert2(FFABS(level)==1);
4499 if(i < last_non_zero){
4500 int next_i= i + run2 + 1;
4501 int next_level= block[ perm_scantable[next_i] ] + 64;
4503 if(next_level&(~127))
4506 if(next_i < last_non_zero)
4507 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4508 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4509 - length[UNI_AC_ENC_INDEX(run, 65)];
4511 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4512 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4513 - length[UNI_AC_ENC_INDEX(run, 65)];
4515 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4517 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4518 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4525 unquant_change= new_coeff - old_coeff;
4526 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4528 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4530 if(score<best_score){
4533 best_change= change;
4534 best_unquant_change= unquant_change;
4538 prev_level= level + 64;
4539 if(prev_level&(~127))
/* apply the winning change, keep last_non_zero consistent, loop again */
4549 int j= perm_scantable[ best_coeff ];
4551 block[j] += best_change;
4553 if(best_coeff > last_non_zero){
4554 last_non_zero= best_coeff;
4555 av_assert2(block[j]);
4557 for(; last_non_zero>=start_i; last_non_zero--){
4558 if(block[perm_scantable[last_non_zero]])
/* rebuild the run-length table after the change */
4565 for(i=start_i; i<=last_non_zero; i++){
4566 int j= perm_scantable[i];
4567 const int level= block[j];
4570 run_tab[rle_index++]=run;
4577 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4583 return last_non_zero;
4587 * Permute an 8x8 block according to permutation.
4588 * @param block the block which will be permuted according to
4589 * the given permutation vector
4590 * @param permutation the permutation vector
4591 * @param last the last non zero coefficient in scantable order, used to
4592 * speed the permutation up
4593 * @param scantable the used scantable, this is only used to speed the
4594 * permutation up, the block is not (inverse) permutated
4595 * to scantable order!
4597 void ff_block_permute(int16_t *block, uint8_t *permutation,
4598 const uint8_t *scantable, int last)
4605 //FIXME it is ok but not clean and might fail for some permutations
4606 // if (permutation[1] == 1)
/* copy the nonzero coefficients aside, then scatter them back through
 * the permutation (in-place scatter would overwrite inputs) */
4609 for (i = 0; i <= last; i++) {
4610 const int j = scantable[i];
4615 for (i = 0; i <= last; i++) {
4616 const int j = scantable[i];
4617 const int perm_j = permutation[j];
4618 block[perm_j] = temp[j];
/* Plain (non-trellis) C quantizer for one 8x8 block: forward DCT,
 * optional DCT-domain denoising, then biased division by the selected
 * quant matrix. Sets *overflow when a level may exceed s->max_qcoeff
 * and finally permutes nonzero coefficients for the IDCT in use.
 * Returns the index of the last nonzero coefficient in scan order. */
4622 int ff_dct_quantize_c(MpegEncContext *s,
4623 int16_t *block, int n,
4624 int qscale, int *overflow)
4626 int i, j, level, last_non_zero, q, start_i;
4628 const uint8_t *scantable;
4631 unsigned int threshold1, threshold2;
4633 s->fdsp.fdct(block);
4635 if(s->dct_error_sum)
4636 s->denoise_dct(s, block);
/* intra: DC is quantized separately from the AC coefficients */
4639 scantable= s->intra_scantable.scantable;
4647 /* For AIC we skip quant/dequant of INTRADC */
4650 /* note: block[0] is assumed to be positive */
4651 block[0] = (block[0] + (q >> 1)) / q;
4654 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4655 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4657 scantable= s->inter_scantable.scantable;
4660 qmat = s->q_inter_matrix[qscale];
4661 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4663 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4664 threshold2= (threshold1<<1);
/* scan backwards for the last coefficient above the dead zone */
4665 for(i=63;i>=start_i;i--) {
4667 level = block[j] * qmat[j];
4669 if(((unsigned)(level+threshold1))>threshold2){
/* quantize everything up to it; small values collapse to zero */
4676 for(i=start_i; i<=last_non_zero; i++) {
4678 level = block[j] * qmat[j];
4680 // if( bias+level >= (1<<QMAT_SHIFT)
4681 // || bias-level >= (1<<QMAT_SHIFT)){
4682 if(((unsigned)(level+threshold1))>threshold2){
4684 level= (bias + level)>>QMAT_SHIFT;
4687 level= (bias - level)>>QMAT_SHIFT;
4695 *overflow= s->max_qcoeff < max; //overflow might have happened
4697 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4698 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4699 ff_block_permute(block, s->idsp.idct_permutation,
4700 scantable, last_non_zero);
4702 return last_non_zero;
4705 #define OFFSET(x) offsetof(MpegEncContext, x)
4706 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* AVOptions exposed by the H.263 encoder */
4707 static const AVOption h263_options[] = {
4708 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4709 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass binding the H.263 encoder to its private options */
4714 static const AVClass h263_class = {
4715 .class_name = "H.263 encoder",
4716 .item_name = av_default_item_name,
4717 .option = h263_options,
4718 .version = LIBAVUTIL_VERSION_INT,
/* H.263 / H.263-1996 encoder registration (mpegvideo-based) */
4721 AVCodec ff_h263_encoder = {
4723 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4724 .type = AVMEDIA_TYPE_VIDEO,
4725 .id = AV_CODEC_ID_H263,
4726 .priv_data_size = sizeof(MpegEncContext),
4727 .init = ff_mpv_encode_init,
4728 .encode2 = ff_mpv_encode_picture,
4729 .close = ff_mpv_encode_end,
4730 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4731 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4732 .priv_class = &h263_class,
/* AVOptions exposed by the H.263+ (H.263-1998) encoder */
4735 static const AVOption h263p_options[] = {
4736 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4737 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4738 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4739 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
/* AVClass binding the H.263+ encoder to its private options */
4743 static const AVClass h263p_class = {
4744 .class_name = "H.263p encoder",
4745 .item_name = av_default_item_name,
4746 .option = h263p_options,
4747 .version = LIBAVUTIL_VERSION_INT,
4750 AVCodec ff_h263p_encoder = {
4752 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4753 .type = AVMEDIA_TYPE_VIDEO,
4754 .id = AV_CODEC_ID_H263P,
4755 .priv_data_size = sizeof(MpegEncContext),
4756 .init = ff_mpv_encode_init,
4757 .encode2 = ff_mpv_encode_picture,
4758 .close = ff_mpv_encode_end,
4759 .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4760 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4761 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4762 .priv_class = &h263p_class,
4765 static const AVClass msmpeg4v2_class = {
4766 .class_name = "msmpeg4v2 encoder",
4767 .item_name = av_default_item_name,
4768 .option = ff_mpv_generic_options,
4769 .version = LIBAVUTIL_VERSION_INT,
4772 AVCodec ff_msmpeg4v2_encoder = {
4773 .name = "msmpeg4v2",
4774 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4775 .type = AVMEDIA_TYPE_VIDEO,
4776 .id = AV_CODEC_ID_MSMPEG4V2,
4777 .priv_data_size = sizeof(MpegEncContext),
4778 .init = ff_mpv_encode_init,
4779 .encode2 = ff_mpv_encode_picture,
4780 .close = ff_mpv_encode_end,
4781 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4782 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4783 .priv_class = &msmpeg4v2_class,
4786 static const AVClass msmpeg4v3_class = {
4787 .class_name = "msmpeg4v3 encoder",
4788 .item_name = av_default_item_name,
4789 .option = ff_mpv_generic_options,
4790 .version = LIBAVUTIL_VERSION_INT,
4793 AVCodec ff_msmpeg4v3_encoder = {
4795 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4796 .type = AVMEDIA_TYPE_VIDEO,
4797 .id = AV_CODEC_ID_MSMPEG4V3,
4798 .priv_data_size = sizeof(MpegEncContext),
4799 .init = ff_mpv_encode_init,
4800 .encode2 = ff_mpv_encode_picture,
4801 .close = ff_mpv_encode_end,
4802 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4803 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4804 .priv_class = &msmpeg4v3_class,
4807 static const AVClass wmv1_class = {
4808 .class_name = "wmv1 encoder",
4809 .item_name = av_default_item_name,
4810 .option = ff_mpv_generic_options,
4811 .version = LIBAVUTIL_VERSION_INT,
4814 AVCodec ff_wmv1_encoder = {
4816 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4817 .type = AVMEDIA_TYPE_VIDEO,
4818 .id = AV_CODEC_ID_WMV1,
4819 .priv_data_size = sizeof(MpegEncContext),
4820 .init = ff_mpv_encode_init,
4821 .encode2 = ff_mpv_encode_picture,
4822 .close = ff_mpv_encode_end,
4823 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4824 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4825 .priv_class = &wmv1_class,