2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
31 * The simplest mpeg encoder (well, it was the simplest!).
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/pixdesc.h"
40 #include "libavutil/opt.h"
45 #include "mpegvideo.h"
46 #include "mpegvideodata.h"
50 #include "mjpegenc_common.h"
52 #include "mpegutils.h"
54 #include "speedhqenc.h"
56 #include "pixblockdsp.h"
60 #include "aandcttab.h"
62 #include "mpeg4video.h"
64 #include "bytestream.h"
67 #include "packet_internal.h"
72 #define QUANT_BIAS_SHIFT 8
74 #define QMAT_SHIFT_MMX 16
/* Forward declarations for file-local (static) encoder helpers defined
 * later in this translation unit. */
77 static int encode_picture(MpegEncContext *s, int picture_number);
78 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
79 static int sse_mb(MpegEncContext *s);
80 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
81 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
/* Default motion-vector penalty / fcode tables shared by all encoder
 * instances; mpv_encode_defaults() below installs them into the context
 * (s->me.mv_penalty / s->fcode_tab). */
83 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
84 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
86 const AVOption ff_mpv_generic_options[] = {
/**
 * Precompute per-qscale quantization multiplier tables from a quant matrix.
 *
 * For each qscale in [qmin, qmax] this fills qmat[qscale][] (32-bit
 * multipliers) and qmat16[qscale][0/1][] (16-bit multiplier + bias pairs,
 * presumably for the SIMD quantizer — TODO confirm against dct_quantize users).
 * The scaling constants differ per forward-DCT implementation (islow vs
 * faandct vs ifast), selected by comparing s->fdsp.fdct function pointers.
 *
 * NOTE(review): this listing is elided — the embedded original line numbers
 * jump (91, 96, 100, ...), so declarations, braces and some statements are
 * missing from this view.
 */
91 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
92 uint16_t (*qmat16)[2][64],
93 const uint16_t *quant_matrix,
94 int bias, int qmin, int qmax, int intra)
96 FDCTDSPContext *fdsp = &s->fdsp;
100 for (qscale = qmin; qscale <= qmax; qscale++) {
/* qscale2: MPEG-2 non-linear qscale mapping when q_scale_type is set,
 * otherwise the linear (doubled) qscale. */
104 if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
105 else qscale2 = qscale << 1;
/* Branch 1: accurate "islow" (and faandct) DCTs — no AAN prescale. */
107 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
109 fdsp->fdct == ff_faandct ||
110 #endif /* CONFIG_FAANDCT */
111 fdsp->fdct == ff_jpeg_fdct_islow_10) {
112 for (i = 0; i < 64; i++) {
/* j: position after the IDCT permutation, so the table matches the
 * coefficient layout the (I)DCT code uses. */
113 const int j = s->idsp.idct_permutation[i];
114 int64_t den = (int64_t) qscale2 * quant_matrix[j];
115 /* 16 <= qscale * quant_matrix[i] <= 7905
116 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
117 * 19952 <= x <= 249205026
118 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
119 * 3444240 >= (1 << 36) / (x) >= 275 */
121 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
/* Branch 2: AAN "ifast" DCT — output carries ff_aanscales[] factors,
 * so fold them into the divisor. */
123 } else if (fdsp->fdct == ff_fdct_ifast) {
124 for (i = 0; i < 64; i++) {
125 const int j = s->idsp.idct_permutation[i];
126 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
127 /* 16 <= qscale * quant_matrix[i] <= 7905
128 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
129 * 19952 <= x <= 249205026
130 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
131 * 3444240 >= (1 << 36) / (x) >= 275 */
133 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
/* Branch 3 (elided else): also fills the 16-bit multiplier/bias tables. */
136 for (i = 0; i < 64; i++) {
137 const int j = s->idsp.idct_permutation[i];
138 int64_t den = (int64_t) qscale2 * quant_matrix[j];
139 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
140 * Assume x = qscale * quant_matrix[i]
142 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
143 * so 32768 >= (1 << 19) / (x) >= 67 */
144 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
145 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
146 // (qscale * quant_matrix[i]);
147 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
/* Clamp: a multiplier of 0 or exactly 128*256 would overflow/underflow
 * the 16-bit quantizer, so pin it just below the limit. */
149 if (qmat16[qscale][0][i] == 0 ||
150 qmat16[qscale][0][i] == 128 * 256)
151 qmat16[qscale][0][i] = 128 * 256 - 1;
152 qmat16[qscale][1][i] =
153 ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
154 qmat16[qscale][0][i]);
/* Overflow check: reduce the effective shift while max * multiplier
 * would exceed INT_MAX, and warn that overflows become possible. */
158 for (i = intra; i < 64; i++) {
160 if (fdsp->fdct == ff_fdct_ifast) {
161 max = (8191LL * ff_aanscales[i]) >> 14;
163 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
169 av_log(s->avctx, AV_LOG_INFO,
170 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/*
 * Derive s->qscale (and s->lambda2) from the current lambda.
 *
 * NOTE(review): elided listing — embedded line numbers jump, so parts of
 * both branches (loop body, assignments) are missing from this view.
 */
175 static inline void update_qscale(MpegEncContext *s)
/* First branch is deliberately disabled ("&& 0"): it would pick the
 * MPEG-2 non-linear qscale whose scaled value is closest to lambda*139,
 * skipping entries outside [qmin, qmax] (qmax ignored when
 * vbv_ignore_qmax is set). */
177 if (s->q_scale_type == 1 && 0) {
179 int bestdiff=INT_MAX;
182 for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
183 int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
184 if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
185 (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
187 if (diff < bestdiff) {
/* Active path: linear mapping lambda -> qscale with rounding, then clamp
 * to [qmin, qmax] (qmax widened to 31 while VBV forces qmax to be ignored). */
194 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
195 (FF_LAMBDA_SHIFT + 7);
196 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
/* Keep lambda2 consistent with lambda (rounded square). */
199 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/*
 * Write a 64-entry quantization matrix to the bitstream in zigzag order,
 * 8 bits per coefficient. (Elided view: braces/trailing code missing.)
 */
203 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
209 for (i = 0; i < 64; i++) {
210 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
217 * init s->current_picture.qscale_table from s->lambda_table
/* Converts each macroblock's lambda into a clamped per-MB qscale using the
 * same lambda*139 linear mapping as update_qscale(). (Elided view: the
 * closing clamp argument and braces are missing.) */
219 void ff_init_qscale_tab(MpegEncContext *s)
221 int8_t * const qscale_table = s->current_picture.qscale_table;
224 for (i = 0; i < s->mb_num; i++) {
225 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
226 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
227 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/*
 * Copy the fields that motion estimation may have changed from one slice
 * context (src) back into another (dst), via the local COPY() macro.
 * (Elided view: some COPY() lines and the macro #undef are missing.)
 */
232 static void update_duplicate_context_after_me(MpegEncContext *dst,
235 #define COPY(a) dst->a= src->a
237 COPY(current_picture);
243 COPY(picture_in_gop_number);
244 COPY(gop_picture_number);
245 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
246 COPY(progressive_frame); // FIXME don't set in encode_header
247 COPY(partitioned_frame); // FIXME don't set in encode_header
252 * Set the given MpegEncContext to defaults for encoding.
253 * the changed fields will not depend upon the prior state of the MpegEncContext.
255 static void mpv_encode_defaults(MpegEncContext *s)
/* Shared decoder/encoder defaults first, then encoder-specific state. */
258 ff_mpv_common_defaults(s);
/* Fill the middle of the static fcode table; the file-scope
 * default_fcode_tab / default_mv_penalty are then installed into the
 * context. (Elided view: surrounding initialization lines are missing.) */
260 for (i = -16; i < 16; i++) {
261 default_fcode_tab[i + MAX_MV] = 1;
263 s->me.mv_penalty = default_mv_penalty;
264 s->fcode_tab = default_fcode_tab;
266 s->input_picture_number = 0;
267 s->picture_in_gop_number = 0;
/*
 * Initialize the DCT/quantization function pointers for encoding:
 * x86-specific overrides first, H.263 DSP when that encoder is built,
 * the generic C quantizer as fallback, and the trellis quantizer when
 * avctx->trellis is requested (fast_dct_quantize keeps the non-trellis
 * version). (Elided view: return statement is missing.)
 */
270 av_cold int ff_dct_encode_init(MpegEncContext *s)
273 ff_dct_encode_init_x86(s);
275 if (CONFIG_H263_ENCODER)
276 ff_h263dsp_init(&s->h263dsp);
277 if (!s->dct_quantize)
278 s->dct_quantize = ff_dct_quantize_c;
280 s->denoise_dct = denoise_dct_c;
281 s->fast_dct_quantize = s->dct_quantize;
282 if (s->avctx->trellis)
283 s->dct_quantize = dct_quantize_trellis_c;
288 /* init video encoder */
/*
 * Main encoder init: validates AVCodecContext options against the selected
 * codec, fills MpegEncContext, allocates tables, and initializes the DSP,
 * rate-control and codec-specific sub-encoders.
 *
 * NOTE(review): this listing is heavily elided — the embedded original line
 * numbers jump throughout, so many braces, break statements and error-path
 * gotos are missing from this view. Comments below only describe what the
 * visible lines show.
 */
289 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
291 MpegEncContext *s = avctx->priv_data;
292 AVCPBProperties *cpb_props;
293 int i, ret, format_supported;
295 mpv_encode_defaults(s);
/* --- Pixel-format validation per codec --- */
297 switch (avctx->codec_id) {
298 case AV_CODEC_ID_MPEG2VIDEO:
299 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
300 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
301 av_log(avctx, AV_LOG_ERROR,
302 "only YUV420 and YUV422 are supported\n");
303 return AVERROR(EINVAL);
306 case AV_CODEC_ID_MJPEG:
307 case AV_CODEC_ID_AMV:
308 format_supported = 0;
309 /* JPEG color space */
310 if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
311 avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
312 avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
313 (avctx->color_range == AVCOL_RANGE_JPEG &&
314 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
315 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
316 avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
317 format_supported = 1;
318 /* MPEG color space */
319 else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
320 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
321 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
322 avctx->pix_fmt == AV_PIX_FMT_YUV444P))
323 format_supported = 1;
325 if (!format_supported) {
326 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
327 return AVERROR(EINVAL);
330 case AV_CODEC_ID_SPEEDHQ:
331 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
332 avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
333 avctx->pix_fmt != AV_PIX_FMT_YUV444P) {
334 av_log(avctx, AV_LOG_ERROR,
335 "only YUV420/YUV422/YUV444 are supported (no alpha support yet)\n");
336 return AVERROR(EINVAL);
/* default case (label elided): all remaining codecs require YUV420P. */
340 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
341 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
342 return AVERROR(EINVAL);
/* --- Derive chroma subsampling mode from the pixel format --- */
346 switch (avctx->pix_fmt) {
347 case AV_PIX_FMT_YUVJ444P:
348 case AV_PIX_FMT_YUV444P:
349 s->chroma_format = CHROMA_444;
351 case AV_PIX_FMT_YUVJ422P:
352 case AV_PIX_FMT_YUV422P:
353 s->chroma_format = CHROMA_422;
355 case AV_PIX_FMT_YUVJ420P:
356 case AV_PIX_FMT_YUV420P:
358 s->chroma_format = CHROMA_420;
362 avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
/* --- Import deprecated public options into private fields --- */
364 #if FF_API_PRIVATE_OPT
365 FF_DISABLE_DEPRECATION_WARNINGS
366 if (avctx->rtp_payload_size)
367 s->rtp_payload_size = avctx->rtp_payload_size;
368 if (avctx->me_penalty_compensation)
369 s->me_penalty_compensation = avctx->me_penalty_compensation;
371 s->me_pre = avctx->pre_me;
372 FF_ENABLE_DEPRECATION_WARNINGS
/* --- Copy basic parameters and sanitize GOP / B-frame settings --- */
375 s->bit_rate = avctx->bit_rate;
376 s->width = avctx->width;
377 s->height = avctx->height;
378 if (avctx->gop_size > 600 &&
379 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
380 av_log(avctx, AV_LOG_WARNING,
381 "keyframe interval too large!, reducing it from %d to %d\n",
382 avctx->gop_size, 600);
383 avctx->gop_size = 600;
385 s->gop_size = avctx->gop_size;
387 if (avctx->max_b_frames > MAX_B_FRAMES) {
388 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
389 "is %d.\n", MAX_B_FRAMES);
390 avctx->max_b_frames = MAX_B_FRAMES;
392 s->max_b_frames = avctx->max_b_frames;
393 s->codec_id = avctx->codec->id;
394 s->strict_std_compliance = avctx->strict_std_compliance;
395 s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
396 s->rtp_mode = !!s->rtp_payload_size;
397 s->intra_dc_precision = avctx->intra_dc_precision;
399 // workaround some differences between how applications specify dc precision
400 if (s->intra_dc_precision < 0) {
401 s->intra_dc_precision += 8;
402 } else if (s->intra_dc_precision >= 8)
403 s->intra_dc_precision -= 8;
405 if (s->intra_dc_precision < 0) {
406 av_log(avctx, AV_LOG_ERROR,
407 "intra dc precision must be positive, note some applications use"
408 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
409 return AVERROR(EINVAL);
/* Body of this condition elided in this view. */
412 if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))
/* Only MPEG-2 supports a non-zero intra DC precision (up to 3). */
415 if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
416 av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
417 return AVERROR(EINVAL);
419 s->user_specified_pts = AV_NOPTS_VALUE;
421 if (s->gop_size <= 1) {
429 s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
/* Adaptive quant if any masking option or QP_RD is requested (condition
 * partially elided here). */
431 s->adaptive_quant = (avctx->lumi_masking ||
432 avctx->dark_masking ||
433 avctx->temporal_cplx_masking ||
434 avctx->spatial_cplx_masking ||
437 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
440 s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
/* --- Rate-control sanity: auto-pick a VBV buffer size if only a max
 * rate was given, with codec-specific formulas --- */
442 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
443 switch(avctx->codec_id) {
444 case AV_CODEC_ID_MPEG1VIDEO:
445 case AV_CODEC_ID_MPEG2VIDEO:
446 avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
448 case AV_CODEC_ID_MPEG4:
449 case AV_CODEC_ID_MSMPEG4V1:
450 case AV_CODEC_ID_MSMPEG4V2:
451 case AV_CODEC_ID_MSMPEG4V3:
452 if (avctx->rc_max_rate >= 15000000) {
453 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
454 } else if(avctx->rc_max_rate >= 2000000) {
455 avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
456 } else if(avctx->rc_max_rate >= 384000) {
457 avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
459 avctx->rc_buffer_size = 40;
460 avctx->rc_buffer_size *= 16384;
463 if (avctx->rc_buffer_size) {
464 av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
468 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
469 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
470 return AVERROR(EINVAL);
473 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
474 av_log(avctx, AV_LOG_INFO,
475 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
478 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
479 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
480 return AVERROR(EINVAL);
483 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
484 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
485 return AVERROR(EINVAL);
488 if (avctx->rc_max_rate &&
489 avctx->rc_max_rate == avctx->bit_rate &&
490 avctx->rc_max_rate != avctx->rc_min_rate) {
491 av_log(avctx, AV_LOG_INFO,
492 "impossible bitrate constraints, this will fail\n");
/* VBV buffer must hold at least one frame's worth of bits. */
495 if (avctx->rc_buffer_size &&
496 avctx->bit_rate * (int64_t)avctx->time_base.num >
497 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
498 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
499 return AVERROR(EINVAL);
502 if (!s->fixed_qscale &&
503 avctx->bit_rate * av_q2d(avctx->time_base) >
504 avctx->bit_rate_tolerance) {
505 av_log(avctx, AV_LOG_WARNING,
506 "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
507 avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
/* CBR MPEG-1/2: warn when the vbv_delay field (16 bits at 90kHz) cannot
 * represent the requested buffer size. */
510 if (avctx->rc_max_rate &&
511 avctx->rc_min_rate == avctx->rc_max_rate &&
512 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
513 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
514 90000LL * (avctx->rc_buffer_size - 1) >
515 avctx->rc_max_rate * 0xFFFFLL) {
516 av_log(avctx, AV_LOG_INFO,
517 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
518 "specified vbv buffer is too large for the given bitrate!\n");
/* --- Feature/codec compatibility checks --- */
521 if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
522 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
523 s->codec_id != AV_CODEC_ID_FLV1) {
524 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
525 return AVERROR(EINVAL);
528 if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
529 av_log(avctx, AV_LOG_ERROR,
530 "OBMC is only supported with simple mb decision\n");
531 return AVERROR(EINVAL);
534 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
535 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
536 return AVERROR(EINVAL);
539 if (s->max_b_frames &&
540 s->codec_id != AV_CODEC_ID_MPEG4 &&
541 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
542 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
543 av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
544 return AVERROR(EINVAL);
546 if (s->max_b_frames < 0) {
547 av_log(avctx, AV_LOG_ERROR,
548 "max b frames must be 0 or positive for mpegvideo based encoders\n");
549 return AVERROR(EINVAL);
/* Sample aspect ratio is stored in 8 bits per component for these codecs;
 * reduce out-of-range values instead of failing. */
552 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
553 s->codec_id == AV_CODEC_ID_H263 ||
554 s->codec_id == AV_CODEC_ID_H263P) &&
555 (avctx->sample_aspect_ratio.num > 255 ||
556 avctx->sample_aspect_ratio.den > 255)) {
557 av_log(avctx, AV_LOG_WARNING,
558 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
559 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
560 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
561 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
/* --- Per-codec resolution/alignment limits --- */
564 if ((s->codec_id == AV_CODEC_ID_H263 ||
565 s->codec_id == AV_CODEC_ID_H263P) &&
566 (avctx->width > 2048 ||
567 avctx->height > 1152 )) {
568 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
569 return AVERROR(EINVAL);
571 if ((s->codec_id == AV_CODEC_ID_H263 ||
572 s->codec_id == AV_CODEC_ID_H263P) &&
573 ((avctx->width &3) ||
574 (avctx->height&3) )) {
575 av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
576 return AVERROR(EINVAL);
579 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
580 (avctx->width > 4095 ||
581 avctx->height > 4095 )) {
582 av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
583 return AVERROR(EINVAL);
586 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
587 (avctx->width > 16383 ||
588 avctx->height > 16383 )) {
589 av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
590 return AVERROR(EINVAL);
593 if (s->codec_id == AV_CODEC_ID_RV10 &&
595 avctx->height&15 )) {
596 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
597 return AVERROR(EINVAL);
600 if (s->codec_id == AV_CODEC_ID_RV20 &&
603 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
604 return AVERROR(EINVAL);
607 if ((s->codec_id == AV_CODEC_ID_WMV1 ||
608 s->codec_id == AV_CODEC_ID_WMV2) &&
610 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
611 return AVERROR(EINVAL);
614 if ((avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
615 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
616 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
617 return AVERROR(EINVAL);
620 #if FF_API_PRIVATE_OPT
621 FF_DISABLE_DEPRECATION_WARNINGS
622 if (avctx->mpeg_quant)
623 s->mpeg_quant = avctx->mpeg_quant;
624 FF_ENABLE_DEPRECATION_WARNINGS
627 // FIXME mpeg2 uses that too
628 if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
629 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
630 av_log(avctx, AV_LOG_ERROR,
631 "mpeg2 style quantization not supported by codec\n");
632 return AVERROR(EINVAL);
635 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
636 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
637 return AVERROR(EINVAL);
640 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
641 avctx->mb_decision != FF_MB_DECISION_RD) {
642 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
643 return AVERROR(EINVAL);
646 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
647 (s->codec_id == AV_CODEC_ID_AMV ||
648 s->codec_id == AV_CODEC_ID_MJPEG)) {
649 // Used to produce garbage with MJPEG.
650 av_log(avctx, AV_LOG_ERROR,
651 "QP RD is no longer compatible with MJPEG or AMV\n");
652 return AVERROR(EINVAL);
655 #if FF_API_PRIVATE_OPT
656 FF_DISABLE_DEPRECATION_WARNINGS
657 if (avctx->scenechange_threshold)
658 s->scenechange_threshold = avctx->scenechange_threshold;
659 FF_ENABLE_DEPRECATION_WARNINGS
662 if (s->scenechange_threshold < 1000000000 &&
663 (avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
664 av_log(avctx, AV_LOG_ERROR,
665 "closed gop with scene change detection are not supported yet, "
666 "set threshold to 1000000000\n");
667 return AVERROR_PATCHWELCOME;
670 if (avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
671 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
672 s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
673 av_log(avctx, AV_LOG_ERROR,
674 "low delay forcing is only available for mpeg2, "
675 "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
676 return AVERROR(EINVAL);
678 if (s->max_b_frames != 0) {
679 av_log(avctx, AV_LOG_ERROR,
680 "B-frames cannot be used with low delay\n");
681 return AVERROR(EINVAL);
685 if (s->q_scale_type == 1) {
686 if (avctx->qmax > 28) {
687 av_log(avctx, AV_LOG_ERROR,
688 "non linear quant only supports qmax <= 28 currently\n");
689 return AVERROR_PATCHWELCOME;
693 if (avctx->slices > 1 &&
694 (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
695 av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
696 return AVERROR(EINVAL);
699 if (avctx->thread_count > 1 &&
700 s->codec_id != AV_CODEC_ID_MPEG4 &&
701 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
702 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
703 s->codec_id != AV_CODEC_ID_MJPEG &&
704 (s->codec_id != AV_CODEC_ID_H263P)) {
705 av_log(avctx, AV_LOG_ERROR,
706 "multi threaded encoding not supported by codec\n");
707 return AVERROR_PATCHWELCOME;
710 if (avctx->thread_count < 1) {
711 av_log(avctx, AV_LOG_ERROR,
712 "automatic thread number detection not supported by codec, "
714 return AVERROR_PATCHWELCOME;
717 if (!avctx->time_base.den || !avctx->time_base.num) {
718 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
719 return AVERROR(EINVAL);
722 #if FF_API_PRIVATE_OPT
723 FF_DISABLE_DEPRECATION_WARNINGS
724 if (avctx->b_frame_strategy)
725 s->b_frame_strategy = avctx->b_frame_strategy;
726 if (avctx->b_sensitivity != 40)
727 s->b_sensitivity = avctx->b_sensitivity;
728 FF_ENABLE_DEPRECATION_WARNINGS
731 if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
732 av_log(avctx, AV_LOG_INFO,
733 "notice: b_frame_strategy only affects the first pass\n");
734 s->b_frame_strategy = 0;
/* Reduce the time base by its gcd (guard condition elided in this view). */
737 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
739 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
740 avctx->time_base.den /= i;
741 avctx->time_base.num /= i;
/* --- Quantizer rounding bias defaults (codec-family dependent) --- */
745 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
746 // (a + x * 3 / 8) / x
747 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
748 s->inter_quant_bias = 0;
750 s->intra_quant_bias = 0;
752 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
755 if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
756 av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
757 return AVERROR(EINVAL);
760 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
/* MPEG-4 stores the timebase denominator in 16 bits. */
762 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
763 avctx->time_base.den > (1 << 16) - 1) {
764 av_log(avctx, AV_LOG_ERROR,
765 "timebase %d/%d not supported by MPEG 4 standard, "
766 "the maximum admitted value for the timebase denominator "
767 "is %d\n", avctx->time_base.num, avctx->time_base.den,
769 return AVERROR(EINVAL);
771 s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
/* --- Per-codec setup: output format, delay, sub-encoder init ---
 * (Many break statements and some cases are elided in this view.) */
773 switch (avctx->codec->id) {
774 case AV_CODEC_ID_MPEG1VIDEO:
775 s->out_format = FMT_MPEG1;
776 s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
777 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
779 case AV_CODEC_ID_MPEG2VIDEO:
780 s->out_format = FMT_MPEG1;
781 s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
782 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
785 case AV_CODEC_ID_MJPEG:
786 case AV_CODEC_ID_AMV:
787 s->out_format = FMT_MJPEG;
788 s->intra_only = 1; /* force intra only for jpeg */
789 if (!CONFIG_MJPEG_ENCODER)
790 return AVERROR_ENCODER_NOT_FOUND;
791 if ((ret = ff_mjpeg_encode_init(s)) < 0)
796 case AV_CODEC_ID_SPEEDHQ:
797 s->out_format = FMT_SPEEDHQ;
798 s->intra_only = 1; /* force intra only for SHQ */
799 if (!CONFIG_SPEEDHQ_ENCODER)
800 return AVERROR_ENCODER_NOT_FOUND;
801 if ((ret = ff_speedhq_encode_init(s)) < 0)
806 case AV_CODEC_ID_H261:
807 if (!CONFIG_H261_ENCODER)
808 return AVERROR_ENCODER_NOT_FOUND;
809 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
810 av_log(avctx, AV_LOG_ERROR,
811 "The specified picture size of %dx%d is not valid for the "
812 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
813 s->width, s->height);
814 return AVERROR(EINVAL);
816 s->out_format = FMT_H261;
819 s->rtp_mode = 0; /* Sliced encoding not supported */
821 case AV_CODEC_ID_H263:
822 if (!CONFIG_H263_ENCODER)
823 return AVERROR_ENCODER_NOT_FOUND;
824 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
825 s->width, s->height) == 8) {
826 av_log(avctx, AV_LOG_ERROR,
827 "The specified picture size of %dx%d is not valid for "
828 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
829 "352x288, 704x576, and 1408x1152. "
830 "Try H.263+.\n", s->width, s->height);
831 return AVERROR(EINVAL);
833 s->out_format = FMT_H263;
837 case AV_CODEC_ID_H263P:
838 s->out_format = FMT_H263;
841 s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
842 s->modified_quant = s->h263_aic;
843 s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
844 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
847 /* These are just to be sure */
851 case AV_CODEC_ID_FLV1:
852 s->out_format = FMT_H263;
853 s->h263_flv = 2; /* format = 1; 11-bit codes */
854 s->unrestricted_mv = 1;
855 s->rtp_mode = 0; /* don't allow GOB */
859 case AV_CODEC_ID_RV10:
860 s->out_format = FMT_H263;
864 case AV_CODEC_ID_RV20:
865 s->out_format = FMT_H263;
868 s->modified_quant = 1;
872 s->unrestricted_mv = 0;
874 case AV_CODEC_ID_MPEG4:
875 s->out_format = FMT_H263;
877 s->unrestricted_mv = 1;
878 s->low_delay = s->max_b_frames ? 0 : 1;
879 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
881 case AV_CODEC_ID_MSMPEG4V2:
882 s->out_format = FMT_H263;
884 s->unrestricted_mv = 1;
885 s->msmpeg4_version = 2;
889 case AV_CODEC_ID_MSMPEG4V3:
890 s->out_format = FMT_H263;
892 s->unrestricted_mv = 1;
893 s->msmpeg4_version = 3;
894 s->flipflop_rounding = 1;
898 case AV_CODEC_ID_WMV1:
899 s->out_format = FMT_H263;
901 s->unrestricted_mv = 1;
902 s->msmpeg4_version = 4;
903 s->flipflop_rounding = 1;
907 case AV_CODEC_ID_WMV2:
908 s->out_format = FMT_H263;
910 s->unrestricted_mv = 1;
911 s->msmpeg4_version = 5;
912 s->flipflop_rounding = 1;
/* default case (label elided): unknown codec id. */
917 return AVERROR(EINVAL);
920 #if FF_API_PRIVATE_OPT
921 FF_DISABLE_DEPRECATION_WARNINGS
922 if (avctx->noise_reduction)
923 s->noise_reduction = avctx->noise_reduction;
924 FF_ENABLE_DEPRECATION_WARNINGS
927 avctx->has_b_frames = !s->low_delay;
931 s->progressive_frame =
932 s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
933 AV_CODEC_FLAG_INTERLACED_ME) ||
/* --- Allocate common state and init DSP contexts --- */
938 if ((ret = ff_mpv_common_init(s)) < 0)
941 ff_fdctdsp_init(&s->fdsp, avctx);
942 ff_me_cmp_init(&s->mecc, avctx);
943 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
944 ff_pixblockdsp_init(&s->pdsp, avctx);
945 ff_qpeldsp_init(&s->qdsp);
947 if (s->msmpeg4_version) {
948 int ac_stats_size = 2 * 2 * (MAX_LEVEL + 1) * (MAX_RUN + 1) * 2 * sizeof(int);
949 if (!(s->ac_stats = av_mallocz(ac_stats_size)))
950 return AVERROR(ENOMEM);
/* Allocate stats buffer, quant matrix tables and picture queues; any
 * failure is ENOMEM. */
953 if (!(avctx->stats_out = av_mallocz(256)) ||
954 !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix, 32) ||
955 !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix, 32) ||
956 !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix, 32) ||
957 !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16, 32) ||
958 !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
959 !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16, 32) ||
960 !FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_PICTURE_COUNT) ||
961 !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_PICTURE_COUNT))
962 return AVERROR(ENOMEM);
964 if (s->noise_reduction) {
965 if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
966 return AVERROR(ENOMEM);
969 ff_dct_encode_init(s);
971 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
972 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
974 if (s->slice_context_count > 1) {
977 if (avctx->codec_id == AV_CODEC_ID_H263P)
978 s->h263_slice_structured = 1;
981 s->quant_precision = 5;
983 #if FF_API_PRIVATE_OPT
984 FF_DISABLE_DEPRECATION_WARNINGS
985 if (avctx->frame_skip_threshold)
986 s->frame_skip_threshold = avctx->frame_skip_threshold;
987 if (avctx->frame_skip_factor)
988 s->frame_skip_factor = avctx->frame_skip_factor;
989 if (avctx->frame_skip_exp)
990 s->frame_skip_exp = avctx->frame_skip_exp;
991 if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
992 s->frame_skip_cmp = avctx->frame_skip_cmp;
993 FF_ENABLE_DEPRECATION_WARNINGS
996 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, avctx->ildct_cmp);
997 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
/* Sub-encoder initialization, gated on build-time CONFIG_* flags. */
999 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
1000 ff_h261_encode_init(s);
1001 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
1002 ff_h263_encode_init(s);
1003 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
1004 if ((ret = ff_msmpeg4_encode_init(s)) < 0)
1006 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
1007 && s->out_format == FMT_MPEG1)
1008 ff_mpeg1_encode_init(s);
/* --- Select default quant matrices (permuted for the IDCT) and apply
 * user-supplied overrides --- */
1011 for (i = 0; i < 64; i++) {
1012 int j = s->idsp.idct_permutation[i];
1013 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
1015 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
1016 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
1017 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1018 s->intra_matrix[j] =
1019 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1020 } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
1021 s->intra_matrix[j] =
1022 s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1025 s->chroma_intra_matrix[j] =
1026 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1027 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1029 if (avctx->intra_matrix)
1030 s->intra_matrix[j] = avctx->intra_matrix[i];
1031 if (avctx->inter_matrix)
1032 s->inter_matrix[j] = avctx->inter_matrix[i];
1035 /* precompute matrix */
1036 /* for mjpeg, we do include qscale in the matrix */
1037 if (s->out_format != FMT_MJPEG) {
1038 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
1039 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1041 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
1042 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1046 if ((ret = ff_rate_control_init(s)) < 0)
1049 #if FF_API_PRIVATE_OPT
1050 FF_DISABLE_DEPRECATION_WARNINGS
1051 if (avctx->brd_scale)
1052 s->brd_scale = avctx->brd_scale;
1054 if (avctx->prediction_method)
1055 s->pred = avctx->prediction_method + 1;
1056 FF_ENABLE_DEPRECATION_WARNINGS
/* b_frame_strategy == 2: allocate downscaled (brd_scale) temp frames used
 * for B-frame decision. */
1059 if (s->b_frame_strategy == 2) {
1060 for (i = 0; i < s->max_b_frames + 2; i++) {
1061 s->tmp_frames[i] = av_frame_alloc();
1062 if (!s->tmp_frames[i])
1063 return AVERROR(ENOMEM);
1065 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1066 s->tmp_frames[i]->width = s->width >> s->brd_scale;
1067 s->tmp_frames[i]->height = s->height >> s->brd_scale;
1069 ret = av_frame_get_buffer(s->tmp_frames[i], 0);
/* Export CPB properties as packet side data. */
1075 cpb_props = ff_add_cpb_side_data(avctx);
1077 return AVERROR(ENOMEM);
1078 cpb_props->max_bitrate = avctx->rc_max_rate;
1079 cpb_props->min_bitrate = avctx->rc_min_rate;
1080 cpb_props->avg_bitrate = avctx->bit_rate;
1081 cpb_props->buffer_size = avctx->rc_buffer_size;
/*
 * Encoder teardown: releases everything ff_mpv_encode_init() allocated.
 * (Elided view: embedded line numbers jump; some statements are missing.)
 */
1086 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1088 MpegEncContext *s = avctx->priv_data;
1091 ff_rate_control_uninit(s);
1093 ff_mpv_common_end(s);
1094 if (CONFIG_MJPEG_ENCODER &&
1095 s->out_format == FMT_MJPEG)
1096 ff_mjpeg_encode_close(s);
1098 av_freep(&avctx->extradata);
1100 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1101 av_frame_free(&s->tmp_frames[i]);
1103 ff_free_picture_tables(&s->new_picture);
1104 ff_mpeg_unref_picture(avctx, &s->new_picture);
1106 av_freep(&avctx->stats_out);
1107 av_freep(&s->ac_stats);
/* The chroma matrices may alias the luma ones; only free them when they
 * are separate allocations, then NULL the pointers either way. */
1109 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1110 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1111 s->q_chroma_intra_matrix= NULL;
1112 s->q_chroma_intra_matrix16= NULL;
1113 av_freep(&s->q_intra_matrix);
1114 av_freep(&s->q_inter_matrix);
1115 av_freep(&s->q_intra_matrix16);
1116 av_freep(&s->q_inter_matrix16);
1117 av_freep(&s->input_picture);
1118 av_freep(&s->reordered_input_picture);
1119 av_freep(&s->dct_offset);
/**
 * Sum of absolute errors of a 16x16 luma block against the constant
 * value @ref (typically the block mean) — a flatness measure used by
 * get_intra_count() below.
 * NOTE(review): accumulator declaration and return are outside this extract.
 */
1124 static int get_sae(uint8_t *src, int ref, int stride)
1129     for (y = 0; y < 16; y++) {
1130         for (x = 0; x < 16; x++) {
1131             acc += FFABS(src[x + y * stride] - ref);
/**
 * Count 16x16 macroblocks for which intra coding looks cheaper than
 * inter coding: a block is counted when its deviation from its own
 * mean (sae + 500 margin) is still below the inter SAD against @ref.
 * Height is truncated to a multiple of 16 before scanning.
 */
1138 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1139                            uint8_t *ref, int stride)
1145     h = s->height & ~15;
1147     for (y = 0; y < h; y += 16) {
1148         for (x = 0; x < w; x += 16) {
1149             int offset = x + y * stride;
1150             int sad  = s->mecc.sad[0](NULL, src + offset, ref + offset,
1152             int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1153             int sae  = get_sae(src + offset, mean, stride);
             /* +500 biases the decision toward inter; only clearly flat
              * blocks relative to their SAD count as intra candidates. */
1155             acc += sae + 500 < sad;
/**
 * Thin wrapper around ff_alloc_picture() filling in the encoder's
 * geometry/stride parameters. @shared selects shared-buffer allocation.
 */
1161 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1163     return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1164                             s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1165                             s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1166                             &s->linesize, &s->uvlinesize);
/**
 * Take ownership of one input frame: validate/derive its pts, decide
 * whether the caller's buffer can be referenced directly or must be
 * copied (with edge padding) into an internal Picture, then insert the
 * Picture into s->input_picture[] at the encoding-delay position.
 * NOTE(review): extract is non-contiguous; several branches (e.g. the
 * "direct" decision and error paths) are only partially visible.
 */
1169 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1171     Picture *pic = NULL;
1173     int i, display_picture_number = 0, ret;
1174     int encoding_delay = s->max_b_frames ? s->max_b_frames
1175                                          : (s->low_delay ? 0 : 1);
1176     int flush_offset = 1;
1181     display_picture_number = s->input_picture_number++;
1183     if (pts != AV_NOPTS_VALUE) {
1184         if (s->user_specified_pts != AV_NOPTS_VALUE) {
1185             int64_t last = s->user_specified_pts;
             /* pts must be strictly monotonic; reject non-increasing input */
1188                 av_log(s->avctx, AV_LOG_ERROR,
1189                        "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1191                 return AVERROR(EINVAL);
1194             if (!s->low_delay && display_picture_number == 1)
1195                 s->dts_delta = pts - last;
1197         s->user_specified_pts = pts;
         /* No pts supplied: continue the previous sequence, or fall back
          * to the display picture number. */
1199         if (s->user_specified_pts != AV_NOPTS_VALUE) {
1200             s->user_specified_pts =
1201             pts = s->user_specified_pts + 1;
1202             av_log(s->avctx, AV_LOG_INFO,
1203                    "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1206             pts = display_picture_number;
     /* Direct (zero-copy) referencing requires matching strides and
      * sufficiently aligned data; otherwise the frame is copied below. */
1210         if (!pic_arg->buf[0] ||
1211             pic_arg->linesize[0] != s->linesize ||
1212             pic_arg->linesize[1] != s->uvlinesize ||
1213             pic_arg->linesize[2] != s->uvlinesize)
1215         if ((s->width & 15) || (s->height & 15))
1217         if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1219         if (s->linesize & (STRIDE_ALIGN-1))
1222     ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1223             pic_arg->linesize[1], s->linesize, s->uvlinesize);
1225     i = ff_find_unused_picture(s->avctx, s->picture, direct);
1229     pic = &s->picture[i];
1233         if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1236     ret = alloc_picture(s, pic, direct);
     /* presumably guards against the caller handing back one of our own
      * INPLACE_OFFSET buffers — TODO confirm against full source */
1241         if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1242             pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1243             pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1246             int h_chroma_shift, v_chroma_shift;
1247             av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
         /* Plane-by-plane copy into the internal buffer */
1251             for (i = 0; i < 3; i++) {
1252                 int src_stride = pic_arg->linesize[i];
1253                 int dst_stride = i ? s->uvlinesize : s->linesize;
1254                 int h_shift = i ? h_chroma_shift : 0;
1255                 int v_shift = i ? v_chroma_shift : 0;
1256                 int w = s->width  >> h_shift;
1257                 int h = s->height >> v_shift;
1258                 uint8_t *src = pic_arg->data[i];
1259                 uint8_t *dst = pic->f->data[i];
             /* interlaced MPEG-2 with tall alignment needs extra pad rows */
1262                 if (   s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1263                     && !s->progressive_sequence
1264                     && FFALIGN(s->height, 32) - s->height > 16)
1267                 if (!s->avctx->rc_buffer_size)
1268                     dst += INPLACE_OFFSET;
1270                 if (src_stride == dst_stride)
1271                     memcpy(dst, src, src_stride * h);
1274                     uint8_t *dst2 = dst;
1276                         memcpy(dst2, src, w);
             /* Pad non-multiple-of-16 dimensions by replicating edges */
1281                 if ((s->width & 15) || (s->height & (vpad-1))) {
1282                     s->mpvencdsp.draw_edges(dst, dst_stride,
1292     ret = av_frame_copy_props(pic->f, pic_arg);
1296     pic->f->display_picture_number = display_picture_number;
1297     pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1299     /* Flushing: When we have not received enough input frames,
1300      * ensure s->input_picture[0] contains the first picture */
1301     for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1302         if (s->input_picture[flush_offset])
1305     if (flush_offset <= 1)
1308         encoding_delay = encoding_delay - flush_offset + 1;
1311     /* shift buffer entries */
1312     for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1313         s->input_picture[i - flush_offset] = s->input_picture[i];
1315     s->input_picture[encoding_delay] = (Picture*) pic;
/**
 * Decide whether picture @p is similar enough to reference @ref to be
 * skipped entirely. Per-8x8-block comparison scores are folded with an
 * exponent selected by s->frame_skip_exp, then checked against the
 * frame-skip threshold and a lambda-scaled factor.
 * NOTE(review): return statements are outside this extract.
 */
1320 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1324     int64_t score64 = 0;
1326     for (plane = 0; plane < 3; plane++) {
1327         const int stride = p->f->linesize[plane];
1328         const int bw = plane ? 1 : 2;
1329         for (y = 0; y < s->mb_height * bw; y++) {
1330             for (x = 0; x < s->mb_width * bw; x++) {
                 /* shared pictures have no INPLACE_OFFSET padding */
1331                 int off = p->shared ? 0 : 16;
1332                 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1333                 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1334                 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
             /* exponent magnitude picks the norm: max, L1, L2, L3, L4 */
1336                 switch (FFABS(s->frame_skip_exp)) {
1337                 case 0: score    =  FFMAX(score, v);          break;
1338                 case 1: score   += FFABS(v);                  break;
1339                 case 2: score64 += v * (int64_t)v;                       break;
1340                 case 3: score64 += FFABS(v * (int64_t)v * v);            break;
1341                 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v);  break;
     /* negative exponent: normalize by macroblock count and take the
      * corresponding root */
1350     if (s->frame_skip_exp < 0)
1351         score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1352                       -1.0/s->frame_skip_exp);
1354     if (score64 < s->frame_skip_threshold)
1356     if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
/**
 * Encode one frame with a temporary AVCodecContext @c (send/receive
 * API) and — in the full function — accumulate the compressed size.
 * Used by estimate_best_b_count() for trial encodes.
 * NOTE(review): size accumulation and return are outside this extract.
 */
1361 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1363     AVPacket pkt = { 0 };
1367     av_init_packet(&pkt);
1369     ret = avcodec_send_frame(c, frame);
1374         ret = avcodec_receive_packet(c, &pkt);
1377             av_packet_unref(&pkt);
         /* EAGAIN/EOF terminate the drain loop; anything else is an error */
1378         } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
/**
 * b_frame_strategy == 2: trial-encode downscaled (by s->brd_scale)
 * copies of the queued input pictures with 0..max_b_frames B-frames and
 * pick the B count with the lowest rate-distortion cost.
 * NOTE(review): extract is non-contiguous; the best_rd bookkeeping and
 * several loop bodies are only partially visible.
 */
1385 static int estimate_best_b_count(MpegEncContext *s)
1387     const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1388     const int scale = s->brd_scale;
1389     int width  = s->width  >> scale;
1390     int height = s->height >> scale;
1391     int i, j, out_size, p_lambda, b_lambda, lambda2;
1392     int64_t best_rd  = INT64_MAX;
1393     int best_b_count = -1;
1396     av_assert0(scale >= 0 && scale <= 3);
1399     //s->next_picture_ptr->quality;
1400     p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1401     //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1402     b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1403     if (!b_lambda) // FIXME we should do this somewhere else
1404         b_lambda = p_lambda;
1405     lambda2  = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
     /* Downscale the reference + queued inputs into tmp_frames[] */
1408     for (i = 0; i < s->max_b_frames + 2; i++) {
1409         Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1410                                                 s->next_picture_ptr;
1413         if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1414             pre_input = *pre_input_ptr;
1415             memcpy(data, pre_input_ptr->f->data, sizeof(data));
             /* non-shared pictures carry the INPLACE_OFFSET padding */
1417             if (!pre_input.shared && i) {
1418                 data[0] += INPLACE_OFFSET;
1419                 data[1] += INPLACE_OFFSET;
1420                 data[2] += INPLACE_OFFSET;
1423             s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1424                                        s->tmp_frames[i]->linesize[0],
1426                                        pre_input.f->linesize[0],
1428             s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1429                                        s->tmp_frames[i]->linesize[1],
1431                                        pre_input.f->linesize[1],
1432                                        width >> 1, height >> 1);
1433             s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1434                                        s->tmp_frames[i]->linesize[2],
1436                                        pre_input.f->linesize[2],
1437                                        width >> 1, height >> 1);
     /* Trial-encode with j B-frames for each candidate j */
1441     for (j = 0; j < s->max_b_frames + 1; j++) {
1445         if (!s->input_picture[j])
1448         c = avcodec_alloc_context3(NULL);
1450             return AVERROR(ENOMEM);
1454         c->flags        = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1455         c->flags       |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1456         c->mb_decision  = s->avctx->mb_decision;
1457         c->me_cmp       = s->avctx->me_cmp;
1458         c->mb_cmp       = s->avctx->mb_cmp;
1459         c->me_sub_cmp   = s->avctx->me_sub_cmp;
1460         c->pix_fmt      = AV_PIX_FMT_YUV420P;
1461         c->time_base    = s->avctx->time_base;
1462         c->max_b_frames = s->max_b_frames;
1464         ret = avcodec_open2(c, codec, NULL);
         /* First frame is a fixed-quality I picture common to all trials */
1468         s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1469         s->tmp_frames[0]->quality   = 1 * FF_QP2LAMBDA;
1471         out_size = encode_frame(c, s->tmp_frames[0]);
1477         //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1479         for (i = 0; i < s->max_b_frames + 1; i++) {
             /* every (j+1)-th frame (and the last) is a P, the rest are Bs */
1480             int is_p = i % (j + 1) == j || i == s->max_b_frames;
1482             s->tmp_frames[i + 1]->pict_type = is_p ?
1483                                               AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1484             s->tmp_frames[i + 1]->quality   = is_p ? p_lambda : b_lambda;
1486             out_size = encode_frame(c, s->tmp_frames[i + 1]);
1492             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1495         /* get the delayed frames */
1496         out_size = encode_frame(c, NULL);
1501         rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
         /* add distortion (PSNR error sums) to the rate term */
1503         rd += c->error[0] + c->error[1] + c->error[2];
1511         avcodec_free_context(&c);
1516     return best_b_count;
/**
 * Choose the next picture to code: apply frame skipping, decide the
 * picture type (I/P and how many trailing Bs per b_frame_strategy),
 * reorder input pictures into coding order, and set up
 * s->new_picture / s->current_picture(_ptr).
 * NOTE(review): extract is non-contiguous; error paths and some closing
 * braces are not visible here.
 */
1519 static int select_input_picture(MpegEncContext *s)
     /* advance the reorder queue by one slot */
1523     for (i = 1; i < MAX_PICTURE_COUNT; i++)
1524         s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1525     s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1527     /* set next picture type & ordering */
1528     if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1529         if (s->frame_skip_threshold || s->frame_skip_factor) {
1530             if (s->picture_in_gop_number < s->gop_size &&
1531                 s->next_picture_ptr &&
1532                 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1533                 // FIXME check that the gop check above is +-1 correct
1534                 av_frame_unref(s->input_picture[0]->f);
             /* account a zero-size frame in the VBV model */
1536                 ff_vbv_update(s, 0);
1542         if (/*s->picture_in_gop_number >= s->gop_size ||*/
1543             !s->next_picture_ptr || s->intra_only) {
1544             s->reordered_input_picture[0] = s->input_picture[0];
1545             s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1546             s->reordered_input_picture[0]->f->coded_picture_number =
1547                 s->coded_picture_number++;
         /* second pass: picture types come from the first-pass stats */
1551             if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1552                 for (i = 0; i < s->max_b_frames + 1; i++) {
1553                     int pict_num = s->input_picture[0]->f->display_picture_number + i;
1555                     if (pict_num >= s->rc_context.num_entries)
1557                     if (!s->input_picture[i]) {
1558                         s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1562                     s->input_picture[i]->f->pict_type =
1563                         s->rc_context.entry[pict_num].new_pict_type;
         /* b_frame_strategy 0: fixed count; 1: intra-count heuristic;
          * 2: exhaustive trial encode via estimate_best_b_count() */
1567             if (s->b_frame_strategy == 0) {
1568                 b_frames = s->max_b_frames;
1569                 while (b_frames && !s->input_picture[b_frames])
1571             } else if (s->b_frame_strategy == 1) {
1572                 for (i = 1; i < s->max_b_frames + 1; i++) {
1573                     if (s->input_picture[i] &&
1574                         s->input_picture[i]->b_frame_score == 0) {
1575                         s->input_picture[i]->b_frame_score =
1577                                 s->input_picture[i    ]->f->data[0],
1578                                 s->input_picture[i - 1]->f->data[0],
1582                 for (i = 0; i < s->max_b_frames + 1; i++) {
1583                     if (!s->input_picture[i] ||
1584                         s->input_picture[i]->b_frame_score - 1 >
1585                             s->mb_num / s->b_sensitivity)
1589                 b_frames = FFMAX(0, i - 1);
             /* reset scores so the next GOP recomputes them */
1592                 for (i = 0; i < b_frames + 1; i++) {
1593                     s->input_picture[i]->b_frame_score = 0;
1595             } else if (s->b_frame_strategy == 2) {
1596                 b_frames = estimate_best_b_count(s);
         /* honor user-forced picture types inside the B run */
1603             for (i = b_frames - 1; i >= 0; i--) {
1604                 int type = s->input_picture[i]->f->pict_type;
1605                 if (type && type != AV_PICTURE_TYPE_B)
1608             if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1609                 b_frames == s->max_b_frames) {
1610                 av_log(s->avctx, AV_LOG_ERROR,
1611                        "warning, too many B-frames in a row\n");
         /* clamp the B run at GOP boundaries */
1614             if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1615                 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1616                     s->gop_size > s->picture_in_gop_number) {
1617                     b_frames = s->gop_size - s->picture_in_gop_number - 1;
1619                     if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1621                     s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1625             if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1626                 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
         /* emit the anchor first, then the B frames in coding order */
1629             s->reordered_input_picture[0] = s->input_picture[b_frames];
1630             if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1631                 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1632             s->reordered_input_picture[0]->f->coded_picture_number =
1633                 s->coded_picture_number++;
1634             for (i = 0; i < b_frames; i++) {
1635                 s->reordered_input_picture[i + 1] = s->input_picture[i];
1636                 s->reordered_input_picture[i + 1]->f->pict_type =
1638                 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1639                     s->coded_picture_number++;
1644     ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1646     if (s->reordered_input_picture[0]) {
         /* B pictures are never used as references (3 = frame reference) */
1647         s->reordered_input_picture[0]->reference =
1648             s->reordered_input_picture[0]->f->pict_type !=
1649                 AV_PICTURE_TYPE_B ? 3 : 0;
1651         if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1654         if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1655             // input is a shared pix, so we can't modify it -> allocate a new
1656             // one & ensure that the shared one is reuseable
1659             int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1662             pic = &s->picture[i];
1664             pic->reference = s->reordered_input_picture[0]->reference;
1665             if (alloc_picture(s, pic, 0) < 0) {
1669             ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1673             /* mark us unused / free shared pic */
1674             av_frame_unref(s->reordered_input_picture[0]->f);
1675             s->reordered_input_picture[0]->shared = 0;
1677             s->current_picture_ptr = pic;
1679             // input is not a shared pix -> reuse buffer for current_pix
1680             s->current_picture_ptr = s->reordered_input_picture[0];
1681             for (i = 0; i < 4; i++) {
1682                 s->new_picture.f->data[i] += INPLACE_OFFSET;
1685         ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1686         if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1687                                        s->current_picture_ptr)) < 0)
1690         s->picture_number = s->new_picture.f->display_picture_number;
/**
 * Post-encode bookkeeping: pad the reconstructed reference picture's
 * edges for unrestricted MV prediction, remember last picture type and
 * lambda, and service the deprecated coded_frame/error compat fields.
 */
1695 static void frame_end(MpegEncContext *s)
1697     if (s->unrestricted_mv &&
1698         s->current_picture.reference &&
         /* replicate EDGE_WIDTH pixels around all planes of the new
          * reference frame so motion vectors may point outside it */
1700         const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1701         int hshift = desc->log2_chroma_w;
1702         int vshift = desc->log2_chroma_h;
1703         s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1704                                 s->current_picture.f->linesize[0],
1705                                 s->h_edge_pos, s->v_edge_pos,
1706                                 EDGE_WIDTH, EDGE_WIDTH,
1707                                 EDGE_TOP | EDGE_BOTTOM);
1708         s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1709                                 s->current_picture.f->linesize[1],
1710                                 s->h_edge_pos >> hshift,
1711                                 s->v_edge_pos >> vshift,
1712                                 EDGE_WIDTH >> hshift,
1713                                 EDGE_WIDTH >> vshift,
1714                                 EDGE_TOP | EDGE_BOTTOM);
1715         s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1716                                 s->current_picture.f->linesize[2],
1717                                 s->h_edge_pos >> hshift,
1718                                 s->v_edge_pos >> vshift,
1719                                 EDGE_WIDTH >> hshift,
1720                                 EDGE_WIDTH >> vshift,
1721                                 EDGE_TOP | EDGE_BOTTOM);
1726     s->last_pict_type                 = s->pict_type;
1727     s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1728     if (s->pict_type!= AV_PICTURE_TYPE_B)
1729         s->last_non_b_pict_type = s->pict_type;
 /* deprecated-API compatibility blocks below */
1731 #if FF_API_CODED_FRAME
1732 FF_DISABLE_DEPRECATION_WARNINGS
1733     av_frame_unref(s->avctx->coded_frame);
1734     av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1735 FF_ENABLE_DEPRECATION_WARNINGS
1737 #if FF_API_ERROR_FRAME
1738 FF_DISABLE_DEPRECATION_WARNINGS
1739     memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1740            sizeof(s->current_picture.encoding_error));
1741 FF_ENABLE_DEPRECATION_WARNINGS
/**
 * Refresh the per-coefficient DCT denoising offsets from accumulated
 * error statistics, separately for intra and inter blocks. Counters
 * are halved once they exceed 2^16 to keep a rolling average.
 */
1745 static void update_noise_reduction(MpegEncContext *s)
1749     for (intra = 0; intra < 2; intra++) {
1750         if (s->dct_count[intra] > (1 << 16)) {
1751             for (i = 0; i < 64; i++) {
1752                 s->dct_error_sum[intra][i] >>= 1;
1754             s->dct_count[intra] >>= 1;
         /* offset[i] ~= noise_reduction * count / error_sum[i], rounded;
          * +1 in the denominator avoids division by zero */
1757         for (i = 0; i < 64; i++) {
1758             s->dct_offset[intra][i] = (s->noise_reduction *
1759                                        s->dct_count[intra] +
1760                                        s->dct_error_sum[intra][i] / 2) /
1761                                       (s->dct_error_sum[intra][i] + 1);
/**
 * Per-frame setup: rotate last/next/current reference pictures, take
 * new refs, adjust plane pointers/strides for field pictures, select
 * the dequantizer functions for the codec, and update noise reduction.
 * NOTE(review): extract is non-contiguous; error returns are not
 * visible here.
 */
1766 static int frame_start(MpegEncContext *s)
1770     /* mark & release old frames */
1771     if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1772         s->last_picture_ptr != s->next_picture_ptr &&
1773         s->last_picture_ptr->f->buf[0]) {
1774         ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1777     s->current_picture_ptr->f->pict_type = s->pict_type;
1778     s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1780     ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1781     if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1782                                    s->current_picture_ptr)) < 0)
     /* B frames do not advance the reference chain */
1785     if (s->pict_type != AV_PICTURE_TYPE_B) {
1786         s->last_picture_ptr = s->next_picture_ptr;
1788             s->next_picture_ptr = s->current_picture_ptr;
1791     if (s->last_picture_ptr) {
1792         ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1793         if (s->last_picture_ptr->f->buf[0] &&
1794             (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1795                                        s->last_picture_ptr)) < 0)
1798     if (s->next_picture_ptr) {
1799         ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1800         if (s->next_picture_ptr->f->buf[0] &&
1801             (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1802                                        s->next_picture_ptr)) < 0)
     /* field coding: double strides, bottom field starts one line down */
1806     if (s->picture_structure!= PICT_FRAME) {
1808         for (i = 0; i < 4; i++) {
1809             if (s->picture_structure == PICT_BOTTOM_FIELD) {
1810                 s->current_picture.f->data[i] +=
1811                     s->current_picture.f->linesize[i];
1813             s->current_picture.f->linesize[i] *= 2;
1814             s->last_picture.f->linesize[i]    *= 2;
1815             s->next_picture.f->linesize[i]    *= 2;
     /* pick the matching dequantizers for the output format */
1819     if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1820         s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1821         s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1822     } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1823         s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1824         s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1826         s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1827         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1830     if (s->dct_error_sum) {
1831         av_assert2(s->noise_reduction && s->encoding);
1832         update_noise_reduction(s);
/**
 * Public encode entry point: queue/reorder the input picture, allocate
 * the output packet, run encode_picture() (re-running with raised
 * lambda on VBV overflow), emit stuffing bits, maintain the MPEG-1/2
 * vbv_delay for CBR, and fill packet pts/dts/flags and side data.
 * NOTE(review): extract is non-contiguous; many error paths, closing
 * braces and the "no output" path are not visible here.
 */
1838 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1839                           const AVFrame *pic_arg, int *got_packet)
1841     MpegEncContext *s = avctx->priv_data;
1842     int i, stuffing_count, ret;
1843     int context_count = s->slice_context_count;
1845     s->vbv_ignore_qmax = 0;
1847     s->picture_in_gop_number++;
1849     if (load_input_picture(s, pic_arg) < 0)
1852     if (select_input_picture(s) < 0) {
     /* output? */
1857     if (s->new_picture.f->data[0]) {
         /* single-slice encodes can grow the internal byte buffer instead
          * of pre-sizing for the worst case */
1858         int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1859         int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1861                                               s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1862         if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1865             s->mb_info_ptr = av_packet_new_side_data(pkt,
1866                                  AV_PKT_DATA_H263_MB_INFO,
1867                                  s->mb_width*s->mb_height*12);
1868             s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
         /* give each slice thread its proportional share of the packet */
1871         for (i = 0; i < context_count; i++) {
1872             int start_y = s->thread_context[i]->start_mb_y;
1873             int   end_y = s->thread_context[i]->  end_mb_y;
1874             int h       = s->mb_height;
1875             uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1876             uint8_t *end   = pkt->data + (size_t)(((int64_t) pkt->size) *   end_y / h);
1878             init_put_bits(&s->thread_context[i]->pb, start, end - start);
1881         s->pict_type = s->new_picture.f->pict_type;
1883         ret = frame_start(s);
1887         ret = encode_picture(s, s->picture_number);
1888         if (growing_buffer) {
1889             av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1890             pkt->data = s->pb.buf;
1891             pkt->size = avctx->internal->byte_buffer_size;
 /* deprecated per-frame statistics fields */
1896 #if FF_API_STAT_BITS
1897 FF_DISABLE_DEPRECATION_WARNINGS
1898         avctx->header_bits = s->header_bits;
1899         avctx->mv_bits     = s->mv_bits;
1900         avctx->misc_bits   = s->misc_bits;
1901         avctx->i_tex_bits  = s->i_tex_bits;
1902         avctx->p_tex_bits  = s->p_tex_bits;
1903         avctx->i_count     = s->i_count;
1904         // FIXME f/b_count in avctx
1905         avctx->p_count     = s->mb_num - s->i_count - s->skip_count;
1906         avctx->skip_count  = s->skip_count;
1907 FF_ENABLE_DEPRECATION_WARNINGS
1912         if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1913             ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
         /* VBV check: if the frame is too large, raise lambda and re-encode */
1915         if (avctx->rc_buffer_size) {
1916             RateControlContext *rcc = &s->rc_context;
1917             int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1918             int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
1919             int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1921             if (put_bits_count(&s->pb) > max_size &&
1922                 s->lambda < s->lmax) {
1923                 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1924                                        (s->qscale + 1) / s->qscale);
1925                 if (s->adaptive_quant) {
1927                     for (i = 0; i < s->mb_height * s->mb_stride; i++)
1928                         s->lambda_table[i] =
1929                             FFMAX(s->lambda_table[i] + min_step,
1930                                   s->lambda_table[i] * (s->qscale + 1) /
1933                 s->mb_skipped = 0;        // done in frame_start()
1934                 // done in encode_picture() so we must undo it
1935                 if (s->pict_type == AV_PICTURE_TYPE_P) {
1936                     if (s->flipflop_rounding ||
1937                         s->codec_id == AV_CODEC_ID_H263P ||
1938                         s->codec_id == AV_CODEC_ID_MPEG4)
1939                         s->no_rounding ^= 1;
1941                 if (s->pict_type != AV_PICTURE_TYPE_B) {
1942                     s->time_base = s->last_time_base;
1943                     s->last_non_b_time = s->time - s->pp_time;
1945                 for (i = 0; i < context_count; i++) {
1946                     PutBitContext *pb = &s->thread_context[i]->pb;
1947                     init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1949                 s->vbv_ignore_qmax = 1;
1950                 av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1954             av_assert0(avctx->rc_max_rate);
1957         if (avctx->flags & AV_CODEC_FLAG_PASS1)
1958             ff_write_pass1_stats(s);
1960         for (i = 0; i < 4; i++) {
1961             s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1962             avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1964         ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1965                                        s->current_picture_ptr->encoding_error,
1966                                        (avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1969         if (avctx->flags & AV_CODEC_FLAG_PASS1)
1970             assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1971                                              s->misc_bits + s->i_tex_bits +
1973         flush_put_bits(&s->pb);
1974         s->frame_bits  = put_bits_count(&s->pb);
1976         stuffing_count = ff_vbv_update(s, s->frame_bits);
1977         s->stuffing_bits = 8*stuffing_count;
1978         if (stuffing_count) {
1979             if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1980                     stuffing_count + 50) {
1981                 av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
             /* stuffing format is codec specific */
1985             switch (s->codec_id) {
1986             case AV_CODEC_ID_MPEG1VIDEO:
1987             case AV_CODEC_ID_MPEG2VIDEO:
1988                 while (stuffing_count--) {
1989                     put_bits(&s->pb, 8, 0);
1992             case AV_CODEC_ID_MPEG4:
                 /* MPEG-4 uses a stuffing start code (0x000001C3) then 0xFF bytes */
1993                 put_bits(&s->pb, 16, 0);
1994                 put_bits(&s->pb, 16, 0x1C3);
1995                 stuffing_count -= 4;
1996                 while (stuffing_count--) {
1997                     put_bits(&s->pb, 8, 0xFF);
2001                 av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2003             flush_put_bits(&s->pb);
2004             s->frame_bits  = put_bits_count(&s->pb);
2007         /* update MPEG-1/2 vbv_delay for CBR */
2008         if (avctx->rc_max_rate                          &&
2009             avctx->rc_min_rate == avctx->rc_max_rate    &&
2010             s->out_format == FMT_MPEG1                  &&
2011             90000LL * (avctx->rc_buffer_size - 1) <=
2012                 avctx->rc_max_rate * 0xFFFFLL) {
2013             AVCPBProperties *props;
2016             int vbv_delay, min_delay;
2017             double inbits  = avctx->rc_max_rate *
2018                              av_q2d(avctx->time_base);
2019             int    minbits = s->frame_bits - 8 *
2020                              (s->vbv_delay_ptr - s->pb.buf - 1);
2021             double bits    = s->rc_context.buffer_index + minbits - inbits;
2024                 av_log(avctx, AV_LOG_ERROR,
2025                        "Internal error, negative bits\n");
2027             av_assert1(s->repeat_first_field == 0);
2029             vbv_delay = bits * 90000 / avctx->rc_max_rate;
2030             min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
2033             vbv_delay = FFMAX(vbv_delay, min_delay);
2035             av_assert0(vbv_delay < 0xFFFF);
             /* patch the 16-bit vbv_delay field already written into the
              * picture header (split across 3 bytes) */
2037             s->vbv_delay_ptr[0] &= 0xF8;
2038             s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2039             s->vbv_delay_ptr[1]  = vbv_delay >> 5;
2040             s->vbv_delay_ptr[2] &= 0x07;
2041             s->vbv_delay_ptr[2] |= vbv_delay << 3;
2043             props = av_cpb_properties_alloc(&props_size);
2045                 return AVERROR(ENOMEM);
2046             props->vbv_delay = vbv_delay * 300;
2048             ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
2049                                           (uint8_t*)props, props_size);
2055 #if FF_API_VBV_DELAY
2056 FF_DISABLE_DEPRECATION_WARNINGS
2057             avctx->vbv_delay = vbv_delay * 300;
2058 FF_ENABLE_DEPRECATION_WARNINGS
2061         s->total_bits     += s->frame_bits;
2062 #if FF_API_STAT_BITS
2063 FF_DISABLE_DEPRECATION_WARNINGS
2064         avctx->frame_bits  = s->frame_bits;
2065 FF_ENABLE_DEPRECATION_WARNINGS
2069         pkt->pts = s->current_picture.f->pts;
         /* with B frames, dts lags pts by one reordered frame; the very
          * first packet uses the precomputed dts_delta */
2070         if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2071             if (!s->current_picture.f->coded_picture_number)
2072                 pkt->dts = pkt->pts - s->dts_delta;
2074                 pkt->dts = s->reordered_pts;
2075             s->reordered_pts = pkt->pts;
2077             pkt->dts = pkt->pts;
2078         if (s->current_picture.f->key_frame)
2079             pkt->flags |= AV_PKT_FLAG_KEY;
2081             av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2086     /* release non-reference frames */
2087     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2088         if (!s->picture[i].reference)
2089             ff_mpeg_unref_picture(avctx, &s->picture[i]);
2092     av_assert1((s->frame_bits & 7) == 0);
2094     pkt->size = s->frame_bits / 8;
2095     *got_packet = !!pkt->size;
/**
 * Zero out block @n when it contains only a few small high-frequency
 * coefficients: each nonzero +/-1 coefficient contributes a positional
 * weight from tab[]; if the total stays below @threshold the block is
 * eliminated (kept down to the DC term only when skip_dc is set).
 * NOTE(review): the skip_dc setup and score accumulation lines are not
 * fully visible in this extract.
 */
2099 static inline void dct_single_coeff_elimination(MpegEncContext *s,
2100                                                 int n, int threshold)
     /* weight by zig-zag position: low frequencies count more */
2102     static const char tab[64] = {
2103         3, 2, 2, 1, 1, 1, 1, 1,
2104         1, 1, 1, 1, 1, 1, 1, 1,
2105         1, 1, 1, 1, 1, 1, 1, 1,
2106         0, 0, 0, 0, 0, 0, 0, 0,
2107         0, 0, 0, 0, 0, 0, 0, 0,
2108         0, 0, 0, 0, 0, 0, 0, 0,
2109         0, 0, 0, 0, 0, 0, 0, 0,
2110         0, 0, 0, 0, 0, 0, 0, 0
2115     int16_t *block = s->block[n];
2116     const int last_index = s->block_last_index[n];
     /* negative threshold means: preserve the DC coefficient */
2119     if (threshold < 0) {
2121         threshold = -threshold;
2125     /* Are all we could set to zero already zero? */
2126     if (last_index <= skip_dc - 1)
2129     for (i = 0; i <= last_index; i++) {
2130         const int j = s->intra_scantable.permutated[i];
2131         const int level = FFABS(block[j]);
2133             if (skip_dc && i == 0)
         /* any coefficient with |level| > 1 disqualifies elimination */
2137         } else if (level > 1) {
2143     if (score >= threshold)
2145     for (i = skip_dc; i <= last_index; i++) {
2146         const int j = s->intra_scantable.permutated[i];
2150         s->block_last_index[n] = 0;
2152         s->block_last_index[n] = -1;
/**
 * Clamp quantized coefficients of one block into the codec's legal
 * range [s->min_qcoeff, s->max_qcoeff], counting overflows and warning
 * once per block in simple MB-decision mode (where clipping actually
 * changes the bitstream).
 */
2155 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2159     const int maxlevel = s->max_qcoeff;
2160     const int minlevel = s->min_qcoeff;
2164         i = 1; // skip clipping of intra dc
2168     for (; i <= last_index; i++) {
2169         const int j = s->intra_scantable.permutated[i];
2170         int level = block[j];
2172         if (level > maxlevel) {
2175         } else if (level < minlevel) {
2183     if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2184         av_log(s->avctx, AV_LOG_INFO,
2185                "warning, clipping %d dct coefficients to %d..%d\n",
2186                overflow, minlevel, maxlevel);
/**
 * Build an 8x8 perceptual weight table from the local standard
 * deviation of a 3x3 neighbourhood around each pixel (clamped at the
 * block borders); used by the trellis refinement path.
 * NOTE(review): count/sum/sqr accumulation lines are not visible in
 * this extract.
 */
2189 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2193     for (y = 0; y < 8; y++) {
2194         for (x = 0; x < 8; x++) {
2200             for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2201                 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2202                     int v = ptr[x2 + y2 * stride];
             /* 36 * sqrt(variance-like term) normalized by sample count */
2208             weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2213 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2214 int motion_x, int motion_y,
2215 int mb_block_height,
2219 int16_t weight[12][64];
2220 int16_t orig[12][64];
2221 const int mb_x = s->mb_x;
2222 const int mb_y = s->mb_y;
2225 int dct_offset = s->linesize * 8; // default for progressive frames
2226 int uv_dct_offset = s->uvlinesize * 8;
2227 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2228 ptrdiff_t wrap_y, wrap_c;
2230 for (i = 0; i < mb_block_count; i++)
2231 skip_dct[i] = s->skipdct;
2233 if (s->adaptive_quant) {
2234 const int last_qp = s->qscale;
2235 const int mb_xy = mb_x + mb_y * s->mb_stride;
2237 s->lambda = s->lambda_table[mb_xy];
2240 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2241 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2242 s->dquant = s->qscale - last_qp;
2244 if (s->out_format == FMT_H263) {
2245 s->dquant = av_clip(s->dquant, -2, 2);
2247 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2249 if (s->pict_type == AV_PICTURE_TYPE_B) {
2250 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2253 if (s->mv_type == MV_TYPE_8X8)
2259 ff_set_qscale(s, last_qp + s->dquant);
2260 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2261 ff_set_qscale(s, s->qscale + s->dquant);
2263 wrap_y = s->linesize;
2264 wrap_c = s->uvlinesize;
2265 ptr_y = s->new_picture.f->data[0] +
2266 (mb_y * 16 * wrap_y) + mb_x * 16;
2267 ptr_cb = s->new_picture.f->data[1] +
2268 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2269 ptr_cr = s->new_picture.f->data[2] +
2270 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2272 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2273 uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2274 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2275 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2276 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2278 16, 16, mb_x * 16, mb_y * 16,
2279 s->width, s->height);
2281 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2283 mb_block_width, mb_block_height,
2284 mb_x * mb_block_width, mb_y * mb_block_height,
2286 ptr_cb = ebuf + 16 * wrap_y;
2287 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2289 mb_block_width, mb_block_height,
2290 mb_x * mb_block_width, mb_y * mb_block_height,
2292 ptr_cr = ebuf + 16 * wrap_y + 16;
2296 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2297 int progressive_score, interlaced_score;
2299 s->interlaced_dct = 0;
2300 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2301 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2302 NULL, wrap_y, 8) - 400;
2304 if (progressive_score > 0) {
2305 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2306 NULL, wrap_y * 2, 8) +
2307 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2308 NULL, wrap_y * 2, 8);
2309 if (progressive_score > interlaced_score) {
2310 s->interlaced_dct = 1;
2312 dct_offset = wrap_y;
2313 uv_dct_offset = wrap_c;
2315 if (s->chroma_format == CHROMA_422 ||
2316 s->chroma_format == CHROMA_444)
2322 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2323 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2324 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2325 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2327 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2331 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2332 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2333 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2334 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2335 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2336 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2337 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2338 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2339 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2340 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2341 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2342 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2346 op_pixels_func (*op_pix)[4];
2347 qpel_mc_func (*op_qpix)[16];
2348 uint8_t *dest_y, *dest_cb, *dest_cr;
2350 dest_y = s->dest[0];
2351 dest_cb = s->dest[1];
2352 dest_cr = s->dest[2];
2354 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2355 op_pix = s->hdsp.put_pixels_tab;
2356 op_qpix = s->qdsp.put_qpel_pixels_tab;
2358 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2359 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2362 if (s->mv_dir & MV_DIR_FORWARD) {
2363 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2364 s->last_picture.f->data,
2366 op_pix = s->hdsp.avg_pixels_tab;
2367 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2369 if (s->mv_dir & MV_DIR_BACKWARD) {
2370 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2371 s->next_picture.f->data,
2375 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2376 int progressive_score, interlaced_score;
2378 s->interlaced_dct = 0;
2379 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2380 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2384 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2385 progressive_score -= 400;
2387 if (progressive_score > 0) {
2388 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2390 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2394 if (progressive_score > interlaced_score) {
2395 s->interlaced_dct = 1;
2397 dct_offset = wrap_y;
2398 uv_dct_offset = wrap_c;
2400 if (s->chroma_format == CHROMA_422)
2406 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2407 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2408 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2409 dest_y + dct_offset, wrap_y);
2410 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2411 dest_y + dct_offset + 8, wrap_y);
2413 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2417 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2418 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2419 if (!s->chroma_y_shift) { /* 422 */
2420 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2421 dest_cb + uv_dct_offset, wrap_c);
2422 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2423 dest_cr + uv_dct_offset, wrap_c);
2426 /* pre quantization */
2427 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2428 2 * s->qscale * s->qscale) {
2430 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2432 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2434 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2435 wrap_y, 8) < 20 * s->qscale)
2437 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2438 wrap_y, 8) < 20 * s->qscale)
2440 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2442 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2444 if (!s->chroma_y_shift) { /* 422 */
2445 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2446 dest_cb + uv_dct_offset,
2447 wrap_c, 8) < 20 * s->qscale)
2449 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2450 dest_cr + uv_dct_offset,
2451 wrap_c, 8) < 20 * s->qscale)
2457 if (s->quantizer_noise_shaping) {
2459 get_visual_weight(weight[0], ptr_y , wrap_y);
2461 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2463 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2465 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2467 get_visual_weight(weight[4], ptr_cb , wrap_c);
2469 get_visual_weight(weight[5], ptr_cr , wrap_c);
2470 if (!s->chroma_y_shift) { /* 422 */
2472 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2475 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2478 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2481 /* DCT & quantize */
2482 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2484 for (i = 0; i < mb_block_count; i++) {
2487 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2488 // FIXME we could decide to change to quantizer instead of
2490 // JS: I don't think that would be a good idea it could lower
2491 // quality instead of improve it. Just INTRADC clipping
2492 // deserves changes in quantizer
2494 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2496 s->block_last_index[i] = -1;
2498 if (s->quantizer_noise_shaping) {
2499 for (i = 0; i < mb_block_count; i++) {
2501 s->block_last_index[i] =
2502 dct_quantize_refine(s, s->block[i], weight[i],
2503 orig[i], i, s->qscale);
2508 if (s->luma_elim_threshold && !s->mb_intra)
2509 for (i = 0; i < 4; i++)
2510 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2511 if (s->chroma_elim_threshold && !s->mb_intra)
2512 for (i = 4; i < mb_block_count; i++)
2513 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2515 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2516 for (i = 0; i < mb_block_count; i++) {
2517 if (s->block_last_index[i] == -1)
2518 s->coded_score[i] = INT_MAX / 256;
2523 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2524 s->block_last_index[4] =
2525 s->block_last_index[5] = 0;
2527 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2528 if (!s->chroma_y_shift) { /* 422 / 444 */
2529 for (i=6; i<12; i++) {
2530 s->block_last_index[i] = 0;
2531 s->block[i][0] = s->block[4][0];
2536 // non c quantize code returns incorrect block_last_index FIXME
2537 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2538 for (i = 0; i < mb_block_count; i++) {
2540 if (s->block_last_index[i] > 0) {
2541 for (j = 63; j > 0; j--) {
2542 if (s->block[i][s->intra_scantable.permutated[j]])
2545 s->block_last_index[i] = j;
2550 /* huffman encode */
2551 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2552 case AV_CODEC_ID_MPEG1VIDEO:
2553 case AV_CODEC_ID_MPEG2VIDEO:
2554 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2555 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2557 case AV_CODEC_ID_MPEG4:
2558 if (CONFIG_MPEG4_ENCODER)
2559 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2561 case AV_CODEC_ID_MSMPEG4V2:
2562 case AV_CODEC_ID_MSMPEG4V3:
2563 case AV_CODEC_ID_WMV1:
2564 if (CONFIG_MSMPEG4_ENCODER)
2565 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2567 case AV_CODEC_ID_WMV2:
2568 if (CONFIG_WMV2_ENCODER)
2569 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2571 case AV_CODEC_ID_H261:
2572 if (CONFIG_H261_ENCODER)
2573 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2575 case AV_CODEC_ID_H263:
2576 case AV_CODEC_ID_H263P:
2577 case AV_CODEC_ID_FLV1:
2578 case AV_CODEC_ID_RV10:
2579 case AV_CODEC_ID_RV20:
2580 if (CONFIG_H263_ENCODER)
2581 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2583 case AV_CODEC_ID_MJPEG:
2584 case AV_CODEC_ID_AMV:
2585 if (CONFIG_MJPEG_ENCODER)
2586 ff_mjpeg_encode_mb(s, s->block);
2588 case AV_CODEC_ID_SPEEDHQ:
2589 if (CONFIG_SPEEDHQ_ENCODER)
2590 ff_speedhq_encode_mb(s, s->block);
/* Encode one macroblock, dispatching to the encode_mb_internal() variant
 * that matches the chroma subsampling: 4:2:0 uses 8x8 chroma and 6 blocks
 * per MB, 4:2:2 uses 16x8 chroma and 8 blocks, anything else (4:4:4) uses
 * 16x16 chroma and 12 blocks. */
2597 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2599 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2600 else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2601 else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/* Snapshot the encoder-context fields that a trial macroblock encode
 * mutates, copying them from s into d so that the state can later be
 * restored (see copy_context_after_encode() and encode_mb_hq()).
 * NOTE(review): this excerpt omits some intervening lines (the braces and
 * loop headers are not visible), so the exact set of copied fields may be
 * larger than shown here. */
2604 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
     /* motion-vector prediction state */
2607 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2610 d->mb_skip_run= s->mb_skip_run;
     /* DC predictors — copied element-wise; the enclosing for-loop header
      * is not visible in this excerpt */
2612 d->last_dc[i] = s->last_dc[i];
     /* bit-usage statistics accumulated during encoding */
2615 d->mv_bits= s->mv_bits;
2616 d->i_tex_bits= s->i_tex_bits;
2617 d->p_tex_bits= s->p_tex_bits;
2618 d->i_count= s->i_count;
2619 d->f_count= s->f_count;
2620 d->b_count= s->b_count;
2621 d->skip_count= s->skip_count;
2622 d->misc_bits= s->misc_bits;
     /* current quantizer state */
2626 d->qscale= s->qscale;
2627 d->dquant= s->dquant;
2629 d->esc3_level_length= s->esc3_level_length;
/* Copy the result state of a trial macroblock encode from s into d.
 * Compared to copy_context_before_encode() this additionally records the
 * chosen MB decision (mb_intra, mb_skipped, mv_type, mv_dir), the motion
 * vectors themselves, the per-block last-index values and the
 * PutBitContexts used for data partitioning.  Used by encode_mb_hq() to
 * keep the best candidate.  NOTE(review): some intervening lines (braces,
 * loop headers) are not visible in this excerpt. */
2632 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
     /* motion vectors and MV prediction state */
2635 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2636 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2639 d->mb_skip_run= s->mb_skip_run;
     /* DC predictors — enclosing loop header not visible in this excerpt */
2641 d->last_dc[i] = s->last_dc[i];
     /* bit-usage statistics */
2644 d->mv_bits= s->mv_bits;
2645 d->i_tex_bits= s->i_tex_bits;
2646 d->p_tex_bits= s->p_tex_bits;
2647 d->i_count= s->i_count;
2648 d->f_count= s->f_count;
2649 d->b_count= s->b_count;
2650 d->skip_count= s->skip_count;
2651 d->misc_bits= s->misc_bits;
     /* the macroblock decision that was made during the trial encode */
2653 d->mb_intra= s->mb_intra;
2654 d->mb_skipped= s->mb_skipped;
2655 d->mv_type= s->mv_type;
2656 d->mv_dir= s->mv_dir;
2658 if(s->data_partitioning){
2660 d->tex_pb= s->tex_pb;
     /* per-block coefficient end positions — loop header not visible */
2664 d->block_last_index[i]= s->block_last_index[i];
2665 d->interlaced_dct= s->interlaced_dct;
2666 d->qscale= s->qscale;
2668 d->esc3_level_length= s->esc3_level_length;
/* Trial-encode one macroblock with the given candidate type/MVs into one of
 * two alternating bit buffers, score the result, and keep it in *best if it
 * beats *dmin.  The score is the bit count, or — in full RD mode
 * (FF_MB_DECISION_RD) — bits * lambda2 + SSE << FF_LAMBDA_SHIFT, with the
 * reconstruction redirected into the rd_scratchpad so the real picture is
 * untouched.  NOTE(review): some lines of this function (declarations,
 * braces, the dmin comparison) are not visible in this excerpt. */
2671 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2672 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2673 int *dmin, int *next_block, int motion_x, int motion_y)
2676 uint8_t *dest_backup[3];
     /* restore the clean pre-trial state saved in backup */
2678 copy_context_before_encode(s, backup, type);
     /* use the scratch block set / bit buffer for this candidate */
2680 s->block= s->blocks[*next_block];
2681 s->pb= pb[*next_block];
2682 if(s->data_partitioning){
2683 s->pb2 = pb2 [*next_block];
2684 s->tex_pb= tex_pb[*next_block];
     /* redirect reconstruction into the RD scratchpad (layout: 16 luma
      * lines, then two 8-wide chroma planes side by side) */
2688 memcpy(dest_backup, s->dest, sizeof(s->dest));
2689 s->dest[0] = s->sc.rd_scratchpad;
2690 s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2691 s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2692 av_assert0(s->linesize >= 32); //FIXME
2695 encode_mb(s, motion_x, motion_y);
     /* base score: bits produced (all partitions when data partitioning) */
2697 score= put_bits_count(&s->pb);
2698 if(s->data_partitioning){
2699 score+= put_bits_count(&s->pb2);
2700 score+= put_bits_count(&s->tex_pb);
2703 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2704 ff_mpv_reconstruct_mb(s, s->block);
     /* rate-distortion: rate * lambda2 + distortion (SSE) */
2706 score *= s->lambda2;
2707 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2711 memcpy(s->dest, dest_backup, sizeof(s->dest));
     /* candidate accepted: record its resulting state as the new best */
2718 copy_context_after_encode(best, s, type);
/* Sum of squared differences between two w x h pixel regions with the given
 * stride.  The common 16x16 and 8x8 sizes go through the optimized
 * mecc.sse[] kernels; other sizes fall back to a scalar loop using the
 * ff_square_tab lookup (the loop headers and return of the fallback are not
 * visible in this excerpt). */
2722 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
     /* centered so negative differences index correctly */
2723 const uint32_t *sq = ff_square_tab + 256;
2728 return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2729 else if(w==8 && h==8)
2730 return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2734 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/* Distortion of the current macroblock: compares the reconstructed MB
 * (s->dest) against the source picture (s->new_picture) over luma plus both
 * chroma planes.  Full 16x16 MBs use the NSSE or SSE comparison functions
 * depending on avctx->mb_cmp; MBs clipped at the right/bottom picture edge
 * fall through to the generic sse() with the reduced w/h.  NOTE(review):
 * the w/h declarations and the branch structure are partly not visible in
 * this excerpt. */
2743 static int sse_mb(MpegEncContext *s){
     /* clip MB dimensions at the picture border */
2747 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2748 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2751 if(s->avctx->mb_cmp == FF_CMP_NSSE){
     /* noise-preserving SSE variant (needs the context, hence s) */
2752 return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2753        s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2754        s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2756 return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2757        s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2758        s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
     /* edge MB: generic path, chroma at half resolution */
2761 return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2762       +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2763       +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/* Slice-threaded pre-pass motion estimation.  Walks this slice's
 * macroblocks in reverse raster order (bottom-right to top-left) calling
 * ff_pre_estimate_p_frame_motion() for each, using the pre_dia_size
 * diamond.  (The closing braces and return are not visible in this
 * excerpt.) */
2766 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2767 MpegEncContext *s= *(void**)arg;
2771 s->me.dia_size= s->avctx->pre_dia_size;
2772 s->first_slice_line=1;
     /* reverse scan: later MBs first so the pre-pass seeds predictors */
2773 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2774 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2775 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2777 s->first_slice_line=0;
/* Slice-threaded main motion-estimation pass.  Scans the slice in raster
 * order, maintaining block_index[] incrementally per MB, and runs B- or
 * P-frame motion estimation depending on pict_type.  (Closing braces and
 * return are not visible in this excerpt.) */
2785 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2786 MpegEncContext *s= *(void**)arg;
2788 ff_check_alignment();
2790 s->me.dia_size= s->avctx->dia_size;
2791 s->first_slice_line=1;
2792 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2793 s->mb_x=0; //for block init below
2794 ff_init_block_index(s);
2795 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
     /* advance the four luma block indices by one MB (2 blocks wide) */
2796 s->block_index[0]+=2;
2797 s->block_index[1]+=2;
2798 s->block_index[2]+=2;
2799 s->block_index[3]+=2;
2801 /* compute motion vector & mb_type and store in context */
2802 if(s->pict_type==AV_PICTURE_TYPE_B)
2803 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2805 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2807 s->first_slice_line=0;
/* Slice-threaded per-macroblock luma statistics: for each 16x16 luma block
 * of the source picture, compute the pixel sum and variance
 * (E[x^2] - E[x]^2, scaled with rounding) and store variance and mean into
 * the current picture's mb_var / mb_mean tables, accumulating the variance
 * sum for rate control.  (xx/yy setup and closing braces are not visible in
 * this excerpt.) */
2812 static int mb_var_thread(AVCodecContext *c, void *arg){
2813 MpegEncContext *s= *(void**)arg;
2816 ff_check_alignment();
2818 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2819 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2822 uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2824 int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
     /* variance over 256 pixels; +500+128 provides rounding bias */
2826 varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2827 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2829 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2830 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2831 s->me.mb_var_sum_temp += varc;
/* Finish the current slice: run the codec-specific end-of-slice handling
 * (merge MPEG-4 partitions and add stuffing, MJPEG stuffing, or SpeedHQ
 * slice end), byte-align/flush the bitstream, and — for two-pass encoding —
 * account the flushed bits as misc_bits. */
2837 static void write_slice_end(MpegEncContext *s){
2838 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2839 if(s->partitioned_frame){
2840 ff_mpeg4_merge_partitions(s);
2843 ff_mpeg4_stuffing(&s->pb);
2844 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2845 ff_mjpeg_encode_stuffing(s);
2846 } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
2847 ff_speedhq_end_slice(s);
2850 flush_put_bits(&s->pb);
     /* pass-1 statistics; partitioned frames account bits elsewhere */
2852 if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2853 s->misc_bits+= get_bits_diff(s);
/* Append one 12-byte macroblock-info record (used for H.263 RFC 2190-style
 * packetization side data): bit offset of the MB, quantizer, GOB number,
 * MB address within the GOB, and the predicted MV (hmv1/vmv1); the second
 * MV pair is written as 0 since 4MV is not implemented here.
 * NOTE(review): the pred_x/pred_y declarations are not visible in this
 * excerpt. */
2856 static void write_mb_info(MpegEncContext *s)
     /* write into the last (most recently reserved) 12-byte slot */
2858 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2859 int offset = put_bits_count(&s->pb);
2860 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2861 int gobn = s->mb_y / s->gob_index;
2863 if (CONFIG_H263_ENCODER)
2864 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2865 bytestream_put_le32(&ptr, offset);
2866 bytestream_put_byte(&ptr, s->qscale);
2867 bytestream_put_byte(&ptr, gobn);
2868 bytestream_put_le16(&ptr, mba);
2869 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2870 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2871 /* 4MV not implemented */
2872 bytestream_put_byte(&ptr, 0); /* hmv2 */
2873 bytestream_put_byte(&ptr, 0); /* vmv2 */
/* Maintain the mb_info side-data bookkeeping: on a startcode, remember the
 * byte position (last_mb_info) and make sure a 12-byte record slot exists;
 * otherwise, once mb_info bytes have been written since the previous record,
 * reserve a new slot and emit the record via write_mb_info().
 * NOTE(review): the mb_info guard and some control-flow lines are not
 * visible in this excerpt. */
2876 static void update_mb_info(MpegEncContext *s, int startcode)
2880 if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2881 s->mb_info_size += 12;
2882 s->prev_mb_info = s->last_mb_info;
2885 s->prev_mb_info = put_bits_count(&s->pb)/8;
2886 /* This might have incremented mb_info_size above, and we return without
2887  * actually writing any info into that slot yet. But in that case,
2888  * this will be called again at the start of the after writing the
2889  * start code, actually writing the mb info. */
     /* startcode path: record its byte position and ensure a slot exists */
2893 s->last_mb_info = put_bits_count(&s->pb)/8;
2894 if (!s->mb_info_size)
2895 s->mb_info_size += 12;
/* Grow the shared PutBitContext output buffer when fewer than `threshold`
 * bytes remain, enlarging avctx->internal->byte_buffer by `size_increase`.
 * Only possible with a single slice context writing directly into the
 * internal byte buffer.  On success the PutBitContext and the dependent
 * pointers (ptr_lastgob, vbv_delay_ptr) are rebased onto the new buffer.
 * Returns 0 on success (return not visible in this excerpt), ENOMEM on
 * allocation failure/overflow, EINVAL if still below threshold afterwards. */
2899 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2901 if ( s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2902 && s->slice_context_count == 1
2903 && s->pb.buf == s->avctx->internal->byte_buffer) {
     /* save offsets of pointers into the old buffer so they can be rebased */
2904 int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2905 int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2907 uint8_t *new_buffer = NULL;
2908 int new_buffer_size = 0;
     /* overflow guard before growing the buffer */
2910 if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2911 av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2912 return AVERROR(ENOMEM);
2917 av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2918 s->avctx->internal->byte_buffer_size + size_increase);
2920 return AVERROR(ENOMEM);
     /* copy existing output, swap in the new buffer, rebase all pointers */
2922 memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2923 av_free(s->avctx->internal->byte_buffer);
2924 s->avctx->internal->byte_buffer = new_buffer;
2925 s->avctx->internal->byte_buffer_size = new_buffer_size;
2926 rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2927 s->ptr_lastgob = s->pb.buf + lastgob_pos;
2928 s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2930 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2931 return AVERROR(EINVAL);
2935 static int encode_thread(AVCodecContext *c, void *arg){
2936 MpegEncContext *s= *(void**)arg;
2937 int mb_x, mb_y, mb_y_order;
2938 int chr_h= 16>>s->chroma_y_shift;
2940 MpegEncContext best_s = { 0 }, backup_s;
2941 uint8_t bit_buf[2][MAX_MB_BYTES];
2942 uint8_t bit_buf2[2][MAX_MB_BYTES];
2943 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2944 PutBitContext pb[2], pb2[2], tex_pb[2];
2946 ff_check_alignment();
2949 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2950 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2951 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2954 s->last_bits= put_bits_count(&s->pb);
2965 /* init last dc values */
2966 /* note: quant matrix value (8) is implied here */
2967 s->last_dc[i] = 128 << s->intra_dc_precision;
2969 s->current_picture.encoding_error[i] = 0;
2971 if(s->codec_id==AV_CODEC_ID_AMV){
2972 s->last_dc[0] = 128*8/13;
2973 s->last_dc[1] = 128*8/14;
2974 s->last_dc[2] = 128*8/14;
2977 memset(s->last_mv, 0, sizeof(s->last_mv));
2981 switch(s->codec_id){
2982 case AV_CODEC_ID_H263:
2983 case AV_CODEC_ID_H263P:
2984 case AV_CODEC_ID_FLV1:
2985 if (CONFIG_H263_ENCODER)
2986 s->gob_index = H263_GOB_HEIGHT(s->height);
2988 case AV_CODEC_ID_MPEG4:
2989 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2990 ff_mpeg4_init_partitions(s);
2996 s->first_slice_line = 1;
2997 s->ptr_lastgob = s->pb.buf;
2998 for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
2999 if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
3001 mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
3002 if (first_in_slice && mb_y_order != s->start_mb_y)
3003 ff_speedhq_end_slice(s);
3004 s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
3011 ff_set_qscale(s, s->qscale);
3012 ff_init_block_index(s);
3014 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3015 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
3016 int mb_type= s->mb_type[xy];
3020 int size_increase = s->avctx->internal->byte_buffer_size/4
3021 + s->mb_width*MAX_MB_BYTES;
3023 ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
3024 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
3025 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
3028 if(s->data_partitioning){
3029 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
3030 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
3031 av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3037 s->mb_y = mb_y; // moved into loop, can get changed by H.261
3038 ff_update_block_index(s);
3040 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3041 ff_h261_reorder_mb_index(s);
3042 xy= s->mb_y*s->mb_stride + s->mb_x;
3043 mb_type= s->mb_type[xy];
3046 /* write gob / video packet header */
3048 int current_packet_size, is_gob_start;
3050 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
3052 is_gob_start = s->rtp_payload_size &&
3053 current_packet_size >= s->rtp_payload_size &&
3056 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3058 switch(s->codec_id){
3059 case AV_CODEC_ID_H263:
3060 case AV_CODEC_ID_H263P:
3061 if(!s->h263_slice_structured)
3062 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3064 case AV_CODEC_ID_MPEG2VIDEO:
3065 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3066 case AV_CODEC_ID_MPEG1VIDEO:
3067 if(s->mb_skip_run) is_gob_start=0;
3069 case AV_CODEC_ID_MJPEG:
3070 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3075 if(s->start_mb_y != mb_y || mb_x!=0){
3078 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3079 ff_mpeg4_init_partitions(s);
3083 av_assert2((put_bits_count(&s->pb)&7) == 0);
3084 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3086 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3087 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3088 int d = 100 / s->error_rate;
3090 current_packet_size=0;
3091 s->pb.buf_ptr= s->ptr_lastgob;
3092 av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3096 #if FF_API_RTP_CALLBACK
3097 FF_DISABLE_DEPRECATION_WARNINGS
3098 if (s->avctx->rtp_callback){
3099 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3100 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3102 FF_ENABLE_DEPRECATION_WARNINGS
3104 update_mb_info(s, 1);
3106 switch(s->codec_id){
3107 case AV_CODEC_ID_MPEG4:
3108 if (CONFIG_MPEG4_ENCODER) {
3109 ff_mpeg4_encode_video_packet_header(s);
3110 ff_mpeg4_clean_buffers(s);
3113 case AV_CODEC_ID_MPEG1VIDEO:
3114 case AV_CODEC_ID_MPEG2VIDEO:
3115 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3116 ff_mpeg1_encode_slice_header(s);
3117 ff_mpeg1_clean_buffers(s);
3120 case AV_CODEC_ID_H263:
3121 case AV_CODEC_ID_H263P:
3122 if (CONFIG_H263_ENCODER)
3123 ff_h263_encode_gob_header(s, mb_y);
3127 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3128 int bits= put_bits_count(&s->pb);
3129 s->misc_bits+= bits - s->last_bits;
3133 s->ptr_lastgob += current_packet_size;
3134 s->first_slice_line=1;
3135 s->resync_mb_x=mb_x;
3136 s->resync_mb_y=mb_y;
3140 if( (s->resync_mb_x == s->mb_x)
3141 && s->resync_mb_y+1 == s->mb_y){
3142 s->first_slice_line=0;
3146 s->dquant=0; //only for QP_RD
3148 update_mb_info(s, 0);
3150 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3152 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3154 copy_context_before_encode(&backup_s, s, -1);
3156 best_s.data_partitioning= s->data_partitioning;
3157 best_s.partitioned_frame= s->partitioned_frame;
3158 if(s->data_partitioning){
3159 backup_s.pb2= s->pb2;
3160 backup_s.tex_pb= s->tex_pb;
3163 if(mb_type&CANDIDATE_MB_TYPE_INTER){
3164 s->mv_dir = MV_DIR_FORWARD;
3165 s->mv_type = MV_TYPE_16X16;
3167 s->mv[0][0][0] = s->p_mv_table[xy][0];
3168 s->mv[0][0][1] = s->p_mv_table[xy][1];
3169 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3170 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3172 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3173 s->mv_dir = MV_DIR_FORWARD;
3174 s->mv_type = MV_TYPE_FIELD;
3177 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3178 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3179 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3181 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3182 &dmin, &next_block, 0, 0);
3184 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3185 s->mv_dir = MV_DIR_FORWARD;
3186 s->mv_type = MV_TYPE_16X16;
3190 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3191 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3193 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3194 s->mv_dir = MV_DIR_FORWARD;
3195 s->mv_type = MV_TYPE_8X8;
3198 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3199 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3201 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3202 &dmin, &next_block, 0, 0);
3204 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3205 s->mv_dir = MV_DIR_FORWARD;
3206 s->mv_type = MV_TYPE_16X16;
3208 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3209 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3210 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3211 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3213 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3214 s->mv_dir = MV_DIR_BACKWARD;
3215 s->mv_type = MV_TYPE_16X16;
3217 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3218 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3219 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3220 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3222 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3223 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3224 s->mv_type = MV_TYPE_16X16;
3226 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3227 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3228 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3229 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3230 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3231 &dmin, &next_block, 0, 0);
3233 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3234 s->mv_dir = MV_DIR_FORWARD;
3235 s->mv_type = MV_TYPE_FIELD;
3238 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3239 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3240 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3242 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3243 &dmin, &next_block, 0, 0);
3245 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3246 s->mv_dir = MV_DIR_BACKWARD;
3247 s->mv_type = MV_TYPE_FIELD;
3250 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3251 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3252 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3254 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3255 &dmin, &next_block, 0, 0);
3257 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3258 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3259 s->mv_type = MV_TYPE_FIELD;
3261 for(dir=0; dir<2; dir++){
3263 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3264 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3265 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3268 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3269 &dmin, &next_block, 0, 0);
3271 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3273 s->mv_type = MV_TYPE_16X16;
3277 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3278 &dmin, &next_block, 0, 0);
3279 if(s->h263_pred || s->h263_aic){
3281 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3283 ff_clean_intra_table_entries(s); //old mode?
3287 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3288 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3289 const int last_qp= backup_s.qscale;
3292 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3293 static const int dquant_tab[4]={-1,1,-2,2};
3294 int storecoefs = s->mb_intra && s->dc_val[0];
3296 av_assert2(backup_s.dquant == 0);
3299 s->mv_dir= best_s.mv_dir;
3300 s->mv_type = MV_TYPE_16X16;
3301 s->mb_intra= best_s.mb_intra;
3302 s->mv[0][0][0] = best_s.mv[0][0][0];
3303 s->mv[0][0][1] = best_s.mv[0][0][1];
3304 s->mv[1][0][0] = best_s.mv[1][0][0];
3305 s->mv[1][0][1] = best_s.mv[1][0][1];
3307 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3308 for(; qpi<4; qpi++){
3309 int dquant= dquant_tab[qpi];
3310 qp= last_qp + dquant;
3311 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3313 backup_s.dquant= dquant;
3316 dc[i]= s->dc_val[0][ s->block_index[i] ];
3317 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3321 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3322 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3323 if(best_s.qscale != qp){
3326 s->dc_val[0][ s->block_index[i] ]= dc[i];
3327 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3334 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3335 int mx= s->b_direct_mv_table[xy][0];
3336 int my= s->b_direct_mv_table[xy][1];
3338 backup_s.dquant = 0;
3339 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3341 ff_mpeg4_set_direct_mv(s, mx, my);
3342 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3343 &dmin, &next_block, mx, my);
3345 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3346 backup_s.dquant = 0;
3347 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3349 ff_mpeg4_set_direct_mv(s, 0, 0);
3350 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3351 &dmin, &next_block, 0, 0);
3353 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3356 coded |= s->block_last_index[i];
3359 memcpy(s->mv, best_s.mv, sizeof(s->mv));
3360 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3361 mx=my=0; //FIXME find the one we actually used
3362 ff_mpeg4_set_direct_mv(s, mx, my);
3363 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3371 s->mv_dir= best_s.mv_dir;
3372 s->mv_type = best_s.mv_type;
3374 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3375 s->mv[0][0][1] = best_s.mv[0][0][1];
3376 s->mv[1][0][0] = best_s.mv[1][0][0];
3377 s->mv[1][0][1] = best_s.mv[1][0][1];*/
3380 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3381 &dmin, &next_block, mx, my);
3386 s->current_picture.qscale_table[xy] = best_s.qscale;
3388 copy_context_after_encode(s, &best_s, -1);
3390 pb_bits_count= put_bits_count(&s->pb);
3391 flush_put_bits(&s->pb);
3392 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3395 if(s->data_partitioning){
3396 pb2_bits_count= put_bits_count(&s->pb2);
3397 flush_put_bits(&s->pb2);
3398 ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3399 s->pb2= backup_s.pb2;
3401 tex_pb_bits_count= put_bits_count(&s->tex_pb);
3402 flush_put_bits(&s->tex_pb);
3403 ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3404 s->tex_pb= backup_s.tex_pb;
3406 s->last_bits= put_bits_count(&s->pb);
3408 if (CONFIG_H263_ENCODER &&
3409 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3410 ff_h263_update_motion_val(s);
3412 if(next_block==0){ //FIXME 16 vs linesize16
3413 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3414 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3415 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3418 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3419 ff_mpv_reconstruct_mb(s, s->block);
3421 int motion_x = 0, motion_y = 0;
3422 s->mv_type=MV_TYPE_16X16;
3423 // only one MB-Type possible
3426 case CANDIDATE_MB_TYPE_INTRA:
3429 motion_x= s->mv[0][0][0] = 0;
3430 motion_y= s->mv[0][0][1] = 0;
3432 case CANDIDATE_MB_TYPE_INTER:
3433 s->mv_dir = MV_DIR_FORWARD;
3435 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3436 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3438 case CANDIDATE_MB_TYPE_INTER_I:
3439 s->mv_dir = MV_DIR_FORWARD;
3440 s->mv_type = MV_TYPE_FIELD;
3443 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3444 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3445 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3448 case CANDIDATE_MB_TYPE_INTER4V:
3449 s->mv_dir = MV_DIR_FORWARD;
3450 s->mv_type = MV_TYPE_8X8;
3453 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3454 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3457 case CANDIDATE_MB_TYPE_DIRECT:
3458 if (CONFIG_MPEG4_ENCODER) {
3459 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3461 motion_x=s->b_direct_mv_table[xy][0];
3462 motion_y=s->b_direct_mv_table[xy][1];
3463 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3466 case CANDIDATE_MB_TYPE_DIRECT0:
3467 if (CONFIG_MPEG4_ENCODER) {
3468 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3470 ff_mpeg4_set_direct_mv(s, 0, 0);
3473 case CANDIDATE_MB_TYPE_BIDIR:
3474 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3476 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3477 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3478 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3479 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3481 case CANDIDATE_MB_TYPE_BACKWARD:
3482 s->mv_dir = MV_DIR_BACKWARD;
3484 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3485 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3487 case CANDIDATE_MB_TYPE_FORWARD:
3488 s->mv_dir = MV_DIR_FORWARD;
3490 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3491 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3493 case CANDIDATE_MB_TYPE_FORWARD_I:
3494 s->mv_dir = MV_DIR_FORWARD;
3495 s->mv_type = MV_TYPE_FIELD;
3498 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3499 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3500 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3503 case CANDIDATE_MB_TYPE_BACKWARD_I:
3504 s->mv_dir = MV_DIR_BACKWARD;
3505 s->mv_type = MV_TYPE_FIELD;
3508 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3509 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3510 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3513 case CANDIDATE_MB_TYPE_BIDIR_I:
3514 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3515 s->mv_type = MV_TYPE_FIELD;
3517 for(dir=0; dir<2; dir++){
3519 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3520 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3521 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3526 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3529 encode_mb(s, motion_x, motion_y);
3531 // RAL: Update last macroblock type
3532 s->last_mv_dir = s->mv_dir;
3534 if (CONFIG_H263_ENCODER &&
3535 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3536 ff_h263_update_motion_val(s);
3538 ff_mpv_reconstruct_mb(s, s->block);
3541 /* clean the MV table in IPS frames for direct mode in B-frames */
3542 if(s->mb_intra /* && I,P,S_TYPE */){
3543 s->p_mv_table[xy][0]=0;
3544 s->p_mv_table[xy][1]=0;
3547 if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3551 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3552 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3554 s->current_picture.encoding_error[0] += sse(
3555 s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3556 s->dest[0], w, h, s->linesize);
3557 s->current_picture.encoding_error[1] += sse(
3558 s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3559 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3560 s->current_picture.encoding_error[2] += sse(
3561 s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3562 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3565 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3566 ff_h263_loop_filter(s);
3568 ff_dlog(s->avctx, "MB %d %d bits\n",
3569 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3573 //not beautiful here but we must write it before flushing so it has to be here
3574 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3575 ff_msmpeg4_encode_ext_header(s);
3579 #if FF_API_RTP_CALLBACK
3580 FF_DISABLE_DEPRECATION_WARNINGS
3581 /* Send the last GOB if RTP */
3582 if (s->avctx->rtp_callback) {
3583 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3584 int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3585 /* Call the RTP callback to send the last GOB */
3587 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3589 FF_ENABLE_DEPRECATION_WARNINGS
3595 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold per-slice motion-estimation statistics from a worker (slice) context
 * into the main context. MERGE() adds src->field into dst->field and zeroes
 * src->field, so repeated merging never double-counts. */
3596 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3597 MERGE(me.scene_change_score);
3598 MERGE(me.mc_mb_var_sum_temp);
3599 MERGE(me.mb_var_sum_temp);
/* Merge a worker (slice) context back into the main context after encoding:
 * accumulate bit/statistics counters, then append the worker's bitstream to
 * the main PutBitContext. MERGE() adds src->field into dst and zeroes src. */
3602 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3605 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3606 MERGE(dct_count[1]);
3615 MERGE(er.error_count);
3616 MERGE(padding_bug_score);
3617 MERGE(current_picture.encoding_error[0]);
3618 MERGE(current_picture.encoding_error[1]);
3619 MERGE(current_picture.encoding_error[2]);
/* Noise-reduction DCT error sums are only maintained when NR is enabled. */
3621 if (dst->noise_reduction){
3622 for(i=0; i<64; i++){
3623 MERGE(dct_error_sum[0][i]);
3624 MERGE(dct_error_sum[1][i]);
/* Slices end byte-aligned, so the worker's bitstream can be copied verbatim
 * onto the main bitstream; the asserts check that alignment invariant. */
3628 av_assert1(put_bits_count(&src->pb) % 8 ==0);
3629 av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3630 ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3631 flush_put_bits(&dst->pb);
/* Choose the quantizer (lambda / per-picture quality) for the current frame.
 * A pending next_lambda (if any) takes priority; otherwise, unless qscale is
 * fixed, the rate controller picks it. With dry_run set, state that would be
 * consumed (next_lambda) is preserved so a later real run sees it again.
 * NOTE(review): some lines of this function are not visible in this excerpt;
 * returns presumably 0 on success, negative on rate-control failure. */
3634 static int estimate_qp(MpegEncContext *s, int dry_run){
3635 if (s->next_lambda){
3636 s->current_picture_ptr->f->quality =
3637 s->current_picture.f->quality = s->next_lambda;
3638 if(!dry_run) s->next_lambda= 0;
3639 } else if (!s->fixed_qscale) {
3640 int quality = ff_rate_estimate_qscale(s, dry_run);
3641 s->current_picture_ptr->f->quality =
3642 s->current_picture.f->quality = quality;
3643 if (s->current_picture.f->quality < 0)
/* Adaptive quantization: clean up the per-MB qscale table so it obeys the
 * codec's constraints on qscale changes between macroblocks. */
3647 if(s->adaptive_quant){
3648 switch(s->codec_id){
3649 case AV_CODEC_ID_MPEG4:
3650 if (CONFIG_MPEG4_ENCODER)
3651 ff_clean_mpeg4_qscales(s);
3653 case AV_CODEC_ID_H263:
3654 case AV_CODEC_ID_H263P:
3655 case AV_CODEC_ID_FLV1:
3656 if (CONFIG_H263_ENCODER)
3657 ff_clean_h263_qscales(s);
3660 ff_init_qscale_tab(s);
3663 s->lambda= s->lambda_table[0];
/* Non-adaptive path: lambda comes straight from the picture quality. */
3666 s->lambda = s->current_picture.f->quality;
/* must be called before writing the header */
/* Derive the temporal distances used for B-frame prediction scaling:
 * pp_time = distance between the two surrounding non-B frames,
 * pb_time = distance from the previous non-B frame to this B-frame. */
3672 static void set_frame_distances(MpegEncContext * s){
3673 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3674 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3676 if(s->pict_type==AV_PICTURE_TYPE_B){
3677 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3678 av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
/* Non-B frame: advance the reference distance and remember its time. */
3680 s->pp_time= s->time - s->last_non_b_time;
3681 s->last_non_b_time= s->time;
3682 av_assert1(s->picture_number==0 || s->pp_time > 0);
/* Encode one picture: estimate motion (multi-threaded over slice contexts),
 * pick f_code/b_code and the quantizer, set up quantization matrices, write
 * the picture header, then run the per-slice encode threads and merge their
 * output. Returns 0 on success, negative on error (some error paths are not
 * visible in this excerpt). */
3686 static int encode_picture(MpegEncContext *s, int picture_number)
3690 int context_count = s->slice_context_count;
3692 s->picture_number = picture_number;
3694 /* Reset the average MB variance */
3695 s->me.mb_var_sum_temp =
3696 s->me.mc_mb_var_sum_temp = 0;
3698 /* we need to initialize some time vars before we can encode B-frames */
3699 // RAL: Condition added for MPEG1VIDEO
3700 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3701 set_frame_distances(s);
3702 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3703 ff_set_mpeg4_time(s);
3705 s->me.scene_change_score=0;
3707 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* Rounding control: I-frames reset it; P-frames may toggle it each frame
 * (flip-flop rounding) for codecs that signal it per picture. */
3709 if(s->pict_type==AV_PICTURE_TYPE_I){
3710 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3711 else s->no_rounding=0;
3712 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3713 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3714 s->no_rounding ^= 1;
/* Pass-2 rate control needs a dry-run quantizer estimate before ME so the
 * stored f_code from pass 1 can be reused; otherwise seed lambda from the
 * last frame of the same (B / non-B) type. */
3717 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3718 if (estimate_qp(s,1) < 0)
3720 ff_get_2pass_fcode(s);
3721 } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3722 if(s->pict_type==AV_PICTURE_TYPE_B)
3723 s->lambda= s->last_lambda_for[s->pict_type];
3725 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* Except for (M)JPEG-style codecs, chroma shares the luma intra matrices. */
3729 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3730 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3731 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3732 s->q_chroma_intra_matrix = s->q_intra_matrix;
3733 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3736 s->mb_intra=0; //for the rate distortion & bit compare functions
/* Clone the main context into each slice worker before motion estimation. */
3737 for(i=1; i<context_count; i++){
3738 ret = ff_update_duplicate_context(s->thread_context[i], s);
3746 /* Estimate motion for every MB */
3747 if(s->pict_type != AV_PICTURE_TYPE_I){
3748 s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3749 s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3750 if (s->pict_type != AV_PICTURE_TYPE_B) {
3751 if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3753 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3757 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3758 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3760 for(i=0; i<s->mb_stride*s->mb_height; i++)
3761 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3763 if(!s->fixed_qscale){
3764 /* finding spatial complexity for I-frame rate control */
3765 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
/* Collect ME statistics from all slice workers into the main context. */
3768 for(i=1; i<context_count; i++){
3769 merge_context_after_me(s, s->thread_context[i]);
3771 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3772 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* Scene-change detection: promote a P-frame to I when the ME score exceeds
 * the threshold, forcing all MBs intra. */
3775 if (s->me.scene_change_score > s->scenechange_threshold &&
3776 s->pict_type == AV_PICTURE_TYPE_P) {
3777 s->pict_type= AV_PICTURE_TYPE_I;
3778 for(i=0; i<s->mb_stride*s->mb_height; i++)
3779 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3780 if(s->msmpeg4_version >= 3)
3782 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3783 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* P/S frames: pick the smallest f_code covering all motion vectors, then
 * clip/convert vectors that fall outside the representable range. */
3787 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3788 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3790 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3792 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3793 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3794 s->f_code= FFMAX3(s->f_code, a, b);
3797 ff_fix_long_p_mvs(s, s->intra_penalty ? CANDIDATE_MB_TYPE_INTER : CANDIDATE_MB_TYPE_INTRA);
3798 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3799 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3803 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3804 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
/* B-frames: f_code from forward tables, b_code from backward tables. */
3809 if(s->pict_type==AV_PICTURE_TYPE_B){
3812 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3813 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3814 s->f_code = FFMAX(a, b);
3816 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3817 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3818 s->b_code = FFMAX(a, b);
3820 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3821 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3822 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3823 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3824 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3826 for(dir=0; dir<2; dir++){
3829 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3830 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3831 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3832 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* Real (non-dry-run) quantizer estimation now that pict_type is final. */
3840 if (estimate_qp(s, 0) < 0)
3843 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3844 s->pict_type == AV_PICTURE_TYPE_I &&
3845 !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3846 s->qscale= 3; //reduce clipping problems
/* MJPEG folds qscale directly into the quant matrices (see comment below). */
3848 if (s->out_format == FMT_MJPEG) {
3849 const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3850 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3852 if (s->avctx->intra_matrix) {
3854 luma_matrix = s->avctx->intra_matrix;
3856 if (s->avctx->chroma_intra_matrix)
3857 chroma_matrix = s->avctx->chroma_intra_matrix;
3859 /* for mjpeg, we do include qscale in the matrix */
3861 int j = s->idsp.idct_permutation[i];
3863 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3864 s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3866 s->y_dc_scale_table=
3867 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3868 s->chroma_intra_matrix[0] =
3869 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3870 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3871 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3872 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3873 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV uses fixed flat 13/14 quant matrices and DC scales. */
3876 if(s->codec_id == AV_CODEC_ID_AMV){
3877 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3878 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3880 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3882 s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3883 s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3885 s->y_dc_scale_table= y;
3886 s->c_dc_scale_table= c;
3887 s->intra_matrix[0] = 13;
3888 s->chroma_intra_matrix[0] = 14;
3889 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3890 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3891 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3892 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3896 if (s->out_format == FMT_SPEEDHQ) {
3897 s->y_dc_scale_table=
3898 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[3];
3901 //FIXME var duplication
3902 s->current_picture_ptr->f->key_frame =
3903 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3904 s->current_picture_ptr->f->pict_type =
3905 s->current_picture.f->pict_type = s->pict_type;
3907 if (s->current_picture.f->key_frame)
3908 s->picture_in_gop_number=0;
/* Write the codec-specific picture header. */
3910 s->mb_x = s->mb_y = 0;
3911 s->last_bits= put_bits_count(&s->pb);
3912 switch(s->out_format) {
3914 if (CONFIG_MJPEG_ENCODER && s->huffman != HUFFMAN_TABLE_OPTIMAL)
3915 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3916 s->pred, s->intra_matrix, s->chroma_intra_matrix);
3919 if (CONFIG_SPEEDHQ_ENCODER)
3920 ff_speedhq_encode_picture_header(s);
3923 if (CONFIG_H261_ENCODER)
3924 ff_h261_encode_picture_header(s, picture_number);
3927 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3928 ff_wmv2_encode_picture_header(s, picture_number);
3929 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3930 ff_msmpeg4_encode_picture_header(s, picture_number);
3931 else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3932 ret = ff_mpeg4_encode_picture_header(s, picture_number);
3935 } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3936 ret = ff_rv10_encode_picture_header(s, picture_number);
3940 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3941 ff_rv20_encode_picture_header(s, picture_number);
3942 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3943 ff_flv_encode_picture_header(s, picture_number);
3944 else if (CONFIG_H263_ENCODER)
3945 ff_h263_encode_picture_header(s, picture_number);
3948 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3949 ff_mpeg1_encode_picture_header(s, picture_number);
3954 bits= put_bits_count(&s->pb);
3955 s->header_bits= bits - s->last_bits;
/* Run per-slice encoding in worker threads and merge results. */
3957 for(i=1; i<context_count; i++){
3958 update_duplicate_context_after_me(s->thread_context[i], s);
3960 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3961 for(i=1; i<context_count; i++){
3962 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3963 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3964 merge_context_after_encode(s, s->thread_context[i]);
/* Noise reduction on a DCT block (C reference implementation): accumulate the
 * magnitude of each coefficient into dct_error_sum (separately for intra and
 * inter blocks) and subtract the derived dct_offset, clamping toward zero so
 * small noisy coefficients are suppressed rather than sign-flipped. */
3970 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3971 const int intra= s->mb_intra;
3974 s->dct_count[intra]++;
3976 for(i=0; i<64; i++){
3977 int level= block[i];
/* Positive branch (negative branch is symmetric; some lines elided here). */
3981 s->dct_error_sum[intra][i] += level;
3982 level -= s->dct_offset[intra][i];
3983 if(level<0) level=0;
3985 s->dct_error_sum[intra][i] -= level;
3986 level += s->dct_offset[intra][i];
3987 if(level>0) level=0;
/* Rate-distortion optimal ("trellis") quantization of one 8x8 block.
 * Forward-DCTs the block, computes up to two candidate quantized levels per
 * coefficient, then runs a Viterbi-style search over (run, level) choices
 * minimizing distortion + lambda * bits. Returns the index of the last
 * non-zero coefficient (or a negative value when the block becomes empty),
 * and sets *overflow when a level exceeded max_qcoeff. */
3994 static int dct_quantize_trellis_c(MpegEncContext *s,
3995 int16_t *block, int n,
3996 int qscale, int *overflow){
3998 const uint16_t *matrix;
3999 const uint8_t *scantable;
4000 const uint8_t *perm_scantable;
4002 unsigned int threshold1, threshold2;
4014 int coeff_count[64];
4015 int qmul, qadd, start_i, last_non_zero, i, dc;
4016 const int esc_length= s->ac_esc_length;
4018 uint8_t * last_length;
/* lambda in the same fixed-point scale as the distortion terms below. */
4019 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
4022 s->fdsp.fdct(block);
4024 if(s->dct_error_sum)
4025 s->denoise_dct(s, block);
4027 qadd= ((qscale-1)|1)*8;
4029 if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
4030 else mpeg2_qscale = qscale << 1;
/* Intra path: pick intra scantable/matrices/VLC length tables. */
4034 scantable= s->intra_scantable.scantable;
4035 perm_scantable= s->intra_scantable.permutated;
4043 /* For AIC we skip quant/dequant of INTRADC */
4048 /* note: block[0] is assumed to be positive */
4049 block[0] = (block[0] + (q >> 1)) / q;
4052 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4053 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4054 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4055 bias= 1<<(QMAT_SHIFT-1);
4057 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4058 length = s->intra_chroma_ac_vlc_length;
4059 last_length= s->intra_chroma_ac_vlc_last_length;
4061 length = s->intra_ac_vlc_length;
4062 last_length= s->intra_ac_vlc_last_length;
/* Inter path: inter scantable/matrix/length tables (some lines elided). */
4065 scantable= s->inter_scantable.scantable;
4066 perm_scantable= s->inter_scantable.permutated;
4069 qmat = s->q_inter_matrix[qscale];
4070 matrix = s->inter_matrix;
4071 length = s->inter_ac_vlc_length;
4072 last_length= s->inter_ac_vlc_last_length;
4076 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4077 threshold2= (threshold1<<1);
/* Scan backwards to find the last coefficient that survives quantization. */
4079 for(i=63; i>=start_i; i--) {
4080 const int j = scantable[i];
4081 int level = block[j] * qmat[j];
4083 if(((unsigned)(level+threshold1))>threshold2){
/* Build up to two candidate levels per surviving coefficient; coefficients
 * under threshold get a single +-1 candidate. */
4089 for(i=start_i; i<=last_non_zero; i++) {
4090 const int j = scantable[i];
4091 int level = block[j] * qmat[j];
4093 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4094 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4095 if(((unsigned)(level+threshold1))>threshold2){
4097 level= (bias + level)>>QMAT_SHIFT;
4099 coeff[1][i]= level-1;
4100 // coeff[2][k]= level-2;
4102 level= (bias - level)>>QMAT_SHIFT;
4103 coeff[0][i]= -level;
4104 coeff[1][i]= -level+1;
4105 // coeff[2][k]= -level+2;
4107 coeff_count[i]= FFMIN(level, 2);
4108 av_assert2(coeff_count[i]);
4111 coeff[0][i]= (level>>31)|1;
4116 *overflow= s->max_qcoeff < max; //overflow might have happened
4118 if(last_non_zero < start_i){
4119 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4120 return last_non_zero;
/* Viterbi search: score_tab[i] is the best cost of coding coefficients up to
 * position i; survivor[] holds the positions still worth extending from. */
4123 score_tab[start_i]= 0;
4124 survivor[0]= start_i;
4127 for(i=start_i; i<=last_non_zero; i++){
4128 int level_index, j, zero_distortion;
4129 int dct_coeff= FFABS(block[ scantable[i] ]);
4130 int best_score=256*256*256*120;
/* ifast fDCT has a built-in scaling; undo it so distortion is comparable. */
4132 if (s->fdsp.fdct == ff_fdct_ifast)
4133 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4134 zero_distortion= dct_coeff*dct_coeff;
4136 for(level_index=0; level_index < coeff_count[i]; level_index++){
4138 int level= coeff[level_index][i];
4139 const int alevel= FFABS(level);
/* Reconstruct the dequantized value per output format to measure the
 * true distortion this candidate would introduce. */
4144 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4145 unquant_coeff= alevel*qmul + qadd;
4146 } else if(s->out_format == FMT_MJPEG) {
4147 j = s->idsp.idct_permutation[scantable[i]];
4148 unquant_coeff = alevel * matrix[j] * 8;
4150 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4152 unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4153 unquant_coeff = (unquant_coeff - 1) | 1;
4155 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4156 unquant_coeff = (unquant_coeff - 1) | 1;
4161 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* Levels fitting in the VLC tables use exact bit costs ... */
4163 if((level&(~127)) == 0){
4164 for(j=survivor_count-1; j>=0; j--){
4165 int run= i - survivor[j];
4166 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4167 score += score_tab[i-run];
4169 if(score < best_score){
4172 level_tab[i+1]= level-64;
4176 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4177 for(j=survivor_count-1; j>=0; j--){
4178 int run= i - survivor[j];
4179 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4180 score += score_tab[i-run];
4181 if(score < last_score){
4184 last_level= level-64;
/* ... larger levels are costed with the escape-code length. */
4190 distortion += esc_length*lambda;
4191 for(j=survivor_count-1; j>=0; j--){
4192 int run= i - survivor[j];
4193 int score= distortion + score_tab[i-run];
4195 if(score < best_score){
4198 level_tab[i+1]= level-64;
4202 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4203 for(j=survivor_count-1; j>=0; j--){
4204 int run= i - survivor[j];
4205 int score= distortion + score_tab[i-run];
4206 if(score < last_score){
4209 last_level= level-64;
4217 score_tab[i+1]= best_score;
4219 // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
/* Prune survivors that can no longer lead to a better path. */
4220 if(last_non_zero <= 27){
4221 for(; survivor_count; survivor_count--){
4222 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4226 for(; survivor_count; survivor_count--){
4227 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4232 survivor[ survivor_count++ ]= i+1;
/* Pick the cheapest end-of-block position (H.263/H.261 use a "last" flag
 * in the VLC instead and were handled above). */
4235 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4236 last_score= 256*256*256*120;
4237 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4238 int score= score_tab[i];
4240 score += lambda * 2; // FIXME more exact?
4242 if(score < last_score){
4245 last_level= level_tab[i];
4246 last_run= run_tab[i];
4251 s->coded_score[n] = last_score;
4253 dc= FFABS(block[0]);
4254 last_non_zero= last_i - 1;
4255 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4257 if(last_non_zero < start_i)
4258 return last_non_zero;
/* Special case: only coefficient 0 remains — decide whether keeping it is
 * cheaper than emitting an empty block. */
4260 if(last_non_zero == 0 && start_i == 0){
4262 int best_score= dc * dc;
4264 for(i=0; i<coeff_count[0]; i++){
4265 int level= coeff[i][0];
4266 int alevel= FFABS(level);
4267 int unquant_coeff, score, distortion;
4269 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4270 unquant_coeff= (alevel*qmul + qadd)>>3;
4272 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4273 unquant_coeff = (unquant_coeff - 1) | 1;
4275 unquant_coeff = (unquant_coeff + 4) >> 3;
4276 unquant_coeff<<= 3 + 3;
4278 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4280 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4281 else score= distortion + esc_length*lambda;
4283 if(score < best_score){
4285 best_level= level - 64;
4288 block[0]= best_level;
4289 s->coded_score[n] = best_score - dc*dc;
4290 if(best_level == 0) return -1;
4291 else return last_non_zero;
/* Backtrack the winning path, writing levels into the (permuted) block. */
4295 av_assert2(last_level);
4297 block[ perm_scantable[last_non_zero] ]= last_level;
4300 for(; i>start_i; i -= run_tab[i] + 1){
4301 block[ perm_scantable[i-1] ]= level_tab[i];
4304 return last_non_zero;
/* IDCT basis functions in BASIS_SHIFT fixed point, indexed by (permuted)
 * coefficient and spatial position; filled lazily by build_basis(). */
4307 static int16_t basis[64][64];
/* Precompute the 2-D DCT basis vectors, stored under the IDCT permutation
 * given by perm so they line up with coefficient order used by the encoder. */
4309 static void build_basis(uint8_t *perm){
4316 double s= 0.25*(1<<BASIS_SHIFT);
4318 int perm_index= perm[index];
/* DC rows/columns carry the 1/sqrt(2) normalization factor. */
4319 if(i==0) s*= sqrt(0.5);
4320 if(j==0) s*= sqrt(0.5);
4321 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Iterative refinement of an already-quantized block (quantizer noise
 * shaping): greedily tries +-1 changes to individual coefficients, accepting
 * the change that most reduces a weighted spatial-domain error (via the
 * precomputed basis[] vectors) plus lambda * bit-cost delta. `rem` holds the
 * current reconstruction error; `orig` the target pixels; `weight` the
 * perceptual weighting. Returns the updated last-non-zero index. */
4328 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4329 int16_t *block, int16_t *weight, int16_t *orig,
4332 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4333 const uint8_t *scantable;
4334 const uint8_t *perm_scantable;
4335 // unsigned int threshold1, threshold2;
4340 int qmul, qadd, start_i, last_non_zero, i, dc;
4342 uint8_t * last_length;
4344 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* Lazily build the DCT basis table on first use. */
4346 if(basis[0][0] == 0)
4347 build_basis(s->idsp.idct_permutation);
/* Intra path: intra scantable and AC VLC length tables. */
4352 scantable= s->intra_scantable.scantable;
4353 perm_scantable= s->intra_scantable.permutated;
4360 /* For AIC we skip quant/dequant of INTRADC */
4364 q <<= RECON_SHIFT-3;
4365 /* note: block[0] is assumed to be positive */
4367 // block[0] = (block[0] + (q >> 1)) / q;
4369 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4370 // bias= 1<<(QMAT_SHIFT-1);
4371 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4372 length = s->intra_chroma_ac_vlc_length;
4373 last_length= s->intra_chroma_ac_vlc_last_length;
4375 length = s->intra_ac_vlc_length;
4376 last_length= s->intra_ac_vlc_last_length;
/* Inter path (some lines elided in this excerpt). */
4379 scantable= s->inter_scantable.scantable;
4380 perm_scantable= s->inter_scantable.permutated;
4383 length = s->inter_ac_vlc_length;
4384 last_length= s->inter_ac_vlc_last_length;
4386 last_non_zero = s->block_last_index[n];
/* Initialize rem[] to the negated target in RECON_SHIFT fixed point;
 * dequantized coefficients are then added back via add_8x8basis(). */
4388 dc += (1<<(RECON_SHIFT-1));
4389 for(i=0; i<64; i++){
4390 rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
/* Convert the raw weights into the 16..63 range used by try_8x8basis(). */
4394 for(i=0; i<64; i++){
4399 w= FFABS(weight[i]) + qns*one;
4400 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4403 // w=weight[i] = (63*qns + (w/2)) / w;
4406 av_assert2(w<(1<<6));
4409 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* Add the current dequantized coefficients into rem[] and record their runs
 * so bit-cost deltas can be computed during refinement. */
4413 for(i=start_i; i<=last_non_zero; i++){
4414 int j= perm_scantable[i];
4415 const int level= block[j];
4419 if(level<0) coeff= qmul*level - qadd;
4420 else coeff= qmul*level + qadd;
4421 run_tab[rle_index++]=run;
4424 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
/* Main refinement loop: each iteration finds the single +-1 coefficient
 * change with the best score and applies it; loop exits when no change
 * improves the score (exit condition elided in this excerpt). */
4431 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4434 int run2, best_unquant_change=0, analyze_gradient;
4435 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
4437 if(analyze_gradient){
4438 for(i=0; i<64; i++){
4441 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
/* Intra DC is handled separately from the AC refinement below. */
4447 const int level= block[0];
4448 int change, old_coeff;
4450 av_assert2(s->mb_intra);
4454 for(change=-1; change<=1; change+=2){
4455 int new_level= level + change;
4456 int score, new_coeff;
4458 new_coeff= q*new_level;
4459 if(new_coeff >= 2048 || new_coeff < 0)
4462 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4463 new_coeff - old_coeff);
4464 if(score<best_score){
4467 best_change= change;
4468 best_unquant_change= new_coeff - old_coeff;
4475 run2= run_tab[rle_index++];
/* Try +-1 on every AC position, accounting for how the change affects the
 * (run, level) VLC coding of this and neighbouring coefficients. */
4479 for(i=start_i; i<64; i++){
4480 int j= perm_scantable[i];
4481 const int level= block[j];
4482 int change, old_coeff;
4484 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4488 if(level<0) old_coeff= qmul*level - qadd;
4489 else old_coeff= qmul*level + qadd;
4490 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4494 av_assert2(run2>=0 || i >= last_non_zero );
4497 for(change=-1; change<=1; change+=2){
4498 int new_level= level + change;
4499 int score, new_coeff, unquant_change;
4502 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4506 if(new_level<0) new_coeff= qmul*new_level - qadd;
4507 else new_coeff= qmul*new_level + qadd;
4508 if(new_coeff >= 2048 || new_coeff <= -2048)
4510 //FIXME check for overflow
/* Existing non-zero coefficient changing magnitude: bit-cost delta is a
 * straight VLC length difference. */
4513 if(level < 63 && level > -63){
4514 if(i < last_non_zero)
4515 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4516 - length[UNI_AC_ENC_INDEX(run, level+64)];
4518 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4519 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* Zero coefficient becoming +-1: splits an existing run in two. */
4522 av_assert2(FFABS(new_level)==1);
4524 if(analyze_gradient){
4525 int g= d1[ scantable[i] ];
4526 if(g && (g^new_level) >= 0)
4530 if(i < last_non_zero){
4531 int next_i= i + run2 + 1;
4532 int next_level= block[ perm_scantable[next_i] ] + 64;
4534 if(next_level&(~127))
4537 if(next_i < last_non_zero)
4538 score += length[UNI_AC_ENC_INDEX(run, 65)]
4539 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4540 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4542 score += length[UNI_AC_ENC_INDEX(run, 65)]
4543 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4544 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4546 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4548 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4549 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* +-1 coefficient going to zero: merges two runs into one. */
4555 av_assert2(FFABS(level)==1);
4557 if(i < last_non_zero){
4558 int next_i= i + run2 + 1;
4559 int next_level= block[ perm_scantable[next_i] ] + 64;
4561 if(next_level&(~127))
4564 if(next_i < last_non_zero)
4565 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4566 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4567 - length[UNI_AC_ENC_INDEX(run, 65)];
4569 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4570 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4571 - length[UNI_AC_ENC_INDEX(run, 65)];
4573 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4575 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4576 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4583 unquant_change= new_coeff - old_coeff;
4584 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4586 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4588 if(score<best_score){
4591 best_change= change;
4592 best_unquant_change= unquant_change;
4596 prev_level= level + 64;
4597 if(prev_level&(~127))
/* Apply the winning change and update last_non_zero accordingly. */
4607 int j= perm_scantable[ best_coeff ];
4609 block[j] += best_change;
4611 if(best_coeff > last_non_zero){
4612 last_non_zero= best_coeff;
4613 av_assert2(block[j]);
4615 for(; last_non_zero>=start_i; last_non_zero--){
4616 if(block[perm_scantable[last_non_zero]])
/* Rebuild run_tab for the next iteration and fold the change into rem[]. */
4623 for(i=start_i; i<=last_non_zero; i++){
4624 int j= perm_scantable[i];
4625 const int level= block[j];
4628 run_tab[rle_index++]=run;
4635 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4641 return last_non_zero;
4645 * Permute an 8x8 block according to permutation.
4646 * @param block the block which will be permuted according to
4647 * the given permutation vector
4648 * @param permutation the permutation vector
4649 * @param last the last non zero coefficient in scantable order, used to
4650 * speed the permutation up
4651 * @param scantable the used scantable, this is only used to speed the
4652 * permutation up, the block is not (inverse) permutated
4653 * to scantable order!
4655 void ff_block_permute(int16_t *block, uint8_t *permutation,
4656 const uint8_t *scantable, int last)
4663 //FIXME it is ok but not clean and might fail for some permutations
4664 // if (permutation[1] == 1)
/* Copy the non-zero coefficients (up to `last` in scan order) aside ... */
4667 for (i = 0; i <= last; i++) {
4668 const int j = scantable[i];
/* ... then write them back at their permuted positions. The temp copy is
 * needed because source and destination positions can overlap. */
4673 for (i = 0; i <= last; i++) {
4674 const int j = scantable[i];
4675 const int perm_j = permutation[j];
4676 block[perm_j] = temp[j];
/* Plain (non-trellis) quantization of one 8x8 block: forward DCT, optional
 * denoising, then per-coefficient multiply by the precomputed quant matrix
 * with a rounding bias. Returns the index of the last non-zero coefficient
 * in scan order and sets *overflow when a level exceeded max_qcoeff. */
4680 int ff_dct_quantize_c(MpegEncContext *s,
4681 int16_t *block, int n,
4682 int qscale, int *overflow)
4684 int i, j, level, last_non_zero, q, start_i;
4686 const uint8_t *scantable;
4689 unsigned int threshold1, threshold2;
4691 s->fdsp.fdct(block);
4693 if(s->dct_error_sum)
4694 s->denoise_dct(s, block);
/* Intra: quantize DC separately with its own divisor q. */
4697 scantable= s->intra_scantable.scantable;
4705 /* For AIC we skip quant/dequant of INTRADC */
4708 /* note: block[0] is assumed to be positive */
4709 block[0] = (block[0] + (q >> 1)) / q;
4712 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4713 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
/* Inter path (some lines elided in this excerpt). */
4715 scantable= s->inter_scantable.scantable;
4718 qmat = s->q_inter_matrix[qscale];
4719 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4721 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4722 threshold2= (threshold1<<1);
/* Scan backwards to find the last coefficient surviving quantization. */
4723 for(i=63;i>=start_i;i--) {
4725 level = block[j] * qmat[j];
4727 if(((unsigned)(level+threshold1))>threshold2){
/* Quantize the surviving range; sub-threshold coefficients become zero. */
4734 for(i=start_i; i<=last_non_zero; i++) {
4736 level = block[j] * qmat[j];
4738 // if( bias+level >= (1<<QMAT_SHIFT)
4739 // || bias-level >= (1<<QMAT_SHIFT)){
4740 if(((unsigned)(level+threshold1))>threshold2){
4742 level= (bias + level)>>QMAT_SHIFT;
4745 level= (bias - level)>>QMAT_SHIFT;
4753 *overflow= s->max_qcoeff < max; //overflow might have happened
4755 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4756 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4757 ff_block_permute(block, s->idsp.idct_permutation,
4758 scantable, last_non_zero);
4760 return last_non_zero;
/* Helpers for the AVOption tables below: field offset into the private
 * context and the common video/encoding option flags. */
4763 #define OFFSET(x) offsetof(MpegEncContext, x)
4764 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private AVOptions of the plain H.263 encoder. */
4765 static const AVOption h263_options[] = {
4766 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4767 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass exposing h263_options on the H.263 encoder's private context. */
4772 static const AVClass h263_class = {
4773 .class_name = "H.263 encoder",
4774 .item_name = av_default_item_name,
4775 .option = h263_options,
4776 .version = LIBAVUTIL_VERSION_INT,
/* H.263 / H.263-1996 encoder, built on the shared mpegvideo encode core
 * (ff_mpv_encode_init/picture/end). */
4779 AVCodec ff_h263_encoder = {
4781 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4782 .type = AVMEDIA_TYPE_VIDEO,
4783 .id = AV_CODEC_ID_H263,
4784 .priv_data_size = sizeof(MpegEncContext),
4785 .init = ff_mpv_encode_init,
4786 .encode2 = ff_mpv_encode_picture,
4787 .close = ff_mpv_encode_end,
4788 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4789 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4790 .priv_class = &h263_class,
/* Private AVOptions of the H.263+ encoder (annex features on top of H.263). */
4793 static const AVOption h263p_options[] = {
4794 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4795 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4796 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4797 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
/* AVClass exposing h263p_options on the H.263+ encoder's private context. */
4801 static const AVClass h263p_class = {
4802 .class_name = "H.263p encoder",
4803 .item_name = av_default_item_name,
4804 .option = h263p_options,
4805 .version = LIBAVUTIL_VERSION_INT,
/* H.263+ (H.263 version 2) encoder; same mpegvideo core as H.263 but
 * additionally advertises slice-threading capability. */
4808 AVCodec ff_h263p_encoder = {
4810 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4811 .type = AVMEDIA_TYPE_VIDEO,
4812 .id = AV_CODEC_ID_H263P,
4813 .priv_data_size = sizeof(MpegEncContext),
4814 .init = ff_mpv_encode_init,
4815 .encode2 = ff_mpv_encode_picture,
4816 .close = ff_mpv_encode_end,
4817 .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4818 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4819 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4820 .priv_class = &h263p_class,
/* AVClass for msmpeg4v2; no codec-specific options, only the generic
 * mpegvideo option table. */
4823 static const AVClass msmpeg4v2_class = {
4824 .class_name = "msmpeg4v2 encoder",
4825 .item_name = av_default_item_name,
4826 .option = ff_mpv_generic_options,
4827 .version = LIBAVUTIL_VERSION_INT,
/* Microsoft MPEG-4 variant v2 encoder, driven by the shared mpegvideo core. */
4830 AVCodec ff_msmpeg4v2_encoder = {
4831 .name = "msmpeg4v2",
4832 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4833 .type = AVMEDIA_TYPE_VIDEO,
4834 .id = AV_CODEC_ID_MSMPEG4V2,
4835 .priv_data_size = sizeof(MpegEncContext),
4836 .init = ff_mpv_encode_init,
4837 .encode2 = ff_mpv_encode_picture,
4838 .close = ff_mpv_encode_end,
4839 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4840 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4841 .priv_class = &msmpeg4v2_class,
/* AVClass for msmpeg4v3; generic mpegvideo options only. */
4844 static const AVClass msmpeg4v3_class = {
4845 .class_name = "msmpeg4v3 encoder",
4846 .item_name = av_default_item_name,
4847 .option = ff_mpv_generic_options,
4848 .version = LIBAVUTIL_VERSION_INT,
/* Microsoft MPEG-4 variant v3 encoder, driven by the shared mpegvideo core. */
4851 AVCodec ff_msmpeg4v3_encoder = {
4853 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4854 .type = AVMEDIA_TYPE_VIDEO,
4855 .id = AV_CODEC_ID_MSMPEG4V3,
4856 .priv_data_size = sizeof(MpegEncContext),
4857 .init = ff_mpv_encode_init,
4858 .encode2 = ff_mpv_encode_picture,
4859 .close = ff_mpv_encode_end,
4860 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4861 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4862 .priv_class = &msmpeg4v3_class,
/* AVClass for wmv1; generic mpegvideo options only. */
4865 static const AVClass wmv1_class = {
4866 .class_name = "wmv1 encoder",
4867 .item_name = av_default_item_name,
4868 .option = ff_mpv_generic_options,
4869 .version = LIBAVUTIL_VERSION_INT,
/* Windows Media Video 7 (WMV1) encoder, driven by the shared mpegvideo core. */
4872 AVCodec ff_wmv1_encoder = {
4874 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4875 .type = AVMEDIA_TYPE_VIDEO,
4876 .id = AV_CODEC_ID_WMV1,
4877 .priv_data_size = sizeof(MpegEncContext),
4878 .init = ff_mpv_encode_init,
4879 .encode2 = ff_mpv_encode_picture,
4880 .close = ff_mpv_encode_end,
4881 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4882 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4883 .priv_class = &wmv1_class,