2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
32 #include "libavutil/internal.h"
33 #include "libavutil/intmath.h"
34 #include "libavutil/mathematics.h"
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/timer.h"
42 #include "mpegvideo.h"
43 #include "mpegvideodata.h"
47 #include "mjpegenc_common.h"
49 #include "mpegutils.h"
52 #include "pixblockdsp.h"
56 #include "aandcttab.h"
58 #include "mpeg4video.h"
60 #include "bytestream.h"
66 #define QUANT_BIAS_SHIFT 8
68 #define QMAT_SHIFT_MMX 16
71 static int encode_picture(MpegEncContext *s, int picture_number);
72 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
73 static int sse_mb(MpegEncContext *s);
74 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
75 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
77 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
78 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
80 const AVOption ff_mpv_generic_options[] = {
/**
 * Build per-qscale forward-quantization tables from a quantization matrix.
 * For each qscale in [qmin, qmax] a reciprocal table is precomputed so that
 * quantization becomes a multiply+shift instead of a division.  The scaling
 * differs per FDCT implementation: the AAN ifast FDCT leaves ff_aanscales[]
 * factors in its output, so those are folded into the reciprocal here.
 *
 * @param qmat         output: 32-bit reciprocal tables, indexed [qscale][coef]
 * @param qmat16       output: 16-bit tables (value + bias) for the MMX path
 * @param quant_matrix input quantization matrix (64 entries, natural order)
 * @param bias         rounding bias in QUANT_BIAS_SHIFT fixed point
 * @param qmin, qmax   inclusive qscale range to precompute
 * @param intra        nonzero for intra matrices (DC coef handled separately)
 */
void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
                       uint16_t (*qmat16)[2][64],
                       const uint16_t *quant_matrix,
                       int bias, int qmin, int qmax, int intra)
    FDCTDSPContext *fdsp = &s->fdsp;
    for (qscale = qmin; qscale <= qmax; qscale++) {
        /* ISO-accurate (islow) and faan FDCTs produce unscaled coefficients. */
        if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
            fdsp->fdct == ff_faandct ||
#endif /* CONFIG_FAANDCT */
            fdsp->fdct == ff_jpeg_fdct_islow_10) {
            for (i = 0; i < 64; i++) {
                /* j: coefficient index after the IDCT permutation */
                const int j = s->idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale * quant_matrix[j];
                /* 16 <= qscale * quant_matrix[i] <= 7905
                 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
                 * 19952 <= x <= 249205026
                 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
                 * 3444240 >= (1 << 36) / (x) >= 275 */
                qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / den);
        } else if (fdsp->fdct == ff_fdct_ifast) {
            /* AAN ifast FDCT output is scaled by ff_aanscales[]/2^14; fold
             * that factor into the reciprocal (hence the extra +14 shift). */
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                int64_t den = ff_aanscales[i] * (int64_t) qscale * quant_matrix[j];
                /* 16 <= qscale * quant_matrix[i] <= 7905
                 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
                 * 19952 <= x <= 249205026
                 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
                 * 3444240 >= (1 << 36) / (x) >= 275 */
                qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) / den);
            /* Fallback path: also fills the 16-bit MMX tables. */
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale * quant_matrix[j];
                /* We can safely suppose that 16 <= quant_matrix[i] <= 255
                 * Assume x = qscale * quant_matrix[i]
                 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
                 * so 32768 >= (1 << 19) / (x) >= 67 */
                qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / den);
                //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
                //                   (qscale * quant_matrix[i]);
                qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) / den;
                /* clamp: 0 would kill the coefficient, 128*256 overflows
                 * the signed 16-bit multiply used by the MMX quantizer */
                if (qmat16[qscale][0][i] == 0 ||
                    qmat16[qscale][0][i] == 128 * 256)
                    qmat16[qscale][0][i] = 128 * 256 - 1;
                qmat16[qscale][1][i] =
                    ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
                                qmat16[qscale][0][i]);
        /* Reduce the table shift until no (max coefficient * reciprocal)
         * product can overflow int; warn if precision had to be dropped. */
        for (i = intra; i < 64; i++) {
            if (fdsp->fdct == ff_fdct_ifast) {
                max = (8191LL * ff_aanscales[i]) >> 14;
            while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
        av_log(NULL, AV_LOG_INFO,
               "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/* Derive qscale (and lambda2) from the current rate-control lambda.
 * qscale ~= lambda * 139 / 2^(FF_LAMBDA_SHIFT+7), clipped to [qmin, qmax]. */
static inline void update_qscale(MpegEncContext *s)
    s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
                (FF_LAMBDA_SHIFT + 7);
    s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
    /* lambda2 = lambda^2 rescaled, with rounding */
    s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/* Write a 64-entry quantization matrix to the bitstream, 8 bits per entry,
 * in zigzag scan order. */
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
    for (i = 0; i < 64; i++) {
        put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
 * init s->current_picture.qscale_table from s->lambda_table
void ff_init_qscale_tab(MpegEncContext *s)
    int8_t * const qscale_table = s->current_picture.qscale_table;
    /* convert each per-MB lambda to a qscale, clipped to the user qmin/qmax;
     * mb_index2xy maps scan-order MB index to the table position */
    for (i = 0; i < s->mb_num; i++) {
        unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
        int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
        qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/* Copy the per-frame fields that motion estimation / header encoding may
 * have changed from the master context into a slice-thread duplicate. */
static void update_duplicate_context_after_me(MpegEncContext *dst,
#define COPY(a) dst->a= src->a
    COPY(current_picture);
    COPY(picture_in_gop_number);
    COPY(gop_picture_number);
    COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
    COPY(progressive_frame);    // FIXME don't set in encode_header
    COPY(partitioned_frame);    // FIXME don't set in encode_header
 * Set the given MpegEncContext to defaults for encoding.
 * the changed fields will not depend upon the prior state of the MpegEncContext.
static void mpv_encode_defaults(MpegEncContext *s)
    ff_mpv_common_defaults(s);
    /* default fcode table: every MV in [-16,16) maps to fcode 1 */
    for (i = -16; i < 16; i++) {
        default_fcode_tab[i + MAX_MV] = 1;
    s->me.mv_penalty = default_mv_penalty;
    s->fcode_tab     = default_fcode_tab;
    s->input_picture_number  = 0;
    s->picture_in_gop_number = 0;
/* Set up the DCT quantization function pointers; x86 init may install
 * optimized versions first, the C fallbacks fill any remaining slots.
 * With trellis enabled, dct_quantize is swapped for the trellis variant
 * while fast_dct_quantize keeps the plain quantizer. */
av_cold int ff_dct_encode_init(MpegEncContext *s) {
    ff_dct_encode_init_x86(s);
    if (CONFIG_H263_ENCODER)
        ff_h263dsp_init(&s->h263dsp);
    if (!s->dct_quantize)
        s->dct_quantize = ff_dct_quantize_c;
    s->denoise_dct = denoise_dct_c;
    s->fast_dct_quantize = s->dct_quantize;
    if (s->avctx->trellis)
        s->dct_quantize  = dct_quantize_trellis_c;
/* init video encoder */
/**
 * Validate user options, configure the MpegEncContext for the selected
 * codec, allocate quantization tables / picture lists and initialize the
 * DSP, motion-estimation and rate-control subsystems.
 * Returns 0 on success, a negative AVERROR on invalid options, or jumps to
 * the fail path (which tears down via ff_mpv_encode_end) on allocation error.
 */
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
    MpegEncContext *s = avctx->priv_data;
    int i, ret, format_supported;
    mpv_encode_defaults(s);
    /* ---- per-codec pixel format validation ---- */
    switch (avctx->codec_id) {
    case AV_CODEC_ID_MPEG2VIDEO:
        if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
            avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
            av_log(avctx, AV_LOG_ERROR,
                   "only YUV420 and YUV422 are supported\n");
    case AV_CODEC_ID_MJPEG:
    case AV_CODEC_ID_AMV:
        format_supported = 0;
        /* JPEG color space */
        if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
            avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
            avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
            (avctx->color_range == AVCOL_RANGE_JPEG &&
             (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
              avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
              avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
            format_supported = 1;
        /* MPEG color space */
        else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
                 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
                  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
                  avctx->pix_fmt == AV_PIX_FMT_YUV444P))
            format_supported = 1;
        if (!format_supported) {
            av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
        if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
            av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
    /* derive internal chroma format from the pixel format */
    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUVJ444P:
    case AV_PIX_FMT_YUV444P:
        s->chroma_format = CHROMA_444;
    case AV_PIX_FMT_YUVJ422P:
    case AV_PIX_FMT_YUV422P:
        s->chroma_format = CHROMA_422;
    case AV_PIX_FMT_YUVJ420P:
    case AV_PIX_FMT_YUV420P:
        s->chroma_format = CHROMA_420;
    /* ---- copy basic encoding parameters from the AVCodecContext ---- */
    s->bit_rate = avctx->bit_rate;
    s->width    = avctx->width;
    s->height   = avctx->height;
    if (avctx->gop_size > 600 &&
        avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
        av_log(avctx, AV_LOG_WARNING,
               "keyframe interval too large!, reducing it from %d to %d\n",
               avctx->gop_size, 600);
        avctx->gop_size = 600;
    s->gop_size     = avctx->gop_size;
    if (avctx->max_b_frames > MAX_B_FRAMES) {
        av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
               "is %d.\n", MAX_B_FRAMES);
        avctx->max_b_frames = MAX_B_FRAMES;
    s->max_b_frames = avctx->max_b_frames;
    s->codec_id     = avctx->codec->id;
    s->strict_std_compliance = avctx->strict_std_compliance;
    s->quarter_sample     = (avctx->flags & CODEC_FLAG_QPEL) != 0;
    s->mpeg_quant         = avctx->mpeg_quant;
    s->rtp_mode           = !!avctx->rtp_payload_size;
    s->intra_dc_precision = avctx->intra_dc_precision;
    // workaround some differences between how applications specify dc precision
    if (s->intra_dc_precision < 0) {
        s->intra_dc_precision += 8;
    } else if (s->intra_dc_precision >= 8)
        s->intra_dc_precision -= 8;
    if (s->intra_dc_precision < 0) {
        av_log(avctx, AV_LOG_ERROR,
                "intra dc precision must be positive, note some applications use"
                " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
        return AVERROR(EINVAL);
    /* MPEG-2 allows precision 0..3; other codecs only 0 */
    if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
        av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
        return AVERROR(EINVAL);
    s->user_specified_pts = AV_NOPTS_VALUE;
    if (s->gop_size <= 1) {
    s->me_method = avctx->me_method;
    s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->border_masking != 0.0)
        s->border_masking = avctx->border_masking;
    FF_ENABLE_DEPRECATION_WARNINGS
    /* adaptive quant is enabled when any masking option or QP-RD asks for it */
    s->adaptive_quant = (s->avctx->lumi_masking ||
                         s->avctx->dark_masking ||
                         s->avctx->temporal_cplx_masking ||
                         s->avctx->spatial_cplx_masking  ||
                         s->avctx->p_masking      ||
                         (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
    s->loop_filter = !!(s->avctx->flags & CODEC_FLAG_LOOP_FILTER);
    /* ---- rate-control / VBV buffer sanity checks ---- */
    if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
        switch(avctx->codec_id) {
        case AV_CODEC_ID_MPEG1VIDEO:
        case AV_CODEC_ID_MPEG2VIDEO:
            avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
        case AV_CODEC_ID_MPEG4:
        case AV_CODEC_ID_MSMPEG4V1:
        case AV_CODEC_ID_MSMPEG4V2:
        case AV_CODEC_ID_MSMPEG4V3:
            /* piecewise-linear VBV size interpolation between rate bands */
            if       (avctx->rc_max_rate >= 15000000) {
                avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
            } else if(avctx->rc_max_rate >=  2000000) {
                avctx->rc_buffer_size =  80 + (avctx->rc_max_rate -  2000000LL) * (320- 80) / (15000000 -  2000000);
            } else if(avctx->rc_max_rate >=   384000) {
                avctx->rc_buffer_size =  40 + (avctx->rc_max_rate -   384000LL) * ( 80- 40) / ( 2000000 -   384000);
                avctx->rc_buffer_size = 40;
            avctx->rc_buffer_size *= 16384;
        if (avctx->rc_buffer_size) {
            av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
    if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
        av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
    if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
        av_log(avctx, AV_LOG_INFO,
               "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
    if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
        av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
    if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
        av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
    if (avctx->rc_max_rate &&
        avctx->rc_max_rate == avctx->bit_rate &&
        avctx->rc_max_rate != avctx->rc_min_rate) {
        av_log(avctx, AV_LOG_INFO,
               "impossible bitrate constraints, this will fail\n");
    if (avctx->rc_buffer_size &&
        avctx->bit_rate * (int64_t)avctx->time_base.num >
            avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
        av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
    if (!s->fixed_qscale &&
        avctx->bit_rate * av_q2d(avctx->time_base) >
            avctx->bit_rate_tolerance) {
        av_log(avctx, AV_LOG_WARNING,
               "bitrate tolerance %d too small for bitrate %d, overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
        avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
    if (s->avctx->rc_max_rate &&
        s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
        (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
         s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
        90000LL * (avctx->rc_buffer_size - 1) >
            s->avctx->rc_max_rate * 0xFFFFLL) {
        av_log(avctx, AV_LOG_INFO,
               "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
               "specified vbv buffer is too large for the given bitrate!\n");
    /* ---- per-codec feature compatibility checks ---- */
    if ((s->avctx->flags & CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
        s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
        s->codec_id != AV_CODEC_ID_FLV1) {
        av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
    if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
        av_log(avctx, AV_LOG_ERROR,
               "OBMC is only supported with simple mb decision\n");
    if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
        av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
    if (s->max_b_frames                           &&
        s->codec_id != AV_CODEC_ID_MPEG4      &&
        s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
        s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
        av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
    if (s->max_b_frames < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "max b frames must be 0 or positive for mpegvideo based encoders\n");
    if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
         s->codec_id == AV_CODEC_ID_H263  ||
         s->codec_id == AV_CODEC_ID_H263P) &&
        (avctx->sample_aspect_ratio.num > 255 ||
         avctx->sample_aspect_ratio.den > 255)) {
        av_log(avctx, AV_LOG_WARNING,
               "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
               avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
        av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
                   avctx->sample_aspect_ratio.num,  avctx->sample_aspect_ratio.den, 255);
    /* ---- per-codec resolution limits ---- */
    if ((s->codec_id == AV_CODEC_ID_H263  ||
         s->codec_id == AV_CODEC_ID_H263P) &&
        (avctx->width  > 2048 ||
         avctx->height > 1152 )) {
        av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
    if ((s->codec_id == AV_CODEC_ID_H263  ||
         s->codec_id == AV_CODEC_ID_H263P) &&
        ((avctx->width &3) ||
         (avctx->height&3) )) {
        av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
    if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
        (avctx->width  > 4095 ||
         avctx->height > 4095 )) {
        av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
        (avctx->width  > 16383 ||
         avctx->height > 16383 )) {
        av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
    if (s->codec_id == AV_CODEC_ID_RV10 &&
         avctx->height&15 )) {
        av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
        return AVERROR(EINVAL);
    if (s->codec_id == AV_CODEC_ID_RV20 &&
        av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
        return AVERROR(EINVAL);
    if ((s->codec_id == AV_CODEC_ID_WMV1 ||
         s->codec_id == AV_CODEC_ID_WMV2) &&
        av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
    if ((s->avctx->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
        s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
        av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
    // FIXME mpeg2 uses that too
    if (s->mpeg_quant && (   s->codec_id != AV_CODEC_ID_MPEG4
                          && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
        av_log(avctx, AV_LOG_ERROR,
               "mpeg2 style quantization not supported by codec\n");
    if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
        av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
    if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
        s->avctx->mb_decision != FF_MB_DECISION_RD) {
        av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
    if (s->avctx->scenechange_threshold < 1000000000 &&
        (s->avctx->flags & CODEC_FLAG_CLOSED_GOP)) {
        av_log(avctx, AV_LOG_ERROR,
               "closed gop with scene change detection are not supported yet, "
               "set threshold to 1000000000\n");
    if (s->avctx->flags & CODEC_FLAG_LOW_DELAY) {
        if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
            av_log(avctx, AV_LOG_ERROR,
                  "low delay forcing is only available for mpeg2\n");
        if (s->max_b_frames != 0) {
            av_log(avctx, AV_LOG_ERROR,
                  "b frames cannot be used with low delay\n");
    if (s->q_scale_type == 1) {
        if (avctx->qmax > 12) {
            av_log(avctx, AV_LOG_ERROR,
                   "non linear quant only supports qmax <= 12 currently\n");
    /* ---- threading constraints ---- */
    if (s->avctx->thread_count > 1         &&
        s->codec_id != AV_CODEC_ID_MPEG4      &&
        s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
        s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
        s->codec_id != AV_CODEC_ID_MJPEG      &&
        (s->codec_id != AV_CODEC_ID_H263P)) {
        av_log(avctx, AV_LOG_ERROR,
               "multi threaded encoding not supported by codec\n");
    if (s->avctx->thread_count < 1) {
        av_log(avctx, AV_LOG_ERROR,
               "automatic thread number detection not supported by codec, "
    if (s->avctx->slices > 1 || s->avctx->thread_count > 1)
    if (s->avctx->thread_count > 1 && s->codec_id == AV_CODEC_ID_H263P)
        s->h263_slice_structured = 1;
    if (!avctx->time_base.den || !avctx->time_base.num) {
        av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
    if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
        av_log(avctx, AV_LOG_INFO,
               "notice: b_frame_strategy only affects the first pass\n");
        avctx->b_frame_strategy = 0;
    /* reduce the timebase to lowest terms */
    i = av_gcd(avctx->time_base.den, avctx->time_base.num);
        av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
        avctx->time_base.den /= i;
        avctx->time_base.num /= i;
    /* ---- default quantization biases per codec family ---- */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) {
        // (a + x * 3 / 8) / x
        s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
        s->inter_quant_bias = 0;
        s->intra_quant_bias = 0;
        s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
    if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
        av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
        return AVERROR(EINVAL);
#if FF_API_QUANT_BIAS
    FF_DISABLE_DEPRECATION_WARNINGS
    if (s->intra_quant_bias == FF_DEFAULT_QUANT_BIAS &&
        avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
        s->intra_quant_bias = avctx->intra_quant_bias;
    if (s->inter_quant_bias == FF_DEFAULT_QUANT_BIAS &&
        avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
        s->inter_quant_bias = avctx->inter_quant_bias;
    FF_ENABLE_DEPRECATION_WARNINGS
    av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
    /* MPEG-4 stores the timebase denominator in 16 bits */
    if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
        s->avctx->time_base.den > (1 << 16) - 1) {
        av_log(avctx, AV_LOG_ERROR,
               "timebase %d/%d not supported by MPEG 4 standard, "
               "the maximum admitted value for the timebase denominator "
               "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
    s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
    /* ---- per-codec output format / feature setup ---- */
    switch (avctx->codec->id) {
    case AV_CODEC_ID_MPEG1VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay  = !!(s->avctx->flags & CODEC_FLAG_LOW_DELAY);
        avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
    case AV_CODEC_ID_MPEG2VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay  = !!(s->avctx->flags & CODEC_FLAG_LOW_DELAY);
        avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
    case AV_CODEC_ID_MJPEG:
    case AV_CODEC_ID_AMV:
        s->out_format = FMT_MJPEG;
        s->intra_only = 1; /* force intra only for jpeg */
        if (!CONFIG_MJPEG_ENCODER ||
            ff_mjpeg_encode_init(s) < 0)
    case AV_CODEC_ID_H261:
        if (!CONFIG_H261_ENCODER)
        if (ff_h261_get_picture_format(s->width, s->height) < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "The specified picture size of %dx%d is not valid for the "
                   "H.261 codec.\nValid sizes are 176x144, 352x288\n",
                    s->width, s->height);
        s->out_format = FMT_H261;
        s->rtp_mode   = 0; /* Sliced encoding not supported */
    case AV_CODEC_ID_H263:
        if (!CONFIG_H263_ENCODER)
        if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
                             s->width, s->height) == 8) {
            av_log(avctx, AV_LOG_ERROR,
                   "The specified picture size of %dx%d is not valid for "
                   "the H.263 codec.\nValid sizes are 128x96, 176x144, "
                   "352x288, 704x576, and 1408x1152. "
                   "Try H.263+.\n", s->width, s->height);
        s->out_format = FMT_H263;
    case AV_CODEC_ID_H263P:
        s->out_format = FMT_H263;
        s->h263_aic        = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
        s->modified_quant  = s->h263_aic;
        s->loop_filter     = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
        s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
        /* These are just to be sure */
    case AV_CODEC_ID_FLV1:
        s->out_format      = FMT_H263;
        s->h263_flv        = 2; /* format = 1; 11-bit codes */
        s->unrestricted_mv = 1;
        s->rtp_mode  = 0; /* don't allow GOB */
    case AV_CODEC_ID_RV10:
        s->out_format = FMT_H263;
    case AV_CODEC_ID_RV20:
        s->out_format      = FMT_H263;
        s->modified_quant  = 1;
        s->unrestricted_mv = 0;
    case AV_CODEC_ID_MPEG4:
        s->out_format      = FMT_H263;
        s->unrestricted_mv = 1;
        s->low_delay       = s->max_b_frames ? 0 : 1;
        avctx->delay       = s->low_delay ? 0 : (s->max_b_frames + 1);
    case AV_CODEC_ID_MSMPEG4V2:
        s->out_format      = FMT_H263;
        s->unrestricted_mv = 1;
        s->msmpeg4_version = 2;
    case AV_CODEC_ID_MSMPEG4V3:
        s->out_format        = FMT_H263;
        s->unrestricted_mv   = 1;
        s->msmpeg4_version   = 3;
        s->flipflop_rounding = 1;
    case AV_CODEC_ID_WMV1:
        s->out_format        = FMT_H263;
        s->unrestricted_mv   = 1;
        s->msmpeg4_version   = 4;
        s->flipflop_rounding = 1;
    case AV_CODEC_ID_WMV2:
        s->out_format        = FMT_H263;
        s->unrestricted_mv   = 1;
        s->msmpeg4_version   = 5;
        s->flipflop_rounding = 1;
    avctx->has_b_frames = !s->low_delay;
    s->progressive_frame    =
    s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
                                                CODEC_FLAG_INTERLACED_ME) ||
    /* ---- subsystem init and table allocation ---- */
    if (ff_mpv_common_init(s) < 0)
    ff_fdctdsp_init(&s->fdsp, avctx);
    ff_me_cmp_init(&s->mecc, avctx);
    ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
    ff_pixblockdsp_init(&s->pdsp, avctx);
    ff_qpeldsp_init(&s->qdsp);
    s->avctx->coded_frame = s->current_picture.f;
    if (s->msmpeg4_version) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
                          2 * 2 * (MAX_LEVEL + 1) *
                          (MAX_RUN + 1) * 2 * sizeof(int), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,   64 * 32 * sizeof(int), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,   64 * 32 * sizeof(int), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
                      MAX_PICTURE_COUNT * sizeof(Picture *), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
                      MAX_PICTURE_COUNT * sizeof(Picture *), fail);
    if (s->avctx->noise_reduction) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
                          2 * 64 * sizeof(uint16_t), fail);
    ff_dct_encode_init(s);
    if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
        s->chroma_qscale_table = ff_h263_chroma_qscale_table;
    s->quant_precision = 5;
    ff_set_cmp(&s->mecc, s->mecc.ildct_cmp,      s->avctx->ildct_cmp);
    ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->avctx->frame_skip_cmp);
    if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
        ff_h261_encode_init(s);
    if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
        ff_h263_encode_init(s);
    if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
        if ((ret = ff_msmpeg4_encode_init(s)) < 0)
    if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
        && s->out_format == FMT_MPEG1)
        ff_mpeg1_encode_init(s);
    /* init q matrix: pick codec-appropriate defaults, allow user override */
    for (i = 0; i < 64; i++) {
        int j = s->idsp.idct_permutation[i];
        if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
            s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
        } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
            s->chroma_intra_matrix[j] =
            s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
        if (s->avctx->intra_matrix)
            s->intra_matrix[j] = s->avctx->intra_matrix[i];
        if (s->avctx->inter_matrix)
            s->inter_matrix[j] = s->avctx->inter_matrix[i];
    /* precompute matrix */
    /* for mjpeg, we do include qscale in the matrix */
    if (s->out_format != FMT_MJPEG) {
        ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
                          s->intra_matrix, s->intra_quant_bias, avctx->qmin,
        ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
                          s->inter_matrix, s->inter_quant_bias, avctx->qmin,
    if (ff_rate_control_init(s) < 0)
#if FF_API_ERROR_RATE
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->error_rate)
        s->error_rate = avctx->error_rate;
    FF_ENABLE_DEPRECATION_WARNINGS;
#if FF_API_NORMALIZE_AQP
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP)
        s->mpv_flags |= FF_MPV_FLAG_NAQ;
    FF_ENABLE_DEPRECATION_WARNINGS;
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->flags & CODEC_FLAG_MV0)
        s->mpv_flags |= FF_MPV_FLAG_MV0;
    FF_ENABLE_DEPRECATION_WARNINGS
    /* migrate deprecated AVCodecContext rate-control fields into s */
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->rc_qsquish != 0.0)
        s->rc_qsquish = avctx->rc_qsquish;
    if (avctx->rc_qmod_amp != 0.0)
        s->rc_qmod_amp = avctx->rc_qmod_amp;
    if (avctx->rc_qmod_freq)
        s->rc_qmod_freq = avctx->rc_qmod_freq;
    if (avctx->rc_buffer_aggressivity != 1.0)
        s->rc_buffer_aggressivity = avctx->rc_buffer_aggressivity;
    if (avctx->rc_initial_cplx != 0.0)
        s->rc_initial_cplx = avctx->rc_initial_cplx;
        s->lmin = avctx->lmin;
        s->lmax = avctx->lmax;
        s->rc_eq = av_strdup(avctx->rc_eq);
            return AVERROR(ENOMEM);
    FF_ENABLE_DEPRECATION_WARNINGS
    /* b_frame_strategy 2 needs temporary downscaled frames for lookahead */
    if (avctx->b_frame_strategy == 2) {
        for (i = 0; i < s->max_b_frames + 2; i++) {
            s->tmp_frames[i] = av_frame_alloc();
            if (!s->tmp_frames[i])
                return AVERROR(ENOMEM);
            s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
            s->tmp_frames[i]->width  = s->width  >> avctx->brd_scale;
            s->tmp_frames[i]->height = s->height >> avctx->brd_scale;
            ret = av_frame_get_buffer(s->tmp_frames[i], 32);
    /* fail path: free everything allocated so far */
    ff_mpv_encode_end(avctx);
    return AVERROR_UNKNOWN;
/* Free all encoder-side allocations; safe to call from the init fail path
 * since av_freep/av_frame_free tolerate NULL. */
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
    MpegEncContext *s = avctx->priv_data;
    ff_rate_control_uninit(s);
    ff_mpv_common_end(s);
    if (CONFIG_MJPEG_ENCODER &&
        s->out_format == FMT_MJPEG)
        ff_mjpeg_encode_close(s);
    av_freep(&avctx->extradata);
    for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
        av_frame_free(&s->tmp_frames[i]);
    ff_free_picture_tables(&s->new_picture);
    ff_mpeg_unref_picture(s->avctx, &s->new_picture);
    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);
    /* the chroma matrices may alias the luma ones; only free when distinct
     * to avoid a double free */
    if(s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
    if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
    s->q_chroma_intra_matrix=   NULL;
    s->q_chroma_intra_matrix16= NULL;
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);
/* Sum of absolute errors of a 16x16 block against a constant reference
 * value (typically the block mean) — a cheap flatness/activity measure. */
static int get_sae(uint8_t *src, int ref, int stride)
    for (y = 0; y < 16; y++) {
        for (x = 0; x < 16; x++) {
            acc += FFABS(src[x + y * stride] - ref);
/* Count 16x16 blocks where intra coding looks cheaper than inter:
 * a block is counted when its SAE around its own mean (+500 margin)
 * is below the SAD against the reference frame. */
static int get_intra_count(MpegEncContext *s, uint8_t *src,
                           uint8_t *ref, int stride)
    /* process only whole 16x16 blocks */
    h = s->height & ~15;
    for (y = 0; y < h; y += 16) {
        for (x = 0; x < w; x += 16) {
            int offset = x + y * stride;
            int sad  = s->mecc.sad[0](NULL, src + offset, ref + offset,
            int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
            int sae  = get_sae(src + offset, mean, stride);
            acc += sae + 500 < sad;
/* Thin wrapper around ff_alloc_picture with all encoder-context geometry
 * filled in; `shared` selects referencing the caller's buffers instead of
 * allocating new ones. */
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
    return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
                            s->chroma_x_shift, s->chroma_y_shift, s->out_format,
                            s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
                            &s->linesize, &s->uvlinesize);
/**
 * Queue a user-supplied frame into the encoder's input_picture FIFO.
 * Validates/derives the pts, then either references the user's buffers
 * directly ("direct" mode, when strides and alignment match) or copies the
 * planes into a freshly allocated Picture, padding edges as needed.
 * NOTE(review): pic_arg handling for the flush case (NULL frame) is outside
 * the visible span — confirm against the full file.
 */
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
    Picture *pic = NULL;
    int i, display_picture_number = 0, ret;
    /* frames the encoder buffers before output: B-frame depth, or 1 for
     * non-low-delay codecs without B-frames */
    const int encoding_delay = s->max_b_frames ? s->max_b_frames :
                                                 (s->low_delay ? 0 : 1);
    display_picture_number = s->input_picture_number++;
    if (pts != AV_NOPTS_VALUE) {
        if (s->user_specified_pts != AV_NOPTS_VALUE) {
            int64_t last = s->user_specified_pts;
            /* pts must be strictly increasing */
                av_log(s->avctx, AV_LOG_ERROR,
                       "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
                return AVERROR(EINVAL);
            if (!s->low_delay && display_picture_number == 1)
                s->dts_delta = pts - last;
        s->user_specified_pts = pts;
        /* no pts given: extrapolate from the previous one, or fall back to
         * the display picture number */
        if (s->user_specified_pts != AV_NOPTS_VALUE) {
            s->user_specified_pts =
            pts = s->user_specified_pts + 1;
            av_log(s->avctx, AV_LOG_INFO,
                   "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
            pts = display_picture_number;
        /* direct mode requires matching strides, 16-aligned dimensions and
         * STRIDE_ALIGN-aligned data pointers */
        if (!pic_arg->buf[0] ||
            pic_arg->linesize[0] != s->linesize ||
            pic_arg->linesize[1] != s->uvlinesize ||
            pic_arg->linesize[2] != s->uvlinesize)
        if ((s->width & 15) || (s->height & 15))
        if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
        if (s->linesize & (STRIDE_ALIGN-1))
        ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
                pic_arg->linesize[1], s->linesize, s->uvlinesize);
        i = ff_find_unused_picture(s->avctx, s->picture, direct);
        pic = &s->picture[i];
            if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
        ret = alloc_picture(s, pic, direct);
            /* data already in place (in-place path), no copy needed */
            if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
                pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
                pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
                /* copy all three planes into the encoder-owned buffers */
                int h_chroma_shift, v_chroma_shift;
                av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                for (i = 0; i < 3; i++) {
                    int src_stride = pic_arg->linesize[i];
                    int dst_stride = i ? s->uvlinesize : s->linesize;
                    int h_shift = i ? h_chroma_shift : 0;
                    int v_shift = i ? v_chroma_shift : 0;
                    int w = s->width  >> h_shift;
                    int h = s->height >> v_shift;
                    uint8_t *src = pic_arg->data[i];
                    uint8_t *dst = pic->f->data[i];
                    /* interlaced MPEG-2 with tall padding needs extra rows */
                    if (   s->codec_id == AV_CODEC_ID_MPEG2VIDEO
                        && !s->progressive_sequence
                        && FFALIGN(s->height, 32) - s->height > 16)
                    if (!s->avctx->rc_buffer_size)
                        dst += INPLACE_OFFSET;
                    if (src_stride == dst_stride)
                        memcpy(dst, src, src_stride * h);
                        /* stride mismatch: copy row by row */
                        uint8_t *dst2 = dst;
                            memcpy(dst2, src, w);
                    if ((s->width & 15) || (s->height & (vpad-1))) {
                        s->mpvencdsp.draw_edges(dst, dst_stride,
        ret = av_frame_copy_props(pic->f, pic_arg);
    pic->f->display_picture_number = display_picture_number;
    pic->f->pts = pts; // we set this here to avoid modifiying pic_arg
    /* shift buffer entries */
    for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
        s->input_picture[i - 1] = s->input_picture[i];
    s->input_picture[encoding_delay] = (Picture*) pic;
/*
 * Decide whether frame p is similar enough to ref to be skipped entirely.
 * Accumulates a per-8x8-block difference score over all three planes and
 * compares it against frame_skip_threshold and a lambda-scaled
 * frame_skip_factor. (Return statements are elided in this chunk;
 * presumably nonzero means "skip" — TODO confirm.)
 *
 * NOTE(review): interior lines are elided; comments cover visible code only.
 */
1234 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1238 int64_t score64 = 0;
1240 for (plane = 0; plane < 3; plane++) {
1241 const int stride = p->f->linesize[plane];
/* Luma (plane 0) is sampled at 2x the macroblock grid of chroma here. */
1242 const int bw = plane ? 1 : 2;
1243 for (y = 0; y < s->mb_height * bw; y++) {
1244 for (x = 0; x < s->mb_width * bw; x++) {
/* Non-shared input pictures were stored with a 16-byte offset. */
1245 int off = p->shared ? 0 : 16;
1246 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1247 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1248 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
/* frame_skip_exp selects how block scores are aggregated:
 * 0 = max, 1 = sum of |v|, 2..4 = higher powers (64-bit accumulator). */
1250 switch (FFABS(s->avctx->frame_skip_exp)) {
1251 case 0: score = FFMAX(score, v); break;
1252 case 1: score += FFABS(v); break;
1253 case 2: score64 += v * (int64_t)v; break;
1254 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1255 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
/* Negative exponent: normalize by picture area and take the inverse power. */
1264 if (s->avctx->frame_skip_exp < 0)
1265 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1266 -1.0/s->avctx->frame_skip_exp);
1268 if (score64 < s->avctx->frame_skip_threshold)
1270 if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
/*
 * Encode one frame with the auxiliary context c and discard the packet,
 * returning (per the elided lines, presumably) the packet size or an error —
 * used by estimate_best_b_count() to measure the cost of a candidate GOP
 * structure. Uses the old avcodec_encode_video2()/av_free_packet() API,
 * consistent with the vintage of this file.
 */
1275 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1277 AVPacket pkt = { 0 };
1278 int ret, got_output;
1280 av_init_packet(&pkt);
1281 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1286 av_free_packet(&pkt);
/*
 * b_frame_strategy == 2: brute-force search for the B-frame count.
 * Builds a throwaway encoder context at reduced resolution (brd_scale),
 * shrinks the queued input pictures into s->tmp_frames[], encodes each
 * candidate I/B.../P pattern, and scores rate + distortion; returns the
 * best B-frame count found.
 *
 * NOTE(review): interior lines elided; the rd bookkeeping and loop ends are
 * only partially visible.
 */
1292 static int estimate_best_b_count(MpegEncContext *s)
1293 AVCodecContext *c = avcodec_alloc_context3(NULL);
1294 const int scale = s->avctx->brd_scale;
1295 int i, j, out_size, p_lambda, b_lambda, lambda2;
1296 int64_t best_rd = INT64_MAX;
1297 int best_b_count = -1;
1300 return AVERROR(ENOMEM);
1301 av_assert0(scale >= 0 && scale <= 3);
/* Reuse the lambdas from the last P/B pictures as quality targets. */
1304 //s->next_picture_ptr->quality;
1305 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1306 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1307 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1308 if (!b_lambda) // FIXME we should do this somewhere else
1309 b_lambda = p_lambda;
1310 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* Configure the scratch encoder to mirror the real one, at reduced size. */
1313 c->width = s->width >> scale;
1314 c->height = s->height >> scale;
1315 c->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR;
1316 c->flags |= s->avctx->flags & CODEC_FLAG_QPEL;
1317 c->mb_decision = s->avctx->mb_decision;
1318 c->me_cmp = s->avctx->me_cmp;
1319 c->mb_cmp = s->avctx->mb_cmp;
1320 c->me_sub_cmp = s->avctx->me_sub_cmp;
1321 c->pix_fmt = AV_PIX_FMT_YUV420P;
1322 c->time_base = s->avctx->time_base;
1323 c->max_b_frames = s->max_b_frames;
1325 if (avcodec_open2(c, codec, NULL) < 0)
/* Shrink next_picture (i == 0) plus the queued inputs into tmp_frames[]. */
1328 for (i = 0; i < s->max_b_frames + 2; i++) {
1329 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1330 s->next_picture_ptr;
1333 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1334 pre_input = *pre_input_ptr;
1335 memcpy(data, pre_input_ptr->f->data, sizeof(data));
/* Non-shared inputs were stored with INPLACE_OFFSET — compensate. */
1337 if (!pre_input.shared && i) {
1338 data[0] += INPLACE_OFFSET;
1339 data[1] += INPLACE_OFFSET;
1340 data[2] += INPLACE_OFFSET;
1343 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1344 s->tmp_frames[i]->linesize[0],
1346 pre_input.f->linesize[0],
1347 c->width, c->height);
1348 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1349 s->tmp_frames[i]->linesize[1],
1351 pre_input.f->linesize[1],
1352 c->width >> 1, c->height >> 1);
1353 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1354 s->tmp_frames[i]->linesize[2],
1356 pre_input.f->linesize[2],
1357 c->width >> 1, c->height >> 1);
/* Try j B-frames between references, for every feasible j. */
1361 for (j = 0; j < s->max_b_frames + 1; j++) {
1364 if (!s->input_picture[j])
1367 c->error[0] = c->error[1] = c->error[2] = 0;
/* Frame 0 stands in for the previous reference: encode as cheap I. */
1369 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1370 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1372 out_size = encode_frame(c, s->tmp_frames[0]);
/* The I-frame cost is deliberately excluded from rd. */
1374 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1376 for (i = 0; i < s->max_b_frames + 1; i++) {
/* Every (j+1)-th frame (and the last) is a P; the rest are Bs. */
1377 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1379 s->tmp_frames[i + 1]->pict_type = is_p ?
1380 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1381 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1383 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1385 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* get the delayed frames */
1390 out_size = encode_frame(c, NULL);
1391 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* Add the accumulated PSNR error as the distortion term. */
1394 rd += c->error[0] + c->error[1] + c->error[2];
1405 return best_b_count;
/*
 * Pick the next picture to code and reorder the input queue accordingly:
 * applies frame skipping, chooses the picture type (I/P/B) per
 * b_frame_strategy, enforces GOP boundaries, and publishes the result in
 * s->new_picture / s->current_picture_ptr.
 *
 * NOTE(review): interior lines elided throughout; comments describe visible
 * code only.
 */
1408 static int select_input_picture(MpegEncContext *s)
/* Advance the reordered queue by one slot. */
1412 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1413 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1414 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
/* set next picture type & ordering */
1417 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
/* Frame skipping: drop the frame entirely if it matches the last reference. */
1418 if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1419 if (s->picture_in_gop_number < s->gop_size &&
1420 s->next_picture_ptr &&
1421 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1422 // FIXME check that te gop check above is +-1 correct
1423 av_frame_unref(s->input_picture[0]->f);
1425 ff_vbv_update(s, 0);
/* Forced intra: no reference yet, or intra-only codec. */
1431 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1432 !s->next_picture_ptr || s->intra_only) {
1433 s->reordered_input_picture[0] = s->input_picture[0];
1434 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1435 s->reordered_input_picture[0]->f->coded_picture_number =
1436 s->coded_picture_number++;
/* Two-pass: take picture types from the first-pass ratecontrol log. */
1440 if (s->avctx->flags & CODEC_FLAG_PASS2) {
1441 for (i = 0; i < s->max_b_frames + 1; i++) {
1442 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1444 if (pict_num >= s->rc_context.num_entries)
1446 if (!s->input_picture[i]) {
1447 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1451 s->input_picture[i]->f->pict_type =
1452 s->rc_context.entry[pict_num].new_pict_type;
/* strategy 0: always use as many Bs as are queued. */
1456 if (s->avctx->b_frame_strategy == 0) {
1457 b_frames = s->max_b_frames;
1458 while (b_frames && !s->input_picture[b_frames])
/* strategy 1: score inter-frame difference, cut where it exceeds
 * mb_num / b_sensitivity. */
1460 } else if (s->avctx->b_frame_strategy == 1) {
1461 for (i = 1; i < s->max_b_frames + 1; i++) {
1462 if (s->input_picture[i] &&
1463 s->input_picture[i]->b_frame_score == 0) {
1464 s->input_picture[i]->b_frame_score =
1466 s->input_picture[i ]->f->data[0],
1467 s->input_picture[i - 1]->f->data[0],
1471 for (i = 0; i < s->max_b_frames + 1; i++) {
1472 if (!s->input_picture[i] ||
1473 s->input_picture[i]->b_frame_score - 1 >
1474 s->mb_num / s->avctx->b_sensitivity)
1478 b_frames = FFMAX(0, i - 1);
/* Reset scores so the next window is re-evaluated. */
1481 for (i = 0; i < b_frames + 1; i++) {
1482 s->input_picture[i]->b_frame_score = 0;
/* strategy 2: exhaustive search via scratch encoder. */
1484 } else if (s->avctx->b_frame_strategy == 2) {
1485 b_frames = estimate_best_b_count(s);
1487 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
/* Honor user-forced picture types already set on queued frames. */
1493 for (i = b_frames - 1; i >= 0; i--) {
1494 int type = s->input_picture[i]->f->pict_type;
1495 if (type && type != AV_PICTURE_TYPE_B)
1498 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1499 b_frames == s->max_b_frames) {
1500 av_log(s->avctx, AV_LOG_ERROR,
1501 "warning, too many b frames in a row\n");
/* GOP boundary handling: trim Bs (strict GOP) and force an I frame. */
1504 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1505 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1506 s->gop_size > s->picture_in_gop_number) {
1507 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1509 if (s->avctx->flags & CODEC_FLAG_CLOSED_GOP)
1511 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1515 if ((s->avctx->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
1516 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
/* Reference frame is coded first, then the b_frames Bs that precede it. */
1519 s->reordered_input_picture[0] = s->input_picture[b_frames];
1520 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1521 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1522 s->reordered_input_picture[0]->f->coded_picture_number =
1523 s->coded_picture_number++;
1524 for (i = 0; i < b_frames; i++) {
1525 s->reordered_input_picture[i + 1] = s->input_picture[i];
1526 s->reordered_input_picture[i + 1]->f->pict_type =
1528 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1529 s->coded_picture_number++;
1534 if (s->reordered_input_picture[0]) {
/* reference == 3 for I/P (both fields), 0 for B. */
1535 s->reordered_input_picture[0]->reference =
1536 s->reordered_input_picture[0]->f->pict_type !=
1537 AV_PICTURE_TYPE_B ? 3 : 0;
1539 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1540 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1543 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
/* input is a shared pix, so we can't modifiy it -> alloc a new
 * one & ensure that the shared one is reuseable */
1548 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1551 pic = &s->picture[i];
1553 pic->reference = s->reordered_input_picture[0]->reference;
1554 if (alloc_picture(s, pic, 0) < 0) {
1558 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
/* mark us unused / free shared pic */
1563 av_frame_unref(s->reordered_input_picture[0]->f);
1564 s->reordered_input_picture[0]->shared = 0;
1566 s->current_picture_ptr = pic;
/* input is not a shared pix -> reuse buffer for current_pix */
1569 s->current_picture_ptr = s->reordered_input_picture[0];
1570 for (i = 0; i < 4; i++) {
1571 s->new_picture.f->data[i] += INPLACE_OFFSET;
1574 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1575 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1576 s->current_picture_ptr)) < 0)
1579 s->picture_number = s->new_picture.f->display_picture_number;
1581 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
/*
 * Post-encode bookkeeping for the just-coded frame: pad reference frames by
 * drawing edges (needed for unrestricted motion vectors), remember per-type
 * lambdas for future rate decisions, and expose the coded frame.
 *
 * NOTE(review): some lines are elided from this chunk.
 */
1586 static void frame_end(MpegEncContext *s)
1588 if (s->unrestricted_mv &&
1589 s->current_picture.reference &&
1591 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1592 int hshift = desc->log2_chroma_w;
1593 int vshift = desc->log2_chroma_h;
/* Replicate border pixels on all three planes; chroma extents are scaled
 * by the subsampling shifts. */
1594 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1595 s->current_picture.f->linesize[0],
1596 s->h_edge_pos, s->v_edge_pos,
1597 EDGE_WIDTH, EDGE_WIDTH,
1598 EDGE_TOP | EDGE_BOTTOM);
1599 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1600 s->current_picture.f->linesize[1],
1601 s->h_edge_pos >> hshift,
1602 s->v_edge_pos >> vshift,
1603 EDGE_WIDTH >> hshift,
1604 EDGE_WIDTH >> vshift,
1605 EDGE_TOP | EDGE_BOTTOM);
1606 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1607 s->current_picture.f->linesize[2],
1608 s->h_edge_pos >> hshift,
1609 s->v_edge_pos >> vshift,
1610 EDGE_WIDTH >> hshift,
1611 EDGE_WIDTH >> vshift,
1612 EDGE_TOP | EDGE_BOTTOM);
/* Remember type and lambda so the next frame of the same type can reuse it. */
1617 s->last_pict_type = s->pict_type;
1618 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1619 if (s->pict_type!= AV_PICTURE_TYPE_B)
1620 s->last_non_b_pict_type = s->pict_type;
1622 s->avctx->coded_frame = s->current_picture_ptr->f;
/*
 * Refresh the per-coefficient DCT noise-reduction offsets from the running
 * error statistics, separately for intra and inter blocks. Counters are
 * halved once they exceed 2^16 so the averages track recent frames.
 */
1626 static void update_noise_reduction(MpegEncContext *s)
1630 for (intra = 0; intra < 2; intra++) {
/* Decay: halve both the error sums and the sample count. */
1631 if (s->dct_count[intra] > (1 << 16)) {
1632 for (i = 0; i < 64; i++) {
1633 s->dct_error_sum[intra][i] >>= 1;
1635 s->dct_count[intra] >>= 1;
/* offset[i] ~= noise_reduction * count / error_sum[i], rounded;
 * +1 guards against division by zero. */
1638 for (i = 0; i < 64; i++) {
1639 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1640 s->dct_count[intra] +
1641 s->dct_error_sum[intra][i] / 2) /
1642 (s->dct_error_sum[intra][i] + 1);
/*
 * Per-frame setup before encoding: rotate last/next/current reference
 * pictures, adjust plane pointers for field pictures, select the matching
 * dequantizer functions, and update noise reduction tables.
 *
 * NOTE(review): some lines are elided from this chunk.
 */
1647 static int frame_start(MpegEncContext *s)
/* mark & release old frames */
1652 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1653 s->last_picture_ptr != s->next_picture_ptr &&
1654 s->last_picture_ptr->f->buf[0]) {
1655 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1658 s->current_picture_ptr->f->pict_type = s->pict_type;
1659 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1661 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1662 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1663 s->current_picture_ptr)) < 0)
/* Reference rotation: I/P pictures become the new "next" reference. */
1666 if (s->pict_type != AV_PICTURE_TYPE_B) {
1667 s->last_picture_ptr = s->next_picture_ptr;
1669 s->next_picture_ptr = s->current_picture_ptr;
1672 if (s->last_picture_ptr) {
1673 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1674 if (s->last_picture_ptr->f->buf[0] &&
1675 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1676 s->last_picture_ptr)) < 0)
1679 if (s->next_picture_ptr) {
1680 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1681 if (s->next_picture_ptr->f->buf[0] &&
1682 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1683 s->next_picture_ptr)) < 0)
/* Field pictures: double the stride and, for bottom field, offset by one
 * line so each field is addressed as a half-height frame. */
1687 if (s->picture_structure!= PICT_FRAME) {
1689 for (i = 0; i < 4; i++) {
1690 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1691 s->current_picture.f->data[i] +=
1692 s->current_picture.f->linesize[i];
1694 s->current_picture.f->linesize[i] *= 2;
1695 s->last_picture.f->linesize[i] *= 2;
1696 s->next_picture.f->linesize[i] *= 2;
/* Pick the dequantizer matching the output format. */
1700 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1701 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1702 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1703 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1704 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1705 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1707 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1708 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1711 if (s->dct_error_sum) {
1712 av_assert2(s->avctx->noise_reduction && s->encoding);
1713 update_noise_reduction(s);
/*
 * Top-level encode entry point: queues the input frame, selects/reorders the
 * picture to code, encodes it (re-encoding at higher lambda on VBV overflow),
 * applies stuffing, computes vbv_delay for CBR MPEG-1/2, and fills pkt with
 * pts/dts/flags.
 *
 * Returns 0 on success (per the visible error paths); *got_packet is set from
 * the final packet size.
 *
 * NOTE(review): interior lines are elided throughout this chunk; comments
 * describe visible code only.
 */
1719 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1720 const AVFrame *pic_arg, int *got_packet)
1722 MpegEncContext *s = avctx->priv_data;
1723 int i, stuffing_count, ret;
1724 int context_count = s->slice_context_count;
1726 s->picture_in_gop_number++;
1728 if (load_input_picture(s, pic_arg) < 0)
1731 if (select_input_picture(s) < 0) {
/* output? */
1736 if (s->new_picture.f->data[0]) {
/* A single slice context with no caller buffer can grow the internal
 * byte buffer instead of preallocating the worst case. */
1737 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1738 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - FF_INPUT_BUFFER_PADDING_SIZE
1740 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1741 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size)) < 0)
/* Optional H.263 macroblock-info side data (12 bytes per MB). */
1744 s->mb_info_ptr = av_packet_new_side_data(pkt,
1745 AV_PKT_DATA_H263_MB_INFO,
1746 s->mb_width*s->mb_height*12);
1747 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* Partition the packet among slice threads proportionally to their
 * macroblock-row ranges. */
1750 for (i = 0; i < context_count; i++) {
1751 int start_y = s->thread_context[i]->start_mb_y;
1752 int end_y = s->thread_context[i]-> end_mb_y;
1753 int h = s->mb_height;
1754 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1755 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1757 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1760 s->pict_type = s->new_picture.f->pict_type;
1762 ret = frame_start(s);
1766 ret = encode_picture(s, s->picture_number);
1767 if (growing_buffer) {
1768 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1769 pkt->data = s->pb.buf;
1770 pkt->size = avctx->internal->byte_buffer_size;
/* Export per-frame statistics (legacy avctx fields). */
1775 avctx->header_bits = s->header_bits;
1776 avctx->mv_bits = s->mv_bits;
1777 avctx->misc_bits = s->misc_bits;
1778 avctx->i_tex_bits = s->i_tex_bits;
1779 avctx->p_tex_bits = s->p_tex_bits;
1780 avctx->i_count = s->i_count;
1781 // FIXME f/b_count in avctx
1782 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1783 avctx->skip_count = s->skip_count;
1787 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1788 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
/* VBV overflow check: if the frame is too large, raise lambda and
 * re-encode (loop structure elided here). */
1790 if (avctx->rc_buffer_size) {
1791 RateControlContext *rcc = &s->rc_context;
1792 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1794 if (put_bits_count(&s->pb) > max_size &&
1795 s->lambda < s->lmax) {
1796 s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1797 (s->qscale + 1) / s->qscale);
1798 if (s->adaptive_quant) {
1800 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1801 s->lambda_table[i] =
1802 FFMAX(s->lambda_table[i] + 1,
1803 s->lambda_table[i] * (s->qscale + 1) /
/* Undo per-frame state changes before re-encoding. */
1806 s->mb_skipped = 0; // done in frame_start()
1807 // done in encode_picture() so we must undo it
1808 if (s->pict_type == AV_PICTURE_TYPE_P) {
1809 if (s->flipflop_rounding ||
1810 s->codec_id == AV_CODEC_ID_H263P ||
1811 s->codec_id == AV_CODEC_ID_MPEG4)
1812 s->no_rounding ^= 1;
1814 if (s->pict_type != AV_PICTURE_TYPE_B) {
1815 s->time_base = s->last_time_base;
1816 s->last_non_b_time = s->time - s->pp_time;
1818 for (i = 0; i < context_count; i++) {
1819 PutBitContext *pb = &s->thread_context[i]->pb;
1820 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1822 av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1826 av_assert0(s->avctx->rc_max_rate);
1829 if (s->avctx->flags & CODEC_FLAG_PASS1)
1830 ff_write_pass1_stats(s);
1832 for (i = 0; i < 4; i++) {
1833 s->current_picture_ptr->f->error[i] =
1834 s->current_picture.f->error[i] =
1835 s->current_picture.error[i];
1836 avctx->error[i] += s->current_picture_ptr->f->error[i];
1839 if (s->avctx->flags & CODEC_FLAG_PASS1)
1840 assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1841 avctx->i_tex_bits + avctx->p_tex_bits ==
1842 put_bits_count(&s->pb));
1843 flush_put_bits(&s->pb);
1844 s->frame_bits = put_bits_count(&s->pb);
/* Stuffing: pad the frame to satisfy the rate controller. */
1846 stuffing_count = ff_vbv_update(s, s->frame_bits);
1847 s->stuffing_bits = 8*stuffing_count;
1848 if (stuffing_count) {
1849 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1850 stuffing_count + 50) {
1851 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1855 switch (s->codec_id) {
1856 case AV_CODEC_ID_MPEG1VIDEO:
1857 case AV_CODEC_ID_MPEG2VIDEO:
/* MPEG-1/2: zero bytes are legal stuffing. */
1858 while (stuffing_count--) {
1859 put_bits(&s->pb, 8, 0);
1862 case AV_CODEC_ID_MPEG4:
/* MPEG-4: stuffing is a 0x1C3 start code followed by 0xFF bytes. */
1863 put_bits(&s->pb, 16, 0);
1864 put_bits(&s->pb, 16, 0x1C3);
1865 stuffing_count -= 4;
1866 while (stuffing_count--) {
1867 put_bits(&s->pb, 8, 0xFF);
1871 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1873 flush_put_bits(&s->pb);
1874 s->frame_bits = put_bits_count(&s->pb);
/* update mpeg1/2 vbv_delay for CBR */
1878 if (s->avctx->rc_max_rate &&
1879 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1880 s->out_format == FMT_MPEG1 &&
1881 90000LL * (avctx->rc_buffer_size - 1) <=
1882 s->avctx->rc_max_rate * 0xFFFFLL) {
1883 int vbv_delay, min_delay;
1884 double inbits = s->avctx->rc_max_rate *
1885 av_q2d(s->avctx->time_base);
1886 int minbits = s->frame_bits - 8 *
1887 (s->vbv_delay_ptr - s->pb.buf - 1);
1888 double bits = s->rc_context.buffer_index + minbits - inbits;
1891 av_log(s->avctx, AV_LOG_ERROR,
1892 "Internal error, negative bits\n");
1894 assert(s->repeat_first_field == 0);
/* vbv_delay is in 90 kHz ticks; clamp to the minimum the header bytes
 * themselves imply, and it must fit in 16 bits. */
1896 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1897 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1898 s->avctx->rc_max_rate;
1900 vbv_delay = FFMAX(vbv_delay, min_delay);
1902 av_assert0(vbv_delay < 0xFFFF);
/* Patch the 16-bit vbv_delay field in place (it straddles 3 bytes). */
1904 s->vbv_delay_ptr[0] &= 0xF8;
1905 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1906 s->vbv_delay_ptr[1] = vbv_delay >> 5;
1907 s->vbv_delay_ptr[2] &= 0x07;
1908 s->vbv_delay_ptr[2] |= vbv_delay << 3;
1909 avctx->vbv_delay = vbv_delay * 300;
1911 s->total_bits += s->frame_bits;
1912 avctx->frame_bits = s->frame_bits;
/* pts/dts: with B-frames, dts of a reference frame is the previous
 * reference's pts (one-frame reorder delay). */
1914 pkt->pts = s->current_picture.f->pts;
1915 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
1916 if (!s->current_picture.f->coded_picture_number)
1917 pkt->dts = pkt->pts - s->dts_delta;
1919 pkt->dts = s->reordered_pts;
1920 s->reordered_pts = pkt->pts;
1922 pkt->dts = pkt->pts;
1923 if (s->current_picture.f->key_frame)
1924 pkt->flags |= AV_PKT_FLAG_KEY;
1926 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
/* release non-reference frames */
1932 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1933 if (!s->picture[i].reference)
1934 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1937 av_assert1((s->frame_bits & 7) == 0);
1939 pkt->size = s->frame_bits / 8;
1940 *got_packet = !!pkt->size;
/*
 * Zero out block n entirely if its coefficients are so sparse/small that
 * coding them costs more than they are worth. Each |coef| == 1 contributes
 * a position-dependent weight from tab[] (low frequencies weigh more); any
 * |coef| > 1 keeps the block. A negative threshold (set in elided code)
 * presumably enables skip_dc, preserving the DC coefficient.
 */
1944 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1945 int n, int threshold)
/* Weight per scan position: DC and the first few AC coefs cost the most. */
1947 static const char tab[64] = {
1948 3, 2, 2, 1, 1, 1, 1, 1,
1949 1, 1, 1, 1, 1, 1, 1, 1,
1950 1, 1, 1, 1, 1, 1, 1, 1,
1951 0, 0, 0, 0, 0, 0, 0, 0,
1952 0, 0, 0, 0, 0, 0, 0, 0,
1953 0, 0, 0, 0, 0, 0, 0, 0,
1954 0, 0, 0, 0, 0, 0, 0, 0,
1955 0, 0, 0, 0, 0, 0, 0, 0
1960 int16_t *block = s->block[n];
1961 const int last_index = s->block_last_index[n];
1964 if (threshold < 0) {
1966 threshold = -threshold;
/* Are all we could set to zero already zero? */
1971 if (last_index <= skip_dc - 1)
/* Accumulate the weighted score; bail out early (elided) on big coefs. */
1974 for (i = 0; i <= last_index; i++) {
1975 const int j = s->intra_scantable.permutated[i];
1976 const int level = FFABS(block[j]);
1978 if (skip_dc && i == 0)
1982 } else if (level > 1) {
1988 if (score >= threshold)
/* Clear everything from skip_dc onward and mark the block empty
 * (last_index 0 keeps DC, -1 means fully empty). */
1990 for (i = skip_dc; i <= last_index; i++) {
1991 const int j = s->intra_scantable.permutated[i];
1995 s->block_last_index[n] = 0;
1997 s->block_last_index[n] = -1;
/*
 * Clamp quantized coefficients to the codec's representable range
 * [min_qcoeff, max_qcoeff], counting the overflows; warn once per block in
 * simple mb_decision mode (RD modes handle the clipping cost elsewhere).
 */
2000 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2004 const int maxlevel = s->max_qcoeff;
2005 const int minlevel = s->min_qcoeff;
2009 i = 1; // skip clipping of intra dc
2013 for (; i <= last_index; i++) {
2014 const int j = s->intra_scantable.permutated[i];
2015 int level = block[j];
2017 if (level > maxlevel) {
2020 } else if (level < minlevel) {
2028 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2029 av_log(s->avctx, AV_LOG_INFO,
2030 "warning, clipping %d dct coefficients to %d..%d\n",
2031 overflow, minlevel, maxlevel);
/*
 * Build a perceptual weighting table for one 8x8 block: for each pixel,
 * measure local variance over its 3x3 neighborhood (clamped at block edges)
 * and store a standard-deviation-derived weight. Used by the noise-shaping
 * quantizer (dct_quantize_refine).
 */
2034 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2038 for (y = 0; y < 8; y++) {
2039 for (x = 0; x < 8; x++) {
/* 3x3 neighborhood clipped to the block; count/sum/sqr accumulated in
 * elided lines. */
2045 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2046 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2047 int v = ptr[x2 + y2 * stride];
/* weight = 36 * sqrt(count*sqr - sum^2) / count ~ scaled local stddev. */
2053 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
/*
 * Encode one macroblock: adaptive-quant update, pixel fetch (with edge
 * emulation at picture borders), interlaced-DCT decision, intra pixel copy
 * or inter motion compensation + residual, DCT/quantize with optional noise
 * shaping and coefficient elimination, and finally the codec-specific
 * bitstream writing.
 *
 * Always-inlined so the per-chroma-format constants (mb_block_height/width/
 * count) fold away in the encode_mb() wrappers.
 *
 * NOTE(review): interior lines are elided throughout; comments describe
 * visible code only.
 */
2058 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2059 int motion_x, int motion_y,
2060 int mb_block_height,
2064 int16_t weight[12][64];
2065 int16_t orig[12][64];
2066 const int mb_x = s->mb_x;
2067 const int mb_y = s->mb_y;
2070 int dct_offset = s->linesize * 8; // default for progressive frames
2071 int uv_dct_offset = s->uvlinesize * 8;
2072 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2073 ptrdiff_t wrap_y, wrap_c;
2075 for (i = 0; i < mb_block_count; i++)
2076 skip_dct[i] = s->skipdct;
/* --- adaptive quantization: pick this MB's qscale from the table --- */
2078 if (s->adaptive_quant) {
2079 const int last_qp = s->qscale;
2080 const int mb_xy = mb_x + mb_y * s->mb_stride;
2082 s->lambda = s->lambda_table[mb_xy];
2085 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2086 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2087 s->dquant = s->qscale - last_qp;
/* H.263-family: dquant limited to +-2; MPEG-4 B-frames and 8x8 MV
 * modes cannot signal arbitrary dquant (handled in elided branches). */
2089 if (s->out_format == FMT_H263) {
2090 s->dquant = av_clip(s->dquant, -2, 2);
2092 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2094 if (s->pict_type == AV_PICTURE_TYPE_B) {
2095 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2098 if (s->mv_type == MV_TYPE_8X8)
2104 ff_set_qscale(s, last_qp + s->dquant);
2105 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2106 ff_set_qscale(s, s->qscale + s->dquant);
/* --- locate the source pixels of this MB in the input frame --- */
2108 wrap_y = s->linesize;
2109 wrap_c = s->uvlinesize;
2110 ptr_y = s->new_picture.f->data[0] +
2111 (mb_y * 16 * wrap_y) + mb_x * 16;
2112 ptr_cb = s->new_picture.f->data[1] +
2113 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2114 ptr_cr = s->new_picture.f->data[2] +
2115 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
/* MB extends past the picture edge: replicate pixels into the edge
 * emulation buffer and point ptr_* at it. */
2117 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2118 uint8_t *ebuf = s->sc.edge_emu_buffer + 36 * wrap_y;
2119 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2120 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2121 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2123 16, 16, mb_x * 16, mb_y * 16,
2124 s->width, s->height);
2126 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2128 mb_block_width, mb_block_height,
2129 mb_x * mb_block_width, mb_y * mb_block_height,
2131 ptr_cb = ebuf + 16 * wrap_y;
2132 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2134 mb_block_width, mb_block_height,
2135 mb_x * mb_block_width, mb_y * mb_block_height,
2137 ptr_cr = ebuf + 16 * wrap_y + 16;
/* --- intra path: interlaced-DCT decision on the source pixels --- */
2141 if (s->avctx->flags & CODEC_FLAG_INTERLACED_DCT) {
2142 int progressive_score, interlaced_score;
2144 s->interlaced_dct = 0;
/* -400 biases toward progressive DCT. */
2145 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2146 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2147 NULL, wrap_y, 8) - 400;
2149 if (progressive_score > 0) {
2150 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2151 NULL, wrap_y * 2, 8) +
2152 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2153 NULL, wrap_y * 2, 8);
2154 if (progressive_score > interlaced_score) {
2155 s->interlaced_dct = 1;
/* Field DCT: vertical block offset becomes one line (field stride). */
2157 dct_offset = wrap_y;
2158 uv_dct_offset = wrap_c;
2160 if (s->chroma_format == CHROMA_422 ||
2161 s->chroma_format == CHROMA_444)
/* Intra: fetch the four luma 8x8 blocks, then chroma per format. */
2167 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2168 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2169 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2170 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2172 if (s->avctx->flags & CODEC_FLAG_GRAY) {
2176 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2177 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2178 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2179 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2180 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2181 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2182 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2183 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2184 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2185 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2186 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2187 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
/* --- inter path: motion compensate into s->dest, then take residual --- */
2191 op_pixels_func (*op_pix)[4];
2192 qpel_mc_func (*op_qpix)[16];
2193 uint8_t *dest_y, *dest_cb, *dest_cr;
2195 dest_y = s->dest[0];
2196 dest_cb = s->dest[1];
2197 dest_cr = s->dest[2];
/* Rounding selection matches the decoder's no_rounding state. */
2199 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2200 op_pix = s->hdsp.put_pixels_tab;
2201 op_qpix = s->qdsp.put_qpel_pixels_tab;
2203 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2204 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
/* Forward then backward prediction; the second direction averages. */
2207 if (s->mv_dir & MV_DIR_FORWARD) {
2208 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2209 s->last_picture.f->data,
2211 op_pix = s->hdsp.avg_pixels_tab;
2212 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2214 if (s->mv_dir & MV_DIR_BACKWARD) {
2215 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2216 s->next_picture.f->data,
/* Interlaced-DCT decision for the residual (src vs prediction). */
2220 if (s->avctx->flags & CODEC_FLAG_INTERLACED_DCT) {
2221 int progressive_score, interlaced_score;
2223 s->interlaced_dct = 0;
2224 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2225 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2229 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2230 progressive_score -= 400;
2232 if (progressive_score > 0) {
2233 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2235 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2239 if (progressive_score > interlaced_score) {
2240 s->interlaced_dct = 1;
2242 dct_offset = wrap_y;
2243 uv_dct_offset = wrap_c;
2245 if (s->chroma_format == CHROMA_422)
/* Residual = source - prediction, per 8x8 block. */
2251 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2252 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2253 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2254 dest_y + dct_offset, wrap_y);
2255 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2256 dest_y + dct_offset + 8, wrap_y);
2258 if (s->avctx->flags & CODEC_FLAG_GRAY) {
2262 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2263 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2264 if (!s->chroma_y_shift) { /* 422 */
2265 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2266 dest_cb + uv_dct_offset, wrap_c);
2267 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2268 dest_cr + uv_dct_offset, wrap_c);
/* pre quantization */
/* Low-variance MBs: mark per-block skip_dct when the SAD is below
 * 20*qscale (the skip_dct[i] assignments are elided). */
2272 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2273 2 * s->qscale * s->qscale) {
2275 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2277 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2279 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2280 wrap_y, 8) < 20 * s->qscale)
2282 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2283 wrap_y, 8) < 20 * s->qscale)
2285 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2287 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2289 if (!s->chroma_y_shift) { /* 422 */
2290 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2291 dest_cb + uv_dct_offset,
2292 wrap_c, 8) < 20 * s->qscale)
2294 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2295 dest_cr + uv_dct_offset,
2296 wrap_c, 8) < 20 * s->qscale)
/* Noise shaping: per-block perceptual weights + copy of the unquantized
 * coefficients for the refinement pass below. */
2302 if (s->quantizer_noise_shaping) {
2304 get_visual_weight(weight[0], ptr_y , wrap_y);
2306 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2308 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2310 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2312 get_visual_weight(weight[4], ptr_cb , wrap_c);
2314 get_visual_weight(weight[5], ptr_cr , wrap_c);
2315 if (!s->chroma_y_shift) { /* 422 */
2317 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2320 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2323 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
/* DCT & quantize */
2327 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2329 for (i = 0; i < mb_block_count; i++) {
2332 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2333 // FIXME we could decide to change to quantizer instead of
2335 // JS: I don't think that would be a good idea it could lower
2336 // quality instead of improve it. Just INTRADC clipping
2337 // deserves changes in quantizer
2339 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2341 s->block_last_index[i] = -1;
2343 if (s->quantizer_noise_shaping) {
2344 for (i = 0; i < mb_block_count; i++) {
2346 s->block_last_index[i] =
2347 dct_quantize_refine(s, s->block[i], weight[i],
2348 orig[i], i, s->qscale);
/* Single-coefficient elimination (inter blocks only). */
2353 if (s->luma_elim_threshold && !s->mb_intra)
2354 for (i = 0; i < 4; i++)
2355 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2356 if (s->chroma_elim_threshold && !s->mb_intra)
2357 for (i = 4; i < mb_block_count; i++)
2358 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2360 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2361 for (i = 0; i < mb_block_count; i++) {
2362 if (s->block_last_index[i] == -1)
2363 s->coded_score[i] = INT_MAX / 256;
/* Gray mode on intra MBs: force neutral (mid-gray DC) chroma blocks. */
2368 if ((s->avctx->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
2369 s->block_last_index[4] =
2370 s->block_last_index[5] = 0;
2372 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2373 if (!s->chroma_y_shift) { /* 422 / 444 */
2374 for (i=6; i<12; i++) {
2375 s->block_last_index[i] = 0;
2376 s->block[i][0] = s->block[4][0];
// non c quantize code returns incorrect block_last_index FIXME
/* Recompute last_index by scanning backward for the last nonzero coef. */
2382 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2383 for (i = 0; i < mb_block_count; i++) {
2385 if (s->block_last_index[i] > 0) {
2386 for (j = 63; j > 0; j--) {
2387 if (s->block[i][s->intra_scantable.permutated[j]])
2390 s->block_last_index[i] = j;
/* huffman encode */
2396 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2397 case AV_CODEC_ID_MPEG1VIDEO:
2398 case AV_CODEC_ID_MPEG2VIDEO:
2399 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2400 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2402 case AV_CODEC_ID_MPEG4:
2403 if (CONFIG_MPEG4_ENCODER)
2404 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2406 case AV_CODEC_ID_MSMPEG4V2:
2407 case AV_CODEC_ID_MSMPEG4V3:
2408 case AV_CODEC_ID_WMV1:
2409 if (CONFIG_MSMPEG4_ENCODER)
2410 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2412 case AV_CODEC_ID_WMV2:
2413 if (CONFIG_WMV2_ENCODER)
2414 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2416 case AV_CODEC_ID_H261:
2417 if (CONFIG_H261_ENCODER)
2418 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2420 case AV_CODEC_ID_H263:
2421 case AV_CODEC_ID_H263P:
2422 case AV_CODEC_ID_FLV1:
2423 case AV_CODEC_ID_RV10:
2424 case AV_CODEC_ID_RV20:
2425 if (CONFIG_H263_ENCODER)
2426 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2428 case AV_CODEC_ID_MJPEG:
2429 case AV_CODEC_ID_AMV:
2430 if (CONFIG_MJPEG_ENCODER)
2431 ff_mjpeg_encode_mb(s, s->block);
/* Encode one macroblock, dispatching on the chroma subsampling format.
 * The trailing constants appear to be chroma block height/width and the
 * number of coded blocks per MB (420: 8x8, 6 blocks; 422: 16x8, 8 blocks;
 * 444: 16x16, 12 blocks) — TODO confirm against encode_mb_internal(). */
2438 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2440     if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2441     else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2442     else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/* Snapshot the encoder state (s) into a backup context (d) before an
 * encode attempt, so the caller can restore it and retry the same MB with
 * a different coding mode. 'type' selects what to copy — presumably <0
 * means "copy everything"; verify against callers in encode_thread(). */
2445 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2448     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
     /* prediction state */
2451     d->mb_skip_run= s->mb_skip_run;
2453     d->last_dc[i] = s->last_dc[i];
     /* bit-accounting statistics */
2456     d->mv_bits= s->mv_bits;
2457     d->i_tex_bits= s->i_tex_bits;
2458     d->p_tex_bits= s->p_tex_bits;
2459     d->i_count= s->i_count;
2460     d->f_count= s->f_count;
2461     d->b_count= s->b_count;
2462     d->skip_count= s->skip_count;
2463     d->misc_bits= s->misc_bits;
     /* quantizer state */
2467     d->qscale= s->qscale;
2468     d->dquant= s->dquant;
2470     d->esc3_level_length= s->esc3_level_length;
/* Copy the encoder state produced by an encode attempt from s into d.
 * Counterpart of copy_context_before_encode(): used to commit the winning
 * candidate's context (MVs, DC/AC prediction, statistics, bitstream
 * writers, per-block last indices) after mode decision. */
2473 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2476     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2477     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
     /* prediction state */
2480     d->mb_skip_run= s->mb_skip_run;
2482     d->last_dc[i] = s->last_dc[i];
     /* bit-accounting statistics */
2485     d->mv_bits= s->mv_bits;
2486     d->i_tex_bits= s->i_tex_bits;
2487     d->p_tex_bits= s->p_tex_bits;
2488     d->i_count= s->i_count;
2489     d->f_count= s->f_count;
2490     d->b_count= s->b_count;
2491     d->skip_count= s->skip_count;
2492     d->misc_bits= s->misc_bits;
     /* chosen macroblock mode */
2494     d->mb_intra= s->mb_intra;
2495     d->mb_skipped= s->mb_skipped;
2496     d->mv_type= s->mv_type;
2497     d->mv_dir= s->mv_dir;
2499     if(s->data_partitioning){
2501         d->tex_pb= s->tex_pb;
2505     d->block_last_index[i]= s->block_last_index[i];
2506     d->interlaced_dct= s->interlaced_dct;
2507     d->qscale= s->qscale;
2509     d->esc3_level_length= s->esc3_level_length;
/* Encode one MB candidate mode for rate-distortion mode decision.
 * Restores the saved context, encodes into one of two ping-pong bit
 * buffers (selected by *next_block), scores the result — bit count only,
 * or lambda-weighted bits + SSE when mb_decision == FF_MB_DECISION_RD —
 * and, if it beats *dmin, records the winner via copy_context_after_encode.
 * In RD mode the MB is reconstructed into a scratchpad (s->dest redirected)
 * so sse_mb() can compare against the source. */
2512 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2513 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2514 int *dmin, int *next_block, int motion_x, int motion_y)
2517     uint8_t *dest_backup[3];
2519     copy_context_before_encode(s, backup, type);
     /* route output into the candidate's ping-pong buffers */
2521     s->block= s->blocks[*next_block];
2522     s->pb= pb[*next_block];
2523     if(s->data_partitioning){
2524         s->pb2 = pb2 [*next_block];
2525         s->tex_pb= tex_pb[*next_block];
     /* redirect reconstruction into the RD scratchpad */
2529     memcpy(dest_backup, s->dest, sizeof(s->dest));
2530     s->dest[0] = s->sc.rd_scratchpad;
2531     s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2532     s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2533     av_assert0(s->linesize >= 32); //FIXME
2536     encode_mb(s, motion_x, motion_y);
2538     score= put_bits_count(&s->pb);
2539     if(s->data_partitioning){
2540         score+= put_bits_count(&s->pb2);
2541         score+= put_bits_count(&s->tex_pb);
2544     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2545         ff_mpv_decode_mb(s, s->block);
         /* RD cost: bits * lambda2 + SSE << FF_LAMBDA_SHIFT */
2547         score *= s->lambda2;
2548         score += sse_mb(s) << FF_LAMBDA_SHIFT;
2552     memcpy(s->dest, dest_backup, sizeof(s->dest));
2559     copy_context_after_encode(best, s, type);
/* Sum of squared errors between two w x h pixel blocks. Uses the
 * optimized mecc.sse dispatch for the common 16x16 and 8x8 sizes and a
 * scalar loop (via the ff_square_tab lookup) for arbitrary sizes, e.g.
 * partial edge macroblocks. */
2563 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2564     uint32_t *sq = ff_square_tab + 256;
2569         return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2570     else if(w==8 && h==8)
2571         return s->mecc.sse[1](NULL, src1, src2, stride, 8);
     /* generic path: sq is centered so negative differences index correctly */
2575     acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/* Distortion of the current macroblock: compares the reconstruction in
 * s->dest against the source frame (s->new_picture) over luma + both
 * chroma planes. Full 16x16 MBs use the fast mecc.sse / mecc.nsse
 * (noise-preserving SSE, when mb_cmp == FF_CMP_NSSE) functions; MBs
 * clipped at the right/bottom frame edge fall back to the generic sse(). */
2584 static int sse_mb(MpegEncContext *s){
     /* clip MB dimensions at the frame border */
2588     if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2589     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2592     if(s->avctx->mb_cmp == FF_CMP_NSSE){
2593         return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2594                s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2595                s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2597         return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2598                s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2599                s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
     /* edge MB: generic SSE with clipped w/h (chroma halved — 4:2:0 assumed here) */
2602     return  sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2603            +sse(s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2604            +sse(s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/* Slice-thread worker: coarse pre-pass motion estimation over this
 * thread's MB rows, iterating bottom-up / right-to-left. Uses the
 * pre_dia_size diamond for the search. */
2607 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2608     MpegEncContext *s= *(void**)arg;
2612     s->me.dia_size= s->avctx->pre_dia_size;
2613     s->first_slice_line=1;
2614     for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2615         for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2616             ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2618         s->first_slice_line=0;
/* Slice-thread worker: full motion estimation for this thread's MB rows.
 * Chooses B-frame or P-frame estimation per picture type; results (MVs
 * and mb_type candidates) are stored in the context tables. */
2626 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2627     MpegEncContext *s= *(void**)arg;
2629     ff_check_alignment();
2631     s->me.dia_size= s->avctx->dia_size;
2632     s->first_slice_line=1;
2633     for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2634         s->mb_x=0; //for block init below
2635         ff_init_block_index(s);
2636         for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
             /* advance block indices by one MB (2 luma block columns) */
2637             s->block_index[0]+=2;
2638             s->block_index[1]+=2;
2639             s->block_index[2]+=2;
2640             s->block_index[3]+=2;
2642             /* compute motion vector & mb_type and store in context */
2643             if(s->pict_type==AV_PICTURE_TYPE_B)
2644                 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2646                 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2648         s->first_slice_line=0;
/* Slice-thread worker: per-MB luma variance and mean over this thread's
 * rows, stored into current_picture.mb_var / mb_mean; the variance sum is
 * accumulated for later use (e.g. adaptive quantization / rate control). */
2653 static int mb_var_thread(AVCodecContext *c, void *arg){
2654     MpegEncContext *s= *(void**)arg;
2657     ff_check_alignment();
2659     for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2660         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2663             uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2665             int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
             /* var = E[x^2] - E[x]^2, with rounding (+128) before >>8 */
2667             varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2668                     (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2670             s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2671             s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2672             s->me.mb_var_sum_temp    += varc;
/* Finish the current slice: merge MPEG-4 data partitions and add codec
 * stuffing where required, byte-align and flush the bitstream writer, and
 * account the padding bits as misc_bits for 2-pass rate control. */
2678 static void write_slice_end(MpegEncContext *s){
2679     if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2680         if(s->partitioned_frame){
2681             ff_mpeg4_merge_partitions(s);
2684         ff_mpeg4_stuffing(&s->pb);
2685     }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2686         ff_mjpeg_encode_stuffing(s);
2689     avpriv_align_put_bits(&s->pb);
2690     flush_put_bits(&s->pb);
2692     if ((s->avctx->flags & CODEC_FLAG_PASS1) && !s->partitioned_frame)
2693         s->misc_bits+= get_bits_diff(s);
/* Append one 12-byte mb_info record (AV_PKT_DATA_H263_MB_INFO layout:
 * le32 bit offset, qscale, GOB number, le16 MB address, predicted MV pair,
 * plus zeroed second MV pair since 4MV is unsupported) describing the
 * current macroblock's position in the bitstream. */
2696 static void write_mb_info(MpegEncContext *s)
2698     uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2699     int offset = put_bits_count(&s->pb);
2700     int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2701     int gobn = s->mb_y / s->gob_index;
2703     if (CONFIG_H263_ENCODER)
2704         ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2705     bytestream_put_le32(&ptr, offset);
2706     bytestream_put_byte(&ptr, s->qscale);
2707     bytestream_put_byte(&ptr, gobn);
2708     bytestream_put_le16(&ptr, mba);
2709     bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2710     bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2711     /* 4MV not implemented */
2712     bytestream_put_byte(&ptr, 0); /* hmv2 */
2713     bytestream_put_byte(&ptr, 0); /* vmv2 */
/* Maintain the mb_info side-data table: open a new 12-byte slot whenever
 * another mb_info interval of bitstream has been written, or (startcode
 * path) record the position just after a start code. The slot may be
 * reserved here and filled in later by write_mb_info(). */
2716 static void update_mb_info(MpegEncContext *s, int startcode)
2720     if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2721         s->mb_info_size += 12;
2722         s->prev_mb_info = s->last_mb_info;
2725         s->prev_mb_info = put_bits_count(&s->pb)/8;
2726         /* This might have incremented mb_info_size above, and we return without
2727          * actually writing any info into that slot yet. But in that case,
2728          * this will be called again at the start of the after writing the
2729          * start code, actually writing the mb info. */
2733     s->last_mb_info = put_bits_count(&s->pb)/8;
2734     if (!s->mb_info_size)
2735         s->mb_info_size += 12;
/* Grow the shared output bit buffer when fewer than 'threshold' bytes
 * remain. Only applies when there is a single slice context and the
 * PutBitContext writes into avctx->internal->byte_buffer; after the
 * realloc the PutBitContext is rebased and the derived pointers
 * (ptr_lastgob, vbv_delay_ptr) are re-anchored into the new buffer.
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure, or
 * AVERROR(EINVAL) if the buffer still cannot satisfy 'threshold'. */
2739 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2741     if (   s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2742         && s->slice_context_count == 1
2743         && s->pb.buf == s->avctx->internal->byte_buffer) {
         /* remember offsets relative to the old buffer base */
2744         int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2745         int vbv_pos     = s->vbv_delay_ptr - s->pb.buf;
2747         uint8_t *new_buffer = NULL;
2748         int new_buffer_size = 0;
2750         av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2751                               s->avctx->internal->byte_buffer_size + size_increase);
2753             return AVERROR(ENOMEM);
2755         memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2756         av_free(s->avctx->internal->byte_buffer);
2757         s->avctx->internal->byte_buffer      = new_buffer;
2758         s->avctx->internal->byte_buffer_size = new_buffer_size;
2759         rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2760         s->ptr_lastgob   = s->pb.buf + lastgob_pos;
2761         s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2763     if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2764         return AVERROR(EINVAL);
/* Slice-thread worker that encodes this thread's macroblock rows into the
 * bitstream. Per MB it either (a) runs rate-distortion mode decision over
 * all candidate MB types via encode_mb_hq() with ping-pong bit buffers,
 * optionally trying several quantizer deltas (QP_RD) and skip (SKIP_RD),
 * then commits the winner; or (b) encodes the single possible MB type
 * directly. Also handles GOB/slice/video-packet headers, resync points,
 * RTP callbacks, mb_info side data, PSNR error accumulation and the
 * H.263 loop filter. */
2768 static int encode_thread(AVCodecContext *c, void *arg){
2769     MpegEncContext *s= *(void**)arg;
2770     int mb_x, mb_y, pdif = 0;
2771     int chr_h= 16>>s->chroma_y_shift;
2773     MpegEncContext best_s = { 0 }, backup_s;
2774     uint8_t bit_buf[2][MAX_MB_BYTES];
2775     uint8_t bit_buf2[2][MAX_MB_BYTES];
2776     uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2777     PutBitContext pb[2], pb2[2], tex_pb[2];
2779     ff_check_alignment();
     /* ping-pong writers used by encode_mb_hq() candidate trials */
2782         init_put_bits(&pb    [i], bit_buf    [i], MAX_MB_BYTES);
2783         init_put_bits(&pb2   [i], bit_buf2   [i], MAX_MB_BYTES);
2784         init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2787     s->last_bits= put_bits_count(&s->pb);
2798     /* init last dc values */
2799     /* note: quant matrix value (8) is implied here */
2800     s->last_dc[i] = 128 << s->intra_dc_precision;
2802     s->current_picture.error[i] = 0;
2804     if(s->codec_id==AV_CODEC_ID_AMV){
         /* AMV uses non-standard DC prediction resets */
2805         s->last_dc[0] = 128*8/13;
2806         s->last_dc[1] = 128*8/14;
2807         s->last_dc[2] = 128*8/14;
2810     memset(s->last_mv, 0, sizeof(s->last_mv));
2814     switch(s->codec_id){
2815     case AV_CODEC_ID_H263:
2816     case AV_CODEC_ID_H263P:
2817     case AV_CODEC_ID_FLV1:
2818         if (CONFIG_H263_ENCODER)
2819             s->gob_index = H263_GOB_HEIGHT(s->height);
2821     case AV_CODEC_ID_MPEG4:
2822         if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2823             ff_mpeg4_init_partitions(s);
2829     s->first_slice_line = 1;
2830     s->ptr_lastgob = s->pb.buf;
2831     for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2835         ff_set_qscale(s, s->qscale);
2836         ff_init_block_index(s);
2838         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2839             int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2840             int mb_type= s->mb_type[xy];
             /* make sure the output buffer can hold at least one more MB */
2844             int size_increase =  s->avctx->internal->byte_buffer_size/4
2845                                + s->mb_width*MAX_MB_BYTES;
2847             ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
2848             if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2849                 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2852             if(s->data_partitioning){
2853                 if(   s->pb2   .buf_end - s->pb2   .buf - (put_bits_count(&s->    pb2)>>3) < MAX_MB_BYTES
2854                    || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2855                     av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2861             s->mb_y = mb_y;  // moved into loop, can get changed by H.261
2862             ff_update_block_index(s);
2864             if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2865                 ff_h261_reorder_mb_index(s);
2866                 xy= s->mb_y*s->mb_stride + s->mb_x;
2867                 mb_type= s->mb_type[xy];
2870             /* write gob / video packet header  */
2872                 int current_packet_size, is_gob_start;
2874                 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2876                 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2878                 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2880                 switch(s->codec_id){
2881                 case AV_CODEC_ID_H263:
2882                 case AV_CODEC_ID_H263P:
2883                     if(!s->h263_slice_structured)
2884                         if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2886                 case AV_CODEC_ID_MPEG2VIDEO:
2887                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2888                 case AV_CODEC_ID_MPEG1VIDEO:
2889                     if(s->mb_skip_run) is_gob_start=0;
2891                 case AV_CODEC_ID_MJPEG:
2892                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2897                     if(s->start_mb_y != mb_y || mb_x!=0){
2900                         if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2901                             ff_mpeg4_init_partitions(s);
2905                     av_assert2((put_bits_count(&s->pb)&7) == 0);
2906                     current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
                     /* deliberate packet corruption for error-resilience testing */
2908                     if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
2909                         int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2910                         int d = 100 / s->error_rate;
2912                             current_packet_size=0;
2913                             s->pb.buf_ptr= s->ptr_lastgob;
2914                             assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2918                     if (s->avctx->rtp_callback){
2919                         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2920                         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2922                     update_mb_info(s, 1);
2924                     switch(s->codec_id){
2925                     case AV_CODEC_ID_MPEG4:
2926                         if (CONFIG_MPEG4_ENCODER) {
2927                             ff_mpeg4_encode_video_packet_header(s);
2928                             ff_mpeg4_clean_buffers(s);
2931                     case AV_CODEC_ID_MPEG1VIDEO:
2932                     case AV_CODEC_ID_MPEG2VIDEO:
2933                         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2934                             ff_mpeg1_encode_slice_header(s);
2935                             ff_mpeg1_clean_buffers(s);
2938                     case AV_CODEC_ID_H263:
2939                     case AV_CODEC_ID_H263P:
2940                         if (CONFIG_H263_ENCODER)
2941                             ff_h263_encode_gob_header(s, mb_y);
2945                     if (s->avctx->flags & CODEC_FLAG_PASS1) {
2946                         int bits= put_bits_count(&s->pb);
2947                         s->misc_bits+= bits - s->last_bits;
2951                     s->ptr_lastgob += current_packet_size;
2952                     s->first_slice_line=1;
2953                     s->resync_mb_x=mb_x;
2954                     s->resync_mb_y=mb_y;
2958             if(  (s->resync_mb_x   == s->mb_x)
2959                && s->resync_mb_y+1 == s->mb_y){
2960                 s->first_slice_line=0;
2964             s->dquant=0; //only for QP_RD
2966             update_mb_info(s, 0);
2968             if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
                 /* --- rate-distortion mode decision path --- */
2970                 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2972                 copy_context_before_encode(&backup_s, s, -1);
2974                 best_s.data_partitioning= s->data_partitioning;
2975                 best_s.partitioned_frame= s->partitioned_frame;
2976                 if(s->data_partitioning){
2977                     backup_s.pb2= s->pb2;
2978                     backup_s.tex_pb= s->tex_pb;
                 /* try each candidate MB type produced by motion estimation */
2981                 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2982                     s->mv_dir = MV_DIR_FORWARD;
2983                     s->mv_type = MV_TYPE_16X16;
2985                     s->mv[0][0][0] = s->p_mv_table[xy][0];
2986                     s->mv[0][0][1] = s->p_mv_table[xy][1];
2987                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2988                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2990                 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2991                     s->mv_dir = MV_DIR_FORWARD;
2992                     s->mv_type = MV_TYPE_FIELD;
2995                     j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2996                     s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2997                     s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2999                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3000                                  &dmin, &next_block, 0, 0);
3002                 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3003                     s->mv_dir = MV_DIR_FORWARD;
3004                     s->mv_type = MV_TYPE_16X16;
3008                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3009                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3011                 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3012                     s->mv_dir = MV_DIR_FORWARD;
3013                     s->mv_type = MV_TYPE_8X8;
3016                     s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3017                     s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3019                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3020                                  &dmin, &next_block, 0, 0);
3022                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3023                     s->mv_dir = MV_DIR_FORWARD;
3024                     s->mv_type = MV_TYPE_16X16;
3026                     s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3027                     s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3028                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3029                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3031                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3032                     s->mv_dir = MV_DIR_BACKWARD;
3033                     s->mv_type = MV_TYPE_16X16;
3035                     s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3036                     s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3037                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3038                                  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3040                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3041                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3042                     s->mv_type = MV_TYPE_16X16;
3044                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3045                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3046                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3047                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3048                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3049                                  &dmin, &next_block, 0, 0);
3051                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3052                     s->mv_dir = MV_DIR_FORWARD;
3053                     s->mv_type = MV_TYPE_FIELD;
3056                     j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3057                     s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3058                     s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3060                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3061                                  &dmin, &next_block, 0, 0);
3063                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3064                     s->mv_dir = MV_DIR_BACKWARD;
3065                     s->mv_type = MV_TYPE_FIELD;
3068                     j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3069                     s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3070                     s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3072                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3073                                  &dmin, &next_block, 0, 0);
3075                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3076                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3077                     s->mv_type = MV_TYPE_FIELD;
3079                     for(dir=0; dir<2; dir++){
3081                         j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3082                         s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3083                         s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3086                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3087                                  &dmin, &next_block, 0, 0);
3089                 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3091                     s->mv_type = MV_TYPE_16X16;
3095                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3096                                  &dmin, &next_block, 0, 0);
3097                     if(s->h263_pred || s->h263_aic){
3099                             s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3101                             ff_clean_intra_table_entries(s); //old mode?
                 /* QP_RD: re-encode the best mode with quantizer deltas */
3105                 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3106                     if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3107                         const int last_qp= backup_s.qscale;
3110                         const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3111                         static const int dquant_tab[4]={-1,1,-2,2};
3112                         int storecoefs = s->mb_intra && s->dc_val[0];
3114                         av_assert2(backup_s.dquant == 0);
3117                         s->mv_dir= best_s.mv_dir;
3118                         s->mv_type = MV_TYPE_16X16;
3119                         s->mb_intra= best_s.mb_intra;
3120                         s->mv[0][0][0] = best_s.mv[0][0][0];
3121                         s->mv[0][0][1] = best_s.mv[0][0][1];
3122                         s->mv[1][0][0] = best_s.mv[1][0][0];
3123                         s->mv[1][0][1] = best_s.mv[1][0][1];
3125                         qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3126                         for(; qpi<4; qpi++){
3127                             int dquant= dquant_tab[qpi];
3128                             qp= last_qp + dquant;
3129                             if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3131                             backup_s.dquant= dquant;
                             /* save DC/AC prediction state so a losing trial can be undone */
3134                                 dc[i]= s->dc_val[0][ s->block_index[i] ];
3135                                 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3139                             encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3140                                          &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3141                             if(best_s.qscale != qp){
3144                                     s->dc_val[0][ s->block_index[i] ]= dc[i];
3145                                     memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3152                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3153                     int mx= s->b_direct_mv_table[xy][0];
3154                     int my= s->b_direct_mv_table[xy][1];
3156                     backup_s.dquant = 0;
3157                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3159                     ff_mpeg4_set_direct_mv(s, mx, my);
3160                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3161                                  &dmin, &next_block, mx, my);
3163                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3164                     backup_s.dquant = 0;
3165                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3167                     ff_mpeg4_set_direct_mv(s, 0, 0);
3168                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3169                                  &dmin, &next_block, 0, 0);
                 /* SKIP_RD: also try encoding the winner as not-coded */
3171                 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3174                         coded |= s->block_last_index[i];
3177                         memcpy(s->mv, best_s.mv, sizeof(s->mv));
3178                         if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3179                             mx=my=0; //FIXME find the one we actually used
3180                             ff_mpeg4_set_direct_mv(s, mx, my);
3181                         }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3189                         s->mv_dir= best_s.mv_dir;
3190                         s->mv_type = best_s.mv_type;
3192 //                        s->mv[0][0][0] = best_s.mv[0][0][0];
3193 //                        s->mv[0][0][1] = best_s.mv[0][0][1];
3194 //                        s->mv[1][0][0] = best_s.mv[1][0][0];
3195 //                        s->mv[1][0][1] = best_s.mv[1][0][1];
3198                         encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3199                                      &dmin, &next_block, mx, my);
3204                 s->current_picture.qscale_table[xy] = best_s.qscale;
                 /* commit the winning candidate */
3206                 copy_context_after_encode(s, &best_s, -1);
3208                 pb_bits_count= put_bits_count(&s->pb);
3209                 flush_put_bits(&s->pb);
3210                 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3213                 if(s->data_partitioning){
3214                     pb2_bits_count= put_bits_count(&s->pb2);
3215                     flush_put_bits(&s->pb2);
3216                     avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3217                     s->pb2= backup_s.pb2;
3219                     tex_pb_bits_count= put_bits_count(&s->tex_pb);
3220                     flush_put_bits(&s->tex_pb);
3221                     avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3222                     s->tex_pb= backup_s.tex_pb;
3224                 s->last_bits= put_bits_count(&s->pb);
3226                 if (CONFIG_H263_ENCODER &&
3227                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3228                     ff_h263_update_motion_val(s);
3230                 if(next_block==0){ //FIXME 16 vs linesize16
                     /* copy the winner's reconstruction from the scratchpad */
3231                     s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad                     , s->linesize  ,16);
3232                     s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize    , s->uvlinesize, 8);
3233                     s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3236                 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3237                     ff_mpv_decode_mb(s, s->block);
             /* --- single-mode path: only one MB type possible --- */
3239                 int motion_x = 0, motion_y = 0;
3240                 s->mv_type=MV_TYPE_16X16;
3241                 // only one MB-Type possible
3244                 case CANDIDATE_MB_TYPE_INTRA:
3247                     motion_x= s->mv[0][0][0] = 0;
3248                     motion_y= s->mv[0][0][1] = 0;
3250                 case CANDIDATE_MB_TYPE_INTER:
3251                     s->mv_dir = MV_DIR_FORWARD;
3253                     motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3254                     motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3256                 case CANDIDATE_MB_TYPE_INTER_I:
3257                     s->mv_dir = MV_DIR_FORWARD;
3258                     s->mv_type = MV_TYPE_FIELD;
3261                     j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3262                     s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3263                     s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3266                 case CANDIDATE_MB_TYPE_INTER4V:
3267                     s->mv_dir = MV_DIR_FORWARD;
3268                     s->mv_type = MV_TYPE_8X8;
3271                     s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3272                     s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3275                 case CANDIDATE_MB_TYPE_DIRECT:
3276                     if (CONFIG_MPEG4_ENCODER) {
3277                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3279                         motion_x=s->b_direct_mv_table[xy][0];
3280                         motion_y=s->b_direct_mv_table[xy][1];
3281                         ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3284                 case CANDIDATE_MB_TYPE_DIRECT0:
3285                     if (CONFIG_MPEG4_ENCODER) {
3286                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3288                         ff_mpeg4_set_direct_mv(s, 0, 0);
3291                 case CANDIDATE_MB_TYPE_BIDIR:
3292                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3294                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3295                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3296                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3297                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3299                 case CANDIDATE_MB_TYPE_BACKWARD:
3300                     s->mv_dir = MV_DIR_BACKWARD;
3302                     motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3303                     motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3305                 case CANDIDATE_MB_TYPE_FORWARD:
3306                     s->mv_dir = MV_DIR_FORWARD;
3308                     motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3309                     motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3311                 case CANDIDATE_MB_TYPE_FORWARD_I:
3312                     s->mv_dir = MV_DIR_FORWARD;
3313                     s->mv_type = MV_TYPE_FIELD;
3316                     j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3317                     s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3318                     s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3321                 case CANDIDATE_MB_TYPE_BACKWARD_I:
3322                     s->mv_dir = MV_DIR_BACKWARD;
3323                     s->mv_type = MV_TYPE_FIELD;
3326                     j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3327                     s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3328                     s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3331                 case CANDIDATE_MB_TYPE_BIDIR_I:
3332                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3333                     s->mv_type = MV_TYPE_FIELD;
3335                     for(dir=0; dir<2; dir++){
3337                         j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3338                         s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3339                         s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3344                     av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3347                 encode_mb(s, motion_x, motion_y);
3349                 // RAL: Update last macroblock type
3350                 s->last_mv_dir = s->mv_dir;
3352                 if (CONFIG_H263_ENCODER &&
3353                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3354                     ff_h263_update_motion_val(s);
3356                 ff_mpv_decode_mb(s, s->block);
3359             /* clean the MV table in IPS frames for direct mode in B frames */
3360             if(s->mb_intra /* && I,P,S_TYPE */){
3361                 s->p_mv_table[xy][0]=0;
3362                 s->p_mv_table[xy][1]=0;
3365             if (s->avctx->flags & CODEC_FLAG_PSNR) {
                 /* accumulate reconstruction error (edge MBs clipped) */
3369                 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3370                 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3372                 s->current_picture.error[0] += sse(
3373                     s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3374                     s->dest[0], w, h, s->linesize);
3375                 s->current_picture.error[1] += sse(
3376                     s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
3377                     s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3378                 s->current_picture.error[2] += sse(
3379                     s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
3380                     s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3383             if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3384                 ff_h263_loop_filter(s);
3386             ff_dlog(s->avctx, "MB %d %d bits\n",
3387                     s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3391     //not beautiful here but we must write it before flushing so it has to be here
3392     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3393         ff_msmpeg4_encode_ext_header(s);
3397     /* Send the last GOB if RTP */
3398     if (s->avctx->rtp_callback) {
3399         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3400         pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3401         /* Call the RTP callback to send the last GOB */
3403         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
/* MERGE accumulates a field from a slice context into the main context
 * and zeroes the source, so repeated merges stay correct. */
3409 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold per-slice motion-estimation statistics into the main context. */
3410 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3411     MERGE(me.scene_change_score);
3412     MERGE(me.mc_mb_var_sum_temp);
3413     MERGE(me.mb_var_sum_temp);
/* Fold per-slice encode results into the main context: bit/DCT statistics,
 * error-resilience counters, PSNR error sums, and noise-reduction error
 * accumulators; then append the slice's (byte-aligned) bitstream to the
 * main PutBitContext. */
3416 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3419     MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3420     MERGE(dct_count[1]);
3429     MERGE(er.error_count);
3430     MERGE(padding_bug_score);
3431     MERGE(current_picture.error[0]);
3432     MERGE(current_picture.error[1]);
3433     MERGE(current_picture.error[2]);
3435     if(dst->avctx->noise_reduction){
3436         for(i=0; i<64; i++){
3437             MERGE(dct_error_sum[0][i]);
3438             MERGE(dct_error_sum[1][i]);
     /* slice bitstreams must end byte-aligned before concatenation */
3442     assert(put_bits_count(&src->pb) % 8 ==0);
3443     assert(put_bits_count(&dst->pb) % 8 ==0);
3444     avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3445     flush_put_bits(&dst->pb);
/* Choose the picture-level quality/qscale: a pending next_lambda wins,
 * otherwise rate control estimates it (unless fixed_qscale). With
 * adaptive quantization, codec-specific qscale-table cleanup runs first
 * and s->lambda comes from the per-MB lambda table; otherwise from the
 * picture quality. 'dry_run' avoids consuming next_lambda. */
3448 static int estimate_qp(MpegEncContext *s, int dry_run){
3449     if (s->next_lambda){
3450         s->current_picture_ptr->f->quality =
3451         s->current_picture.f->quality = s->next_lambda;
3452         if(!dry_run) s->next_lambda= 0;
3453     } else if (!s->fixed_qscale) {
3454         s->current_picture_ptr->f->quality =
3455         s->current_picture.f->quality = ff_rate_estimate_qscale(s, dry_run);
3456         if (s->current_picture.f->quality < 0)
3460     if(s->adaptive_quant){
3461         switch(s->codec_id){
3462         case AV_CODEC_ID_MPEG4:
3463             if (CONFIG_MPEG4_ENCODER)
3464                 ff_clean_mpeg4_qscales(s);
3466         case AV_CODEC_ID_H263:
3467         case AV_CODEC_ID_H263P:
3468         case AV_CODEC_ID_FLV1:
3469             if (CONFIG_H263_ENCODER)
3470                 ff_clean_h263_qscales(s);
3473         ff_init_qscale_tab(s);
3476         s->lambda= s->lambda_table[0];
3479         s->lambda = s->current_picture.f->quality;
3484 /* must be called before writing the header */
/* Derive temporal distances from frame PTS: s->time for the current frame,
 * pp_time (distance between the two surrounding non-B frames) and, for
 * B-frames, pb_time (distance from the previous non-B frame). */
3485 static void set_frame_distances(MpegEncContext * s){
3486     av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3487     s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3489     if(s->pict_type==AV_PICTURE_TYPE_B){
3490         s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3491         assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3493         s->pp_time= s->time - s->last_non_b_time;
3494         s->last_non_b_time= s->time;
3495         assert(s->picture_number==0 || s->pp_time > 0);
/* Encode one picture: set up timing and rounding state, run motion
 * estimation across the slice threads, detect scene changes, pick
 * f_code/b_code, build quantisation matrices (MJPEG/AMV), write the
 * codec-specific picture header, then encode all slices and merge the
 * thread contexts. NOTE(review): many interior lines (declarations,
 * error paths, loop headers, closing braces) are missing from this view. */
3499 static int encode_picture(MpegEncContext *s, int picture_number)
3503 int context_count = s->slice_context_count;
3505 s->picture_number = picture_number;
3507 /* Reset the average MB variance */
3508 s->me.mb_var_sum_temp =
3509 s->me.mc_mb_var_sum_temp = 0;
3511 /* we need to initialize some time vars before we can encode b-frames */
3512 // RAL: Condition added for MPEG1VIDEO
3513 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3514 set_frame_distances(s);
3515 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3516 ff_set_mpeg4_time(s);
3518 s->me.scene_change_score=0;
3520 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* no_rounding alternates on non-B frames for codecs with flipflop
 * rounding; I frames reset it (msmpeg4v3+ uses no_rounding=1). */
3522 if(s->pict_type==AV_PICTURE_TYPE_I){
3523 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3524 else s->no_rounding=0;
3525 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3526 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3527 s->no_rounding ^= 1;
/* Two-pass: dry-run QP estimate plus stored fcode; otherwise reuse the
 * lambda of the previous frame of the same kind for motion estimation. */
3530 if (s->avctx->flags & CODEC_FLAG_PASS2) {
3531 if (estimate_qp(s,1) < 0)
3533 ff_get_2pass_fcode(s);
3534 } else if (!(s->avctx->flags & CODEC_FLAG_QSCALE)) {
3535 if(s->pict_type==AV_PICTURE_TYPE_B)
3536 s->lambda= s->last_lambda_for[s->pict_type];
3538 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* For non-MJPEG/AMV codecs chroma shares the luma intra matrices; free
 * any separate chroma matrices first to avoid leaking them. */
3542 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3543 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3544 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3545 s->q_chroma_intra_matrix = s->q_intra_matrix;
3546 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3549 s->mb_intra=0; //for the rate distortion & bit compare functions
3550 for(i=1; i<context_count; i++){
3551 ret = ff_update_duplicate_context(s->thread_context[i], s);
3559 /* Estimate motion for every MB */
3560 if(s->pict_type != AV_PICTURE_TYPE_I){
/* Scale lambda by me_penalty_compensation (Q8 fixed point). */
3561 s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3562 s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3563 if (s->pict_type != AV_PICTURE_TYPE_B) {
3564 if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3565 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3569 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3570 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3572 for(i=0; i<s->mb_stride*s->mb_height; i++)
3573 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3575 if(!s->fixed_qscale){
3576 /* finding spatial complexity for I-frame rate control */
3577 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3580 for(i=1; i<context_count; i++){
3581 merge_context_after_me(s, s->thread_context[i]);
3583 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3584 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* Scene change: force the P frame to be encoded as intra. */
3587 if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3588 s->pict_type= AV_PICTURE_TYPE_I;
3589 for(i=0; i<s->mb_stride*s->mb_height; i++)
3590 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3591 if(s->msmpeg4_version >= 3)
3593 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3594 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* Choose the forward fcode for P/S frames and clip over-long MVs. */
3598 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3599 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3601 if (s->avctx->flags & CODEC_FLAG_INTERLACED_ME) {
3603 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3604 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3605 s->f_code= FFMAX3(s->f_code, a, b);
3608 ff_fix_long_p_mvs(s);
3609 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3610 if (s->avctx->flags & CODEC_FLAG_INTERLACED_ME) {
3614 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3615 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
/* B frames: pick forward (f_code) and backward (b_code) ranges over all
 * candidate tables, then clip over-long MVs in each direction. */
3620 if(s->pict_type==AV_PICTURE_TYPE_B){
3623 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3624 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3625 s->f_code = FFMAX(a, b);
3627 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3628 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3629 s->b_code = FFMAX(a, b);
3631 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3632 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3633 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3634 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3635 if (s->avctx->flags & CODEC_FLAG_INTERLACED_ME) {
3637 for(dir=0; dir<2; dir++){
3640 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3641 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3642 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3643 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* Real (non-dry-run) QP estimate now that the picture type is final. */
3651 if (estimate_qp(s, 0) < 0)
3654 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3655 s->pict_type == AV_PICTURE_TYPE_I &&
3656 !(s->avctx->flags & CODEC_FLAG_QSCALE))
3657 s->qscale= 3; //reduce clipping problems
/* MJPEG folds qscale into the quant matrices themselves. */
3659 if (s->out_format == FMT_MJPEG) {
3660 const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3661 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3663 if (s->avctx->intra_matrix) {
3665 luma_matrix = s->avctx->intra_matrix;
3667 if (s->avctx->chroma_intra_matrix)
3668 chroma_matrix = s->avctx->chroma_intra_matrix;
3670 /* for mjpeg, we do include qscale in the matrix */
3672 int j = s->idsp.idct_permutation[i];
3674 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3675 s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3677 s->y_dc_scale_table=
3678 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3679 s->chroma_intra_matrix[0] =
3680 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3681 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3682 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3683 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3684 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV uses fixed DC scales (13 luma / 14 chroma) and the sp5x tables. */
3687 if(s->codec_id == AV_CODEC_ID_AMV){
3688 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3689 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3691 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3693 s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3694 s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3696 s->y_dc_scale_table= y;
3697 s->c_dc_scale_table= c;
3698 s->intra_matrix[0] = 13;
3699 s->chroma_intra_matrix[0] = 14;
3700 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3701 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3702 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3703 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3707 //FIXME var duplication
3708 s->current_picture_ptr->f->key_frame =
3709 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3710 s->current_picture_ptr->f->pict_type =
3711 s->current_picture.f->pict_type = s->pict_type;
3713 if (s->current_picture.f->key_frame)
3714 s->picture_in_gop_number=0;
/* Write the codec-specific picture header and record its size in bits. */
3716 s->mb_x = s->mb_y = 0;
3717 s->last_bits= put_bits_count(&s->pb);
3718 switch(s->out_format) {
3720 if (CONFIG_MJPEG_ENCODER)
3721 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3722 s->intra_matrix, s->chroma_intra_matrix);
3725 if (CONFIG_H261_ENCODER)
3726 ff_h261_encode_picture_header(s, picture_number);
3729 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3730 ff_wmv2_encode_picture_header(s, picture_number);
3731 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3732 ff_msmpeg4_encode_picture_header(s, picture_number);
3733 else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
3734 ff_mpeg4_encode_picture_header(s, picture_number);
3735 else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3736 ret = ff_rv10_encode_picture_header(s, picture_number);
3740 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3741 ff_rv20_encode_picture_header(s, picture_number);
3742 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3743 ff_flv_encode_picture_header(s, picture_number);
3744 else if (CONFIG_H263_ENCODER)
3745 ff_h263_encode_picture_header(s, picture_number);
3748 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3749 ff_mpeg1_encode_picture_header(s, picture_number);
3754 bits= put_bits_count(&s->pb);
3755 s->header_bits= bits - s->last_bits;
/* Encode all slices in parallel, then merge slice bitstreams/stats back
 * into the main context (growing the main buffer view if a slice buffer
 * is adjacent). */
3757 for(i=1; i<context_count; i++){
3758 update_duplicate_context_after_me(s->thread_context[i], s);
3760 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3761 for(i=1; i<context_count; i++){
3762 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3763 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
3764 merge_context_after_encode(s, s->thread_context[i]);
/* DCT-domain noise reduction: track the cumulative error of each coefficient
 * (per intra/inter class) and shrink coefficients towards zero by the
 * learned dct_offset. NOTE(review): interior lines (the sign branch and
 * closing braces) are missing from this view. */
3770 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3771 const int intra= s->mb_intra;
3774 s->dct_count[intra]++;
3776 for(i=0; i<64; i++){
3777 int level= block[i];
/* Positive coefficient: record error, subtract offset, clamp at zero. */
3781 s->dct_error_sum[intra][i] += level;
3782 level -= s->dct_offset[intra][i];
3783 if(level<0) level=0;
/* Negative coefficient: symmetric treatment. */
3785 s->dct_error_sum[intra][i] -= level;
3786 level += s->dct_offset[intra][i];
3787 if(level>0) level=0;
/* Trellis (rate-distortion optimal) quantisation of one 8x8 block.
 * Forward-DCTs the block, builds up to two candidate quantised levels per
 * coefficient, then runs a Viterbi-style search over (run, level) survivors
 * minimising distortion + lambda*bits. Returns the index of the last
 * non-zero coefficient, or -1/negative when the block quantises to nothing.
 * NOTE(review): many interior lines (declarations, intra/inter setup,
 * closing braces) are missing from this view — statement order here is
 * load-bearing; do not reorder. */
3794 static int dct_quantize_trellis_c(MpegEncContext *s,
3795 int16_t *block, int n,
3796 int qscale, int *overflow){
3798 const uint16_t *matrix;
3799 const uint8_t *scantable= s->intra_scantable.scantable;
3800 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3802 unsigned int threshold1, threshold2;
3814 int coeff_count[64];
3815 int qmul, qadd, start_i, last_non_zero, i, dc;
3816 const int esc_length= s->ac_esc_length;
3818 uint8_t * last_length;
/* lambda in the same fixed-point scale as the bit-length tables. */
3819 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3821 s->fdsp.fdct(block);
3823 if(s->dct_error_sum)
3824 s->denoise_dct(s, block);
3826 qadd= ((qscale-1)|1)*8;
3837 /* For AIC we skip quant/dequant of INTRADC */
3842 /* note: block[0] is assumed to be positive */
3843 block[0] = (block[0] + (q >> 1)) / q;
/* Intra: luma (n<4) and chroma use separate matrices and VLC tables. */
3846 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
3847 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
3848 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
3849 bias= 1<<(QMAT_SHIFT-1);
3851 if (n > 3 && s->intra_chroma_ac_vlc_length) {
3852 length = s->intra_chroma_ac_vlc_length;
3853 last_length= s->intra_chroma_ac_vlc_last_length;
3855 length = s->intra_ac_vlc_length;
3856 last_length= s->intra_ac_vlc_last_length;
/* Inter path: single matrix/VLC set. */
3861 qmat = s->q_inter_matrix[qscale];
3862 matrix = s->inter_matrix;
3863 length = s->inter_ac_vlc_length;
3864 last_length= s->inter_ac_vlc_last_length;
3868 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3869 threshold2= (threshold1<<1);
/* Scan back from 63 to find the last coefficient above threshold. */
3871 for(i=63; i>=start_i; i--) {
3872 const int j = scantable[i];
3873 int level = block[j] * qmat[j];
3875 if(((unsigned)(level+threshold1))>threshold2){
/* Build 1 or 2 candidate levels per surviving coefficient. */
3881 for(i=start_i; i<=last_non_zero; i++) {
3882 const int j = scantable[i];
3883 int level = block[j] * qmat[j];
3885 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3886 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
3887 if(((unsigned)(level+threshold1))>threshold2){
3889 level= (bias + level)>>QMAT_SHIFT;
3891 coeff[1][i]= level-1;
3892 // coeff[2][k]= level-2;
3894 level= (bias - level)>>QMAT_SHIFT;
3895 coeff[0][i]= -level;
3896 coeff[1][i]= -level+1;
3897 // coeff[2][k]= -level+2;
3899 coeff_count[i]= FFMIN(level, 2);
3900 av_assert2(coeff_count[i]);
3903 coeff[0][i]= (level>>31)|1;
3908 *overflow= s->max_qcoeff < max; //overflow might have happened
3910 if(last_non_zero < start_i){
3911 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3912 return last_non_zero;
3915 score_tab[start_i]= 0;
3916 survivor[0]= start_i;
/* Main trellis: for each position try every candidate level against every
 * surviving predecessor, scoring distortion + lambda*vlc_bits. */
3919 for(i=start_i; i<=last_non_zero; i++){
3920 int level_index, j, zero_distortion;
3921 int dct_coeff= FFABS(block[ scantable[i] ]);
3922 int best_score=256*256*256*120;
/* ifast FDCT output is AAN-scaled; undo the scale for distortion. */
3924 if (s->fdsp.fdct == ff_fdct_ifast)
3925 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
3926 zero_distortion= dct_coeff*dct_coeff;
3928 for(level_index=0; level_index < coeff_count[i]; level_index++){
3930 int level= coeff[level_index][i];
3931 const int alevel= FFABS(level);
/* Reconstruct the dequantised value per output format to measure
 * the true distortion of this candidate. */
3936 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
3937 unquant_coeff= alevel*qmul + qadd;
3938 } else if(s->out_format == FMT_MJPEG) {
3939 j = s->idsp.idct_permutation[scantable[i]];
3940 unquant_coeff = alevel * matrix[j] * 8;
3942 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
3944 unquant_coeff = (int)( alevel * qscale * matrix[j]) >> 3;
3945 unquant_coeff = (unquant_coeff - 1) | 1;
3947 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) matrix[j])) >> 4;
3948 unquant_coeff = (unquant_coeff - 1) | 1;
3953 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* Non-escape levels: use per-(run,level) VLC lengths. */
3955 if((level&(~127)) == 0){
3956 for(j=survivor_count-1; j>=0; j--){
3957 int run= i - survivor[j];
3958 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3959 score += score_tab[i-run];
3961 if(score < best_score){
3964 level_tab[i+1]= level-64;
/* H.263/H.261 need a separate "last coefficient" VLC table. */
3968 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
3969 for(j=survivor_count-1; j>=0; j--){
3970 int run= i - survivor[j];
3971 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3972 score += score_tab[i-run];
3973 if(score < last_score){
3976 last_level= level-64;
/* Escape-coded levels: fixed escape length instead of VLC lookup. */
3982 distortion += esc_length*lambda;
3983 for(j=survivor_count-1; j>=0; j--){
3984 int run= i - survivor[j];
3985 int score= distortion + score_tab[i-run];
3987 if(score < best_score){
3990 level_tab[i+1]= level-64;
3994 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
3995 for(j=survivor_count-1; j>=0; j--){
3996 int run= i - survivor[j];
3997 int score= distortion + score_tab[i-run];
3998 if(score < last_score){
4001 last_level= level-64;
4009 score_tab[i+1]= best_score;
4011 //Note: there is a vlc code in mpeg4 which is 1 bit shorter than another one with a shorter run and the same level
/* Prune survivors that can no longer lead to a better path. */
4012 if(last_non_zero <= 27){
4013 for(; survivor_count; survivor_count--){
4014 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4018 for(; survivor_count; survivor_count--){
4019 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4024 survivor[ survivor_count++ ]= i+1;
/* Non-H.263 formats: pick the best end position from the score table. */
4027 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4028 last_score= 256*256*256*120;
4029 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4030 int score= score_tab[i];
4031 if(i) score += lambda*2; //FIXME exacter?
4033 if(score < last_score){
4036 last_level= level_tab[i];
4037 last_run= run_tab[i];
4042 s->coded_score[n] = last_score;
4044 dc= FFABS(block[0]);
4045 last_non_zero= last_i - 1;
4046 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4048 if(last_non_zero < start_i)
4049 return last_non_zero;
/* Special case: only the DC-adjacent coefficient survives — decide whether
 * keeping it beats coding nothing at all. */
4051 if(last_non_zero == 0 && start_i == 0){
4053 int best_score= dc * dc;
4055 for(i=0; i<coeff_count[0]; i++){
4056 int level= coeff[i][0];
4057 int alevel= FFABS(level);
4058 int unquant_coeff, score, distortion;
4060 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4061 unquant_coeff= (alevel*qmul + qadd)>>3;
4063 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) matrix[0])) >> 4;
4064 unquant_coeff = (unquant_coeff - 1) | 1;
4066 unquant_coeff = (unquant_coeff + 4) >> 3;
4067 unquant_coeff<<= 3 + 3;
4069 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4071 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4072 else score= distortion + esc_length*lambda;
4074 if(score < best_score){
4076 best_level= level - 64;
4079 block[0]= best_level;
4080 s->coded_score[n] = best_score - dc*dc;
4081 if(best_level == 0) return -1;
4082 else return last_non_zero;
/* Back-track the winning path and write the chosen levels into block[]
 * in permuted scan order. */
4086 av_assert2(last_level);
4088 block[ perm_scantable[last_non_zero] ]= last_level;
4091 for(; i>start_i; i -= run_tab[i] + 1){
4092 block[ perm_scantable[i-1] ]= level_tab[i];
4095 return last_non_zero;
4098 //#define REFINE_STATS 1
/* 8x8 DCT basis functions in IDCT-permuted order; filled lazily by
 * build_basis() on first use of dct_quantize_refine(). */
4099 static int16_t basis[64][64];
/* Build the scaled DCT-II basis table, applying the IDCT coefficient
 * permutation `perm`. NOTE(review): the i/j/x/y loop headers are missing
 * from this view — presumably four nested 0..7 loops; confirm against the
 * full file. */
4101 static void build_basis(uint8_t *perm){
/* 0.25 is the 2-D DCT normalisation; BASIS_SHIFT fixes the point. */
4108 double s= 0.25*(1<<BASIS_SHIFT);
4110 int perm_index= perm[index];
/* DC rows/columns get the extra 1/sqrt(2) factor. */
4111 if(i==0) s*= sqrt(0.5);
4112 if(j==0) s*= sqrt(0.5);
4113 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4120 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4121 int16_t *block, int16_t *weight, int16_t *orig,
/* Iteratively refine an already-quantised block: greedily try +/-1 changes
 * to each coefficient, accepting the change that most reduces
 * weighted-distortion + lambda*bit-cost, until no change helps. Returns the
 * new last-non-zero index. NOTE(review): many interior lines (parameter
 * tail, declarations, loop/branch structure, REFINE_STATS timers) are
 * missing from this view — exact statement order is essential here. */
4124 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4125 const uint8_t *scantable= s->intra_scantable.scantable;
4126 const uint8_t *perm_scantable= s->intra_scantable.permutated;
4127 // unsigned int threshold1, threshold2;
4132 int qmul, qadd, start_i, last_non_zero, i, dc;
4134 uint8_t * last_length;
4136 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* REFINE_STATS counters (debug only; static, not thread safe). */
4139 static int after_last=0;
4140 static int to_zero=0;
4141 static int from_zero=0;
4144 static int messed_sign=0;
/* Lazily build the DCT basis table on first call. */
4147 if(basis[0][0] == 0)
4148 build_basis(s->idsp.idct_permutation);
4159 /* For AIC we skip quant/dequant of INTRADC */
4163 q <<= RECON_SHIFT-3;
4164 /* note: block[0] is assumed to be positive */
4166 // block[0] = (block[0] + (q >> 1)) / q;
4168 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4169 // bias= 1<<(QMAT_SHIFT-1);
4170 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4171 length = s->intra_chroma_ac_vlc_length;
4172 last_length= s->intra_chroma_ac_vlc_last_length;
4174 length = s->intra_ac_vlc_length;
4175 last_length= s->intra_ac_vlc_last_length;
4180 length = s->inter_ac_vlc_length;
4181 last_length= s->inter_ac_vlc_last_length;
4183 last_non_zero = s->block_last_index[n];
/* rem[] holds the reconstruction residual (DC-biased, RECON_SHIFT fixed
 * point) that the basis updates are applied against. */
4188 dc += (1<<(RECON_SHIFT-1));
4189 for(i=0; i<64; i++){
4190 rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig directly instead of copying to rem[]
4193 STOP_TIMER("memset rem[]")}
/* Build perceptual weights in [16, 63] from qns and |weight|. */
4196 for(i=0; i<64; i++){
4201 w= FFABS(weight[i]) + qns*one;
4202 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4205 // w=weight[i] = (63*qns + (w/2)) / w;
4208 av_assert2(w<(1<<6));
4211 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* Build the initial RLE and subtract each coded coefficient's basis
 * contribution from rem[]. */
4217 for(i=start_i; i<=last_non_zero; i++){
4218 int j= perm_scantable[i];
4219 const int level= block[j];
4223 if(level<0) coeff= qmul*level - qadd;
4224 else coeff= qmul*level + qadd;
4225 run_tab[rle_index++]=run;
4228 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
4234 if(last_non_zero>0){
4235 STOP_TIMER("init rem[]")
/* Greedy refinement loop: best_score starts from the "no change" cost. */
4242 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4245 int run2, best_unquant_change=0, analyze_gradient;
4249 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
/* Gradient of the weighted residual, used to reject sign-inconsistent
 * coefficient introductions cheaply. */
4251 if(analyze_gradient){
4255 for(i=0; i<64; i++){
4258 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
4261 STOP_TIMER("rem*w*w")}
/* Intra DC: try +/-1 on block[0] only (no bit-cost term). */
4271 const int level= block[0];
4272 int change, old_coeff;
4274 av_assert2(s->mb_intra);
4278 for(change=-1; change<=1; change+=2){
4279 int new_level= level + change;
4280 int score, new_coeff;
4282 new_coeff= q*new_level;
4283 if(new_coeff >= 2048 || new_coeff < 0)
4286 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4287 new_coeff - old_coeff);
4288 if(score<best_score){
4291 best_change= change;
4292 best_unquant_change= new_coeff - old_coeff;
4299 run2= run_tab[rle_index++];
/* AC coefficients: for each position try +/-1, accounting for the VLC
 * bit-length delta of the (run,level) pairs affected. */
4303 for(i=start_i; i<64; i++){
4304 int j= perm_scantable[i];
4305 const int level= block[j];
4306 int change, old_coeff;
4308 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4312 if(level<0) old_coeff= qmul*level - qadd;
4313 else old_coeff= qmul*level + qadd;
4314 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4318 av_assert2(run2>=0 || i >= last_non_zero );
4321 for(change=-1; change<=1; change+=2){
4322 int new_level= level + change;
4323 int score, new_coeff, unquant_change;
4326 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4330 if(new_level<0) new_coeff= qmul*new_level - qadd;
4331 else new_coeff= qmul*new_level + qadd;
4332 if(new_coeff >= 2048 || new_coeff <= -2048)
4334 //FIXME check for overflow
/* Changing a non-zero level: plain VLC length delta. */
4337 if(level < 63 && level > -63){
4338 if(i < last_non_zero)
4339 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4340 - length[UNI_AC_ENC_INDEX(run, level+64)];
4342 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4343 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
4346 av_assert2(FFABS(new_level)==1);
/* Introducing a coefficient (0 -> +/-1): reject if it opposes the
 * residual gradient, then account for the run split. */
4348 if(analyze_gradient){
4349 int g= d1[ scantable[i] ];
4350 if(g && (g^new_level) >= 0)
4354 if(i < last_non_zero){
4355 int next_i= i + run2 + 1;
4356 int next_level= block[ perm_scantable[next_i] ] + 64;
4358 if(next_level&(~127))
4361 if(next_i < last_non_zero)
4362 score += length[UNI_AC_ENC_INDEX(run, 65)]
4363 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4364 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4366 score += length[UNI_AC_ENC_INDEX(run, 65)]
4367 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4368 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4370 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4372 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4373 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4379 av_assert2(FFABS(level)==1);
/* Removing a coefficient (+/-1 -> 0): runs merge, reverse delta. */
4381 if(i < last_non_zero){
4382 int next_i= i + run2 + 1;
4383 int next_level= block[ perm_scantable[next_i] ] + 64;
4385 if(next_level&(~127))
4388 if(next_i < last_non_zero)
4389 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4390 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4391 - length[UNI_AC_ENC_INDEX(run, 65)];
4393 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4394 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4395 - length[UNI_AC_ENC_INDEX(run, 65)];
4397 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4399 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4400 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4407 unquant_change= new_coeff - old_coeff;
4408 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4410 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4412 if(score<best_score){
4415 best_change= change;
4416 best_unquant_change= unquant_change;
4420 prev_level= level + 64;
4421 if(prev_level&(~127))
4430 STOP_TIMER("iterative step")}
/* Apply the single best change found this iteration. */
4434 int j= perm_scantable[ best_coeff ];
4436 block[j] += best_change;
4438 if(best_coeff > last_non_zero){
4439 last_non_zero= best_coeff;
4440 av_assert2(block[j]);
4447 if(block[j] - best_change){
4448 if(FFABS(block[j]) > FFABS(block[j] - best_change)){
/* Shrink last_non_zero past any newly-zeroed trailing coefficients. */
4460 for(; last_non_zero>=start_i; last_non_zero--){
4461 if(block[perm_scantable[last_non_zero]])
4467 if(256*256*256*64 % count == 0){
4468 av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
/* Rebuild the RLE table and update rem[] with the applied change, then
 * loop again until no improving change exists. */
4473 for(i=start_i; i<=last_non_zero; i++){
4474 int j= perm_scantable[i];
4475 const int level= block[j];
4478 run_tab[rle_index++]=run;
4485 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4491 if(last_non_zero>0){
4492 STOP_TIMER("iterative search")
4497 return last_non_zero;
4501 * Permute an 8x8 block according to permutation.
4502 * @param block the block which will be permuted according to
4503 * the given permutation vector
4504 * @param permutation the permutation vector
4505 * @param last the last non zero coefficient in scantable order, used to
4506 * speed the permutation up
4507 * @param scantable the used scantable, this is only used to speed the
4508 * permutation up, the block is not (inverse) permutated
4509 * to scantable order!
/* NOTE(review): the temp[] declaration, an early-out, and the memsets are
 * missing from this view; presumably temp buffers the coefficients so the
 * in-place permutation cannot overwrite unread values — confirm against
 * the full file. */
4511 static void block_permute(int16_t *block, uint8_t *permutation,
4512 const uint8_t *scantable, int last)
4519 //FIXME it is ok but not clean and might fail for some permutations
4520 // if (permutation[1] == 1)
/* First pass: copy the non-zero coefficients (scan order) aside. */
4523 for (i = 0; i <= last; i++) {
4524 const int j = scantable[i];
/* Second pass: write them back at their permuted positions. */
4529 for (i = 0; i <= last; i++) {
4530 const int j = scantable[i];
4531 const int perm_j = permutation[j];
4532 block[perm_j] = temp[j];
/* Default (non-trellis) quantiser: forward-DCT the block, quantise each
 * coefficient with the precomputed qmat plus rounding bias, report overflow,
 * and permute the result for the IDCT. Returns the last non-zero index.
 * NOTE(review): interior lines (declarations, intra/inter branch heads,
 * the sign branch, max tracking) are missing from this view. */
4536 int ff_dct_quantize_c(MpegEncContext *s,
4537 int16_t *block, int n,
4538 int qscale, int *overflow)
4540 int i, j, level, last_non_zero, q, start_i;
4542 const uint8_t *scantable= s->intra_scantable.scantable;
4545 unsigned int threshold1, threshold2;
4547 s->fdsp.fdct(block);
4549 if(s->dct_error_sum)
4550 s->denoise_dct(s, block);
4560 /* For AIC we skip quant/dequant of INTRADC */
4563 /* note: block[0] is assumed to be positive */
4564 block[0] = (block[0] + (q >> 1)) / q;
/* Intra: luma (n<4) vs chroma matrices; bias converted from
 * QUANT_BIAS_SHIFT to QMAT_SHIFT fixed point. */
4567 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4568 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4572 qmat = s->q_inter_matrix[qscale];
4573 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4575 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4576 threshold2= (threshold1<<1);
/* Find the last coefficient that survives quantisation. */
4577 for(i=63;i>=start_i;i--) {
4579 level = block[j] * qmat[j];
4581 if(((unsigned)(level+threshold1))>threshold2){
/* Quantise the surviving range; below-threshold coefficients are zeroed. */
4588 for(i=start_i; i<=last_non_zero; i++) {
4590 level = block[j] * qmat[j];
4592 // if( bias+level >= (1<<QMAT_SHIFT)
4593 // || bias-level >= (1<<QMAT_SHIFT)){
4594 if(((unsigned)(level+threshold1))>threshold2){
4596 level= (bias + level)>>QMAT_SHIFT;
4599 level= (bias - level)>>QMAT_SHIFT;
4607 *overflow= s->max_qcoeff < max; //overflow might have happened
4609 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4610 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4611 block_permute(block, s->idsp.idct_permutation,
4612 scantable, last_non_zero);
4614 return last_non_zero;
/* Option-table plumbing: OFFSET maps an option onto an MpegEncContext
 * field; VE marks options as video + encoding parameters. */
4617 #define OFFSET(x) offsetof(MpegEncContext, x)
4618 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private AVOptions of the H.263 encoder. */
4619 static const AVOption h263_options[] = {
4620 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4621 { "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4622 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass exposing h263_options through the AVOptions API. */
4627 static const AVClass h263_class = {
4628 .class_name = "H.263 encoder",
4629 .item_name = av_default_item_name,
4630 .option = h263_options,
4631 .version = LIBAVUTIL_VERSION_INT,
/* H.263 encoder registration (shared mpegvideo init/encode/close). */
4634 AVCodec ff_h263_encoder = {
4636 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4637 .type = AVMEDIA_TYPE_VIDEO,
4638 .id = AV_CODEC_ID_H263,
4639 .priv_data_size = sizeof(MpegEncContext),
4640 .init = ff_mpv_encode_init,
4641 .encode2 = ff_mpv_encode_picture,
4642 .close = ff_mpv_encode_end,
4643 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4644 .priv_class = &h263_class,
/* Private AVOptions of the H.263+ encoder (extends H.263 with UMV/AIV). */
4647 static const AVOption h263p_options[] = {
4648 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4649 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4650 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4651 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
/* AVClass exposing h263p_options through the AVOptions API. */
4655 static const AVClass h263p_class = {
4656 .class_name = "H.263p encoder",
4657 .item_name = av_default_item_name,
4658 .option = h263p_options,
4659 .version = LIBAVUTIL_VERSION_INT,
/* H.263+ (H.263v2) encoder registration; supports slice threading. */
4662 AVCodec ff_h263p_encoder = {
4664 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4665 .type = AVMEDIA_TYPE_VIDEO,
4666 .id = AV_CODEC_ID_H263P,
4667 .priv_data_size = sizeof(MpegEncContext),
4668 .init = ff_mpv_encode_init,
4669 .encode2 = ff_mpv_encode_picture,
4670 .close = ff_mpv_encode_end,
4671 .capabilities = CODEC_CAP_SLICE_THREADS,
4672 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4673 .priv_class = &h263p_class,
/* AVClass for msmpeg4v2; uses the generic mpegvideo option table. */
4676 static const AVClass msmpeg4v2_class = {
4677 .class_name = "msmpeg4v2 encoder",
4678 .item_name = av_default_item_name,
4679 .option = ff_mpv_generic_options,
4680 .version = LIBAVUTIL_VERSION_INT,
/* MS-MPEG4 v2 encoder registration. */
4683 AVCodec ff_msmpeg4v2_encoder = {
4684 .name = "msmpeg4v2",
4685 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4686 .type = AVMEDIA_TYPE_VIDEO,
4687 .id = AV_CODEC_ID_MSMPEG4V2,
4688 .priv_data_size = sizeof(MpegEncContext),
4689 .init = ff_mpv_encode_init,
4690 .encode2 = ff_mpv_encode_picture,
4691 .close = ff_mpv_encode_end,
4692 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4693 .priv_class = &msmpeg4v2_class,
/* AVClass for msmpeg4v3; uses the generic mpegvideo option table. */
4696 static const AVClass msmpeg4v3_class = {
4697 .class_name = "msmpeg4v3 encoder",
4698 .item_name = av_default_item_name,
4699 .option = ff_mpv_generic_options,
4700 .version = LIBAVUTIL_VERSION_INT,
/* MS-MPEG4 v3 encoder registration. */
4703 AVCodec ff_msmpeg4v3_encoder = {
4705 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4706 .type = AVMEDIA_TYPE_VIDEO,
4707 .id = AV_CODEC_ID_MSMPEG4V3,
4708 .priv_data_size = sizeof(MpegEncContext),
4709 .init = ff_mpv_encode_init,
4710 .encode2 = ff_mpv_encode_picture,
4711 .close = ff_mpv_encode_end,
4712 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4713 .priv_class = &msmpeg4v3_class,
/* AVClass for the WMV1 encoder; uses the generic mpegvideo option table. */
4716 static const AVClass wmv1_class = {
4717 .class_name = "wmv1 encoder",
4718 .item_name = av_default_item_name,
4719 .option = ff_mpv_generic_options,
4720 .version = LIBAVUTIL_VERSION_INT,
4723 AVCodec ff_wmv1_encoder = {
4725 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4726 .type = AVMEDIA_TYPE_VIDEO,
4727 .id = AV_CODEC_ID_WMV1,
4728 .priv_data_size = sizeof(MpegEncContext),
4729 .init = ff_mpv_encode_init,
4730 .encode2 = ff_mpv_encode_picture,
4731 .close = ff_mpv_encode_end,
4732 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4733 .priv_class = &wmv1_class,