2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
32 #include "libavutil/internal.h"
33 #include "libavutil/intmath.h"
34 #include "libavutil/mathematics.h"
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/timer.h"
42 #include "mpegvideo.h"
43 #include "mpegvideodata.h"
47 #include "mjpegenc_common.h"
49 #include "mpegutils.h"
52 #include "pixblockdsp.h"
56 #include "aandcttab.h"
58 #include "mpeg4video.h"
60 #include "bytestream.h"
66 #define QUANT_BIAS_SHIFT 8
68 #define QMAT_SHIFT_MMX 16
71 static int encode_picture(MpegEncContext *s, int picture_number);
72 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
73 static int sse_mb(MpegEncContext *s);
74 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
75 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
77 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
78 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
80 const AVOption ff_mpv_generic_options[] = {
/* Precompute per-qscale quantization multiplier tables from a quant matrix.
 * For each qscale in [qmin, qmax] fills qmat (32-bit reciprocals used by the
 * trellis/C quantizer) and qmat16 (16-bit reciprocal + bias pairs used by the
 * SIMD quantizer), choosing the scaling that matches the selected FDCT
 * (islow, faandct or ifast, which folds AAN scales into the matrix).
 * NOTE(review): this chunk is elided (original line numbers are
 * non-contiguous); the visible code is not the complete function. */
85 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
86                        uint16_t (*qmat16)[2][64],
87                        const uint16_t *quant_matrix,
88                        int bias, int qmin, int qmax, int intra)
90     FDCTDSPContext *fdsp = &s->fdsp;
94     for (qscale = qmin; qscale <= qmax; qscale++) {
96         if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
98             fdsp->fdct == ff_faandct ||
99 #endif /* CONFIG_FAANDCT */
100             fdsp->fdct == ff_jpeg_fdct_islow_10) {
101             for (i = 0; i < 64; i++) {
                /* coefficients are stored in IDCT permutation order */
102                 const int j = s->idsp.idct_permutation[i];
103                 int64_t den = (int64_t) qscale * quant_matrix[j];
104                 /* 16 <= qscale * quant_matrix[i] <= 7905
105                  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
106                  * 19952 <= x <= 249205026
107                  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
108                  * 3444240 >= (1 << 36) / (x) >= 275 */
110                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / den);
112         } else if (fdsp->fdct == ff_fdct_ifast) {
113             for (i = 0; i < 64; i++) {
114                 const int j = s->idsp.idct_permutation[i];
                /* ifast output is pre-scaled by the AAN factors, so fold
                 * ff_aanscales into the reciprocal */
115                 int64_t den = ff_aanscales[i] * (int64_t) qscale * quant_matrix[j];
116                 /* 16 <= qscale * quant_matrix[i] <= 7905
117                  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
118                  * 19952 <= x <= 249205026
119                  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
120                  * 3444240 >= (1 << 36) / (x) >= 275 */
122                 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) / den);
125             for (i = 0; i < 64; i++) {
126                 const int j = s->idsp.idct_permutation[i];
127                 int64_t den = (int64_t) qscale * quant_matrix[j];
128                 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
129                  * Assume x = qscale * quant_matrix[i]
131                  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
132                  * so 32768 >= (1 << 19) / (x) >= 67 */
133                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / den);
134                 //qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) /
135                 //                    (qscale * quant_matrix[i]);
136                 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) / den;
                /* keep the 16-bit reciprocal inside a usable range:
                 * 0 would drop the coefficient, 128*256 would overflow */
138                 if (qmat16[qscale][0][i] == 0 ||
139                     qmat16[qscale][0][i] == 128 * 256)
140                     qmat16[qscale][0][i] = 128 * 256 - 1;
141                 qmat16[qscale][1][i] =
142                     ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
143                                 qmat16[qscale][0][i]);
        /* derive the largest safe shift so max * qmat never exceeds INT_MAX;
         * skip the DC coefficient for intra matrices (i starts at 'intra') */
147         for (i = intra; i < 64; i++) {
149             if (fdsp->fdct == ff_fdct_ifast) {
150                 max = (8191LL * ff_aanscales[i]) >> 14;
152             while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
158         av_log(NULL, AV_LOG_INFO,
159                "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/* Derive qscale (and lambda2) from the current rate-control lambda,
 * clipping to the user-configured qmin..qmax range.
 * NOTE(review): elided chunk — closing lines of this function are not
 * visible here. */
164 static inline void update_qscale(MpegEncContext *s)
166     s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
167                 (FF_LAMBDA_SHIFT + 7);
168     s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
170     s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/* Write a 64-entry quant matrix to the bitstream, 8 bits per coefficient,
 * in zigzag scan order. */
174 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
180     for (i = 0; i < 64; i++) {
181         put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
188  * init s->current_picture.qscale_table from s->lambda_table
 * Converts each macroblock's lambda into a clipped per-MB qscale value
 * (same lambda->qp mapping as update_qscale above).
190 void ff_init_qscale_tab(MpegEncContext *s)
192     int8_t * const qscale_table = s->current_picture.qscale_table;
195     for (i = 0; i < s->mb_num; i++) {
        /* mb_index2xy maps raster MB index to the table layout */
196         unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
197         int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
198         qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/* Copy the fields that motion estimation may have changed from the master
 * context into a slice-thread duplicate context.
 * NOTE(review): elided chunk — only part of the COPY list is visible. */
203 static void update_duplicate_context_after_me(MpegEncContext *dst,
206 #define COPY(a) dst->a= src->a
208     COPY(current_picture);
214     COPY(picture_in_gop_number);
215     COPY(gop_picture_number);
216     COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
217     COPY(progressive_frame);    // FIXME don't set in encode_header
218     COPY(partitioned_frame);    // FIXME don't set in encode_header
223  * Set the given MpegEncContext to defaults for encoding.
224  * the changed fields will not depend upon the prior state of the MpegEncContext.
226 static void mpv_encode_defaults(MpegEncContext *s)
229     ff_mpv_common_defaults(s);
    /* default fcode table: every MV in [-16,16) costs fcode 1;
     * the static tables are shared between all encoder instances */
231     for (i = -16; i < 16; i++) {
232         default_fcode_tab[i + MAX_MV] = 1;
234     s->me.mv_penalty = default_mv_penalty;
235     s->fcode_tab     = default_fcode_tab;
237     s->input_picture_number  = 0;
238     s->picture_in_gop_number = 0;
/* Select the DCT quantizer implementations: arch-specific override first,
 * then the C fallback; trellis quantization replaces the default quantizer
 * (but fast_dct_quantize keeps the non-trellis version). */
241 av_cold int ff_dct_encode_init(MpegEncContext *s) {
243     ff_dct_encode_init_x86(s);
245     if (CONFIG_H263_ENCODER)
246         ff_h263dsp_init(&s->h263dsp);
247     if (!s->dct_quantize)
248         s->dct_quantize = ff_dct_quantize_c;
250         s->denoise_dct = denoise_dct_c;
251     s->fast_dct_quantize = s->dct_quantize;
252     if (s->avctx->trellis)
253         s->dct_quantize  = dct_quantize_trellis_c;
258 /* init video encoder */
/* Validates all user options against the selected codec's capabilities,
 * fills the MpegEncContext from the AVCodecContext, sets up per-codec
 * output format / bitstream quirks, initializes DSP contexts, allocates
 * encoder-side tables and precomputes quantization matrices.
 * Returns 0 on success, a negative AVERROR on invalid configuration.
 * NOTE(review): elided chunk — many lines (including most early-return
 * error paths) are missing from this view. */
259 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
261     MpegEncContext *s = avctx->priv_data;
262     int i, ret, format_supported;
264     mpv_encode_defaults(s);
    /* --- pixel format validation, per codec --- */
266     switch (avctx->codec_id) {
267     case AV_CODEC_ID_MPEG2VIDEO:
268         if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
269             avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
270             av_log(avctx, AV_LOG_ERROR,
271                    "only YUV420 and YUV422 are supported\n");
275     case AV_CODEC_ID_MJPEG:
276     case AV_CODEC_ID_AMV:
277         format_supported = 0;
278         /* JPEG color space */
279         if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
280             avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
281             avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
282             (avctx->color_range == AVCOL_RANGE_JPEG &&
283              (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
284               avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
285               avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
286             format_supported = 1;
287         /* MPEG color space */
288         else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
289                  (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
290                   avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
291                   avctx->pix_fmt == AV_PIX_FMT_YUV444P))
292             format_supported = 1;
294         if (!format_supported) {
295             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
300         if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
301             av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
    /* derive internal chroma format from the pixel format */
306     switch (avctx->pix_fmt) {
307     case AV_PIX_FMT_YUVJ444P:
308     case AV_PIX_FMT_YUV444P:
309         s->chroma_format = CHROMA_444;
311     case AV_PIX_FMT_YUVJ422P:
312     case AV_PIX_FMT_YUV422P:
313         s->chroma_format = CHROMA_422;
315     case AV_PIX_FMT_YUVJ420P:
316     case AV_PIX_FMT_YUV420P:
318         s->chroma_format = CHROMA_420;
    /* --- copy basic parameters from the user context --- */
322     s->bit_rate = avctx->bit_rate;
323     s->width    = avctx->width;
324     s->height   = avctx->height;
325     if (avctx->gop_size > 600 &&
326         avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
327         av_log(avctx, AV_LOG_WARNING,
328                "keyframe interval too large!, reducing it from %d to %d\n",
329                avctx->gop_size, 600);
330         avctx->gop_size = 600;
332     s->gop_size     = avctx->gop_size;
334     if (avctx->max_b_frames > MAX_B_FRAMES) {
335         av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
336                "is %d.\n", MAX_B_FRAMES);
337         avctx->max_b_frames = MAX_B_FRAMES;
339     s->max_b_frames = avctx->max_b_frames;
340     s->codec_id     = avctx->codec->id;
341     s->strict_std_compliance = avctx->strict_std_compliance;
342     s->quarter_sample     = (avctx->flags & CODEC_FLAG_QPEL) != 0;
343     s->mpeg_quant         = avctx->mpeg_quant;
344     s->rtp_mode           = !!avctx->rtp_payload_size;
345     s->intra_dc_precision = avctx->intra_dc_precision;
347     // workaround some differences between how applications specify dc precision
348     if (s->intra_dc_precision < 0) {
349         s->intra_dc_precision += 8;
350     } else if (s->intra_dc_precision >= 8)
351         s->intra_dc_precision -= 8;
353     if (s->intra_dc_precision < 0) {
354         av_log(avctx, AV_LOG_ERROR,
355                 "intra dc precision must be positive, note some applications use"
356                 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
357         return AVERROR(EINVAL);
    /* only MPEG-2 supports a non-zero DC precision (up to 3 extra bits) */
360     if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
361         av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
362         return AVERROR(EINVAL);
364     s->user_specified_pts = AV_NOPTS_VALUE;
366     if (s->gop_size <= 1) {
373     s->me_method = avctx->me_method;
376     s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
379 FF_DISABLE_DEPRECATION_WARNINGS
380     if (avctx->border_masking != 0.0)
381         s->border_masking = avctx->border_masking;
382 FF_ENABLE_DEPRECATION_WARNINGS
    /* adaptive quant needs at least one masking/complexity option active */
385     s->adaptive_quant = (s->avctx->lumi_masking ||
386                          s->avctx->dark_masking ||
387                          s->avctx->temporal_cplx_masking ||
388                          s->avctx->spatial_cplx_masking  ||
389                          s->avctx->p_masking      ||
391                          (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
394     s->loop_filter = !!(s->avctx->flags & CODEC_FLAG_LOOP_FILTER);
    /* --- rate control / VBV buffer sanity checks --- */
396     if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
397         switch(avctx->codec_id) {
398         case AV_CODEC_ID_MPEG1VIDEO:
399         case AV_CODEC_ID_MPEG2VIDEO:
400             avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
402         case AV_CODEC_ID_MPEG4:
403         case AV_CODEC_ID_MSMPEG4V1:
404         case AV_CODEC_ID_MSMPEG4V2:
405         case AV_CODEC_ID_MSMPEG4V3:
            /* piecewise-linear VBV size interpolation between profile points */
406             if       (avctx->rc_max_rate >= 15000000) {
407                 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
408             } else if(avctx->rc_max_rate >=  2000000) {
409                 avctx->rc_buffer_size =  80 + (avctx->rc_max_rate -  2000000LL) * (320- 80) / (15000000 -  2000000);
410             } else if(avctx->rc_max_rate >=   384000) {
411                 avctx->rc_buffer_size =  40 + (avctx->rc_max_rate -   384000LL) * ( 80- 40) / ( 2000000 -   384000);
413                 avctx->rc_buffer_size = 40;
414             avctx->rc_buffer_size *= 16384;
417         if (avctx->rc_buffer_size) {
418             av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
422     if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
423         av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
427     if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
428         av_log(avctx, AV_LOG_INFO,
429                "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
432     if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
433         av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
437     if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
438         av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
442     if (avctx->rc_max_rate &&
443         avctx->rc_max_rate == avctx->bit_rate &&
444         avctx->rc_max_rate != avctx->rc_min_rate) {
445         av_log(avctx, AV_LOG_INFO,
446                "impossible bitrate constraints, this will fail\n");
449     if (avctx->rc_buffer_size &&
450         avctx->bit_rate * (int64_t)avctx->time_base.num >
451             avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
452         av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
456     if (!s->fixed_qscale &&
457         avctx->bit_rate * av_q2d(avctx->time_base) >
458             avctx->bit_rate_tolerance) {
459         av_log(avctx, AV_LOG_WARNING,
460                "bitrate tolerance %d too small for bitrate %d, overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
461         avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
    /* CBR MPEG-1/2: warn if the VBV buffer cannot be expressed in the
     * 16-bit vbv_delay field */
464     if (s->avctx->rc_max_rate &&
465         s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
466         (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
467          s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
468         90000LL * (avctx->rc_buffer_size - 1) >
469             s->avctx->rc_max_rate * 0xFFFFLL) {
470         av_log(avctx, AV_LOG_INFO,
471                "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
472                "specified vbv buffer is too large for the given bitrate!\n");
    /* --- feature/codec compatibility checks --- */
475     if ((s->avctx->flags & CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
476         s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
477         s->codec_id != AV_CODEC_ID_FLV1) {
478         av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
482     if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
483         av_log(avctx, AV_LOG_ERROR,
484                "OBMC is only supported with simple mb decision\n");
488     if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
489         av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
493     if (s->max_b_frames                    &&
494         s->codec_id != AV_CODEC_ID_MPEG4      &&
495         s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
496         s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
497         av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
500     if (s->max_b_frames < 0) {
501         av_log(avctx, AV_LOG_ERROR,
502                "max b frames must be 0 or positive for mpegvideo based encoders\n");
    /* H.263/MPEG-4 store the pixel aspect ratio in 8-bit fields */
506     if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
507          s->codec_id == AV_CODEC_ID_H263  ||
508          s->codec_id == AV_CODEC_ID_H263P) &&
509         (avctx->sample_aspect_ratio.num > 255 ||
510          avctx->sample_aspect_ratio.den > 255)) {
511         av_log(avctx, AV_LOG_WARNING,
512                "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
513                avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
514         av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
515                    avctx->sample_aspect_ratio.num,  avctx->sample_aspect_ratio.den, 255);
    /* --- per-codec resolution limits --- */
518     if ((s->codec_id == AV_CODEC_ID_H263  ||
519          s->codec_id == AV_CODEC_ID_H263P) &&
520         (avctx->width  > 2048 ||
521          avctx->height > 1152 )) {
522         av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
525     if ((s->codec_id == AV_CODEC_ID_H263  ||
526          s->codec_id == AV_CODEC_ID_H263P) &&
527         ((avctx->width &3) ||
528          (avctx->height&3) )) {
529         av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
533     if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
534         (avctx->width  > 4095 ||
535          avctx->height > 4095 )) {
536         av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
540     if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
541         (avctx->width  > 16383 ||
542          avctx->height > 16383 )) {
543         av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
547     if (s->codec_id == AV_CODEC_ID_RV10 &&
549          avctx->height&15 )) {
550         av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
551         return AVERROR(EINVAL);
554     if (s->codec_id == AV_CODEC_ID_RV20 &&
557         av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
558         return AVERROR(EINVAL);
561     if ((s->codec_id == AV_CODEC_ID_WMV1 ||
562          s->codec_id == AV_CODEC_ID_WMV2) &&
564         av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
568     if ((s->avctx->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
569         s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
570         av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
574     // FIXME mpeg2 uses that too
575     if (s->mpeg_quant && (   s->codec_id != AV_CODEC_ID_MPEG4
576                           && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
577         av_log(avctx, AV_LOG_ERROR,
578                "mpeg2 style quantization not supported by codec\n");
582     if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
583         av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
587     if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
588         s->avctx->mb_decision != FF_MB_DECISION_RD) {
589         av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
593     if (s->avctx->scenechange_threshold < 1000000000 &&
594         (s->avctx->flags & CODEC_FLAG_CLOSED_GOP)) {
595         av_log(avctx, AV_LOG_ERROR,
596                "closed gop with scene change detection are not supported yet, "
597                "set threshold to 1000000000\n");
601     if (s->avctx->flags & CODEC_FLAG_LOW_DELAY) {
602         if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
603             av_log(avctx, AV_LOG_ERROR,
604                   "low delay forcing is only available for mpeg2\n");
607         if (s->max_b_frames != 0) {
608             av_log(avctx, AV_LOG_ERROR,
609                    "b frames cannot be used with low delay\n");
614     if (s->q_scale_type == 1) {
615         if (avctx->qmax > 12) {
616             av_log(avctx, AV_LOG_ERROR,
617                    "non linear quant only supports qmax <= 12 currently\n");
    /* --- threading restrictions --- */
622     if (s->avctx->thread_count > 1         &&
623         s->codec_id != AV_CODEC_ID_MPEG4      &&
624         s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
625         s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
626         s->codec_id != AV_CODEC_ID_MJPEG      &&
627         (s->codec_id != AV_CODEC_ID_H263P)) {
628         av_log(avctx, AV_LOG_ERROR,
629                "multi threaded encoding not supported by codec\n");
633     if (s->avctx->thread_count < 1) {
634         av_log(avctx, AV_LOG_ERROR,
635                "automatic thread number detection not supported by codec, "
640     if (s->avctx->slices > 1 || s->avctx->thread_count > 1)
643     if (s->avctx->thread_count > 1 && s->codec_id == AV_CODEC_ID_H263P)
644         s->h263_slice_structured = 1;
646     if (!avctx->time_base.den || !avctx->time_base.num) {
647         av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
651     if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
652         av_log(avctx, AV_LOG_INFO,
653                "notice: b_frame_strategy only affects the first pass\n");
654         avctx->b_frame_strategy = 0;
    /* normalize the time base so the denominator stays representable */
657     i = av_gcd(avctx->time_base.den, avctx->time_base.num);
659         av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
660         avctx->time_base.den /= i;
661         avctx->time_base.num /= i;
    /* --- default quantization biases, per codec family --- */
665     if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) {
666         // (a + x * 3 / 8) / x
667         s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
668         s->inter_quant_bias = 0;
670         s->intra_quant_bias = 0;
672         s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
675     if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
676         av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
677         return AVERROR(EINVAL);
680 #if FF_API_QUANT_BIAS
681 FF_DISABLE_DEPRECATION_WARNINGS
682     if (s->intra_quant_bias == FF_DEFAULT_QUANT_BIAS &&
683         avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
684         s->intra_quant_bias = avctx->intra_quant_bias;
685     if (s->inter_quant_bias == FF_DEFAULT_QUANT_BIAS &&
686         avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
687         s->inter_quant_bias = avctx->inter_quant_bias;
688 FF_ENABLE_DEPRECATION_WARNINGS
691     av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
    /* MPEG-4 stores time_base.den in a 16-bit field */
693     if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
694         s->avctx->time_base.den > (1 << 16) - 1) {
695         av_log(avctx, AV_LOG_ERROR,
696                "timebase %d/%d not supported by MPEG 4 standard, "
697                "the maximum admitted value for the timebase denominator "
698                "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
702     s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
    /* --- per-codec output format and bitstream quirks --- */
704     switch (avctx->codec->id) {
705     case AV_CODEC_ID_MPEG1VIDEO:
706         s->out_format = FMT_MPEG1;
707         s->low_delay  = !!(s->avctx->flags & CODEC_FLAG_LOW_DELAY);
708         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
710     case AV_CODEC_ID_MPEG2VIDEO:
711         s->out_format = FMT_MPEG1;
712         s->low_delay  = !!(s->avctx->flags & CODEC_FLAG_LOW_DELAY);
713         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
716     case AV_CODEC_ID_MJPEG:
717     case AV_CODEC_ID_AMV:
718         s->out_format = FMT_MJPEG;
719         s->intra_only = 1; /* force intra only for jpeg */
720         if (!CONFIG_MJPEG_ENCODER ||
721             ff_mjpeg_encode_init(s) < 0)
726     case AV_CODEC_ID_H261:
727         if (!CONFIG_H261_ENCODER)
729         if (ff_h261_get_picture_format(s->width, s->height) < 0) {
730             av_log(avctx, AV_LOG_ERROR,
731                    "The specified picture size of %dx%d is not valid for the "
732                    "H.261 codec.\nValid sizes are 176x144, 352x288\n",
733                     s->width, s->height);
736         s->out_format = FMT_H261;
739         s->rtp_mode   = 0; /* Sliced encoding not supported */
741     case AV_CODEC_ID_H263:
742         if (!CONFIG_H263_ENCODER)
744         if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
745                              s->width, s->height) == 8) {
746             av_log(avctx, AV_LOG_ERROR,
747                    "The specified picture size of %dx%d is not valid for "
748                    "the H.263 codec.\nValid sizes are 128x96, 176x144, "
749                    "352x288, 704x576, and 1408x1152. "
750                    "Try H.263+.\n", s->width, s->height);
753         s->out_format = FMT_H263;
757     case AV_CODEC_ID_H263P:
758         s->out_format = FMT_H263;
761         s->h263_aic        = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
762         s->modified_quant  = s->h263_aic;
763         s->loop_filter     = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
764         s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
767         /* These are just to be sure */
771     case AV_CODEC_ID_FLV1:
772         s->out_format      = FMT_H263;
773         s->h263_flv        = 2; /* format = 1; 11-bit codes */
774         s->unrestricted_mv = 1;
775         s->rtp_mode  = 0; /* don't allow GOB */
779     case AV_CODEC_ID_RV10:
780         s->out_format = FMT_H263;
784     case AV_CODEC_ID_RV20:
785         s->out_format      = FMT_H263;
788         s->modified_quant  = 1;
792         s->unrestricted_mv = 0;
794     case AV_CODEC_ID_MPEG4:
795         s->out_format      = FMT_H263;
797         s->unrestricted_mv = 1;
798         s->low_delay       = s->max_b_frames ? 0 : 1;
799         avctx->delay       = s->low_delay ? 0 : (s->max_b_frames + 1);
801     case AV_CODEC_ID_MSMPEG4V2:
802         s->out_format      = FMT_H263;
804         s->unrestricted_mv = 1;
805         s->msmpeg4_version = 2;
809     case AV_CODEC_ID_MSMPEG4V3:
810         s->out_format        = FMT_H263;
812         s->unrestricted_mv   = 1;
813         s->msmpeg4_version   = 3;
814         s->flipflop_rounding = 1;
818     case AV_CODEC_ID_WMV1:
819         s->out_format        = FMT_H263;
821         s->unrestricted_mv   = 1;
822         s->msmpeg4_version   = 4;
823         s->flipflop_rounding = 1;
827     case AV_CODEC_ID_WMV2:
828         s->out_format        = FMT_H263;
830         s->unrestricted_mv   = 1;
831         s->msmpeg4_version   = 5;
832         s->flipflop_rounding = 1;
840     avctx->has_b_frames = !s->low_delay;
844     s->progressive_frame    =
845     s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
846                                                 CODEC_FLAG_INTERLACED_ME) ||
    /* --- common state, DSP contexts and encoder-side allocations --- */
851     if (ff_mpv_common_init(s) < 0)
854     ff_fdctdsp_init(&s->fdsp, avctx);
855     ff_me_cmp_init(&s->mecc, avctx);
856     ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
857     ff_pixblockdsp_init(&s->pdsp, avctx);
858     ff_qpeldsp_init(&s->qdsp);
860     if (s->msmpeg4_version) {
861         FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
862                           2 * 2 * (MAX_LEVEL + 1) *
863                           (MAX_RUN + 1) * 2 * sizeof(int), fail);
865     FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
867     FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,   64 * 32 * sizeof(int), fail);
868     FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail);
869     FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,   64 * 32 * sizeof(int), fail);
870     FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
871     FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
872     FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
873     FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
874                       MAX_PICTURE_COUNT * sizeof(Picture *), fail);
875     FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
876                       MAX_PICTURE_COUNT * sizeof(Picture *), fail);
878     if (s->avctx->noise_reduction) {
879         FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
880                           2 * 64 * sizeof(uint16_t), fail);
883     ff_dct_encode_init(s);
885     if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
886         s->chroma_qscale_table = ff_h263_chroma_qscale_table;
888     s->quant_precision = 5;
890     ff_set_cmp(&s->mecc, s->mecc.ildct_cmp,      s->avctx->ildct_cmp);
891     ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->avctx->frame_skip_cmp);
893     if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
894         ff_h261_encode_init(s);
895     if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
896         ff_h263_encode_init(s);
897     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
898         if ((ret = ff_msmpeg4_encode_init(s)) < 0)
900     if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
901         && s->out_format == FMT_MPEG1)
902         ff_mpeg1_encode_init(s);
    /* --- select default quant matrices (IDCT-permuted), then allow
     * user-supplied overrides --- */
905     for (i = 0; i < 64; i++) {
906         int j = s->idsp.idct_permutation[i];
907         if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
909             s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
910             s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
911         } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
913             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
916             s->chroma_intra_matrix[j] =
917             s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
918             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
920         if (s->avctx->intra_matrix)
921             s->intra_matrix[j] = s->avctx->intra_matrix[i];
922         if (s->avctx->inter_matrix)
923             s->inter_matrix[j] = s->avctx->inter_matrix[i];
926     /* precompute matrix */
927     /* for mjpeg, we do include qscale in the matrix */
928     if (s->out_format != FMT_MJPEG) {
929         ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
930                           s->intra_matrix, s->intra_quant_bias, avctx->qmin,
932         ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
933                           s->inter_matrix, s->inter_quant_bias, avctx->qmin,
937     if (ff_rate_control_init(s) < 0)
    /* --- honor deprecated AVCodecContext options while they still exist --- */
940 #if FF_API_ERROR_RATE
941     FF_DISABLE_DEPRECATION_WARNINGS
942     if (avctx->error_rate)
943         s->error_rate = avctx->error_rate;
944     FF_ENABLE_DEPRECATION_WARNINGS;
947 #if FF_API_NORMALIZE_AQP
948     FF_DISABLE_DEPRECATION_WARNINGS
949     if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP)
950         s->mpv_flags |= FF_MPV_FLAG_NAQ;
951     FF_ENABLE_DEPRECATION_WARNINGS;
955     FF_DISABLE_DEPRECATION_WARNINGS
956     if (avctx->flags & CODEC_FLAG_MV0)
957         s->mpv_flags |= FF_MPV_FLAG_MV0;
958     FF_ENABLE_DEPRECATION_WARNINGS
962     FF_DISABLE_DEPRECATION_WARNINGS
963     if (avctx->rc_qsquish != 0.0)
964         s->rc_qsquish = avctx->rc_qsquish;
965     if (avctx->rc_qmod_amp != 0.0)
966         s->rc_qmod_amp = avctx->rc_qmod_amp;
967     if (avctx->rc_qmod_freq)
968         s->rc_qmod_freq = avctx->rc_qmod_freq;
969     if (avctx->rc_buffer_aggressivity != 1.0)
970         s->rc_buffer_aggressivity = avctx->rc_buffer_aggressivity;
971     if (avctx->rc_initial_cplx != 0.0)
972         s->rc_initial_cplx = avctx->rc_initial_cplx;
974         s->lmin = avctx->lmin;
976         s->lmax = avctx->lmax;
980         s->rc_eq = av_strdup(avctx->rc_eq);
982             return AVERROR(ENOMEM);
984     FF_ENABLE_DEPRECATION_WARNINGS
    /* b_frame_strategy 2 needs downscaled temp frames for lookahead */
987     if (avctx->b_frame_strategy == 2) {
988         for (i = 0; i < s->max_b_frames + 2; i++) {
989             s->tmp_frames[i] = av_frame_alloc();
990             if (!s->tmp_frames[i])
991                 return AVERROR(ENOMEM);
993             s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
994             s->tmp_frames[i]->width  = s->width  >> avctx->brd_scale;
995             s->tmp_frames[i]->height = s->height >> avctx->brd_scale;
997             ret = av_frame_get_buffer(s->tmp_frames[i], 32);
    /* error path: free everything allocated so far */
1005     ff_mpv_encode_end(avctx);
1006     return AVERROR_UNKNOWN;
/* Free all encoder-side state; also used as the error-path cleanup from
 * ff_mpv_encode_init, so every free must tolerate partially-initialized
 * state. NOTE(review): elided chunk — some lines are missing here. */
1009 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1011     MpegEncContext *s = avctx->priv_data;
1014     ff_rate_control_uninit(s);
1016     ff_mpv_common_end(s);
1017     if (CONFIG_MJPEG_ENCODER &&
1018         s->out_format == FMT_MJPEG)
1019         ff_mjpeg_encode_close(s);
1021     av_freep(&avctx->extradata);
1023     for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1024         av_frame_free(&s->tmp_frames[i]);
1026     ff_free_picture_tables(&s->new_picture);
1027     ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1029     av_freep(&s->avctx->stats_out);
1030     av_freep(&s->ac_stats);
    /* the chroma matrices may alias the luma ones; only free when distinct,
     * then NULL them so av_freep on the luma pointers can't double-free */
1032     if(s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
1033     if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1034     s->q_chroma_intra_matrix=   NULL;
1035     s->q_chroma_intra_matrix16= NULL;
1036     av_freep(&s->q_intra_matrix);
1037     av_freep(&s->q_inter_matrix);
1038     av_freep(&s->q_intra_matrix16);
1039     av_freep(&s->q_inter_matrix16);
1040     av_freep(&s->input_picture);
1041     av_freep(&s->reordered_input_picture);
1042     av_freep(&s->dct_offset);
/* Sum of absolute errors of a 16x16 block against a constant reference
 * value (used to estimate how "flat" a block is around its mean). */
1047 static int get_sae(uint8_t *src, int ref, int stride)
1052     for (y = 0; y < 16; y++) {
1053         for (x = 0; x < 16; x++) {
1054             acc += FFABS(src[x + y * stride] - ref);
/* Count 16x16 blocks that would likely be cheaper as intra: a block is
 * counted when its flatness (SAE around its mean, plus a 500 margin) is
 * below the inter SAD against the reference frame. */
1061 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1062                            uint8_t *ref, int stride)
1068     h = s->height & ~15;
1070     for (y = 0; y < h; y += 16) {
1071         for (x = 0; x < w; x += 16) {
1072             int offset = x + y * stride;
1073             int sad  = s->mecc.sad[0](NULL, src + offset, ref + offset,
1075             int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1076             int sae  = get_sae(src + offset, mean, stride);
1078             acc += sae + 500 < sad;
/* Thin encoder-side wrapper around ff_alloc_picture, passing the current
 * geometry/stride state from the context. */
1084 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1086     return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1087                             s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1088                             s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1089                             &s->linesize, &s->uvlinesize);
/* Take a user frame into the encoder's input queue: validate/guess its pts,
 * reference it directly when strides and alignment allow, otherwise copy it
 * into an internal Picture, then append it to s->input_picture with the
 * configured encoding delay.
 * NOTE(review): elided chunk — several lines (including the early
 * "direct" decision and padding logic) are missing from this view. */
1092 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1094     Picture *pic = NULL;
1096     int i, display_picture_number = 0, ret;
1097     const int encoding_delay = s->max_b_frames ? s->max_b_frames :
1098                                                  (s->low_delay ? 0 : 1);
1103         display_picture_number = s->input_picture_number++;
1105         if (pts != AV_NOPTS_VALUE) {
1106             if (s->user_specified_pts != AV_NOPTS_VALUE) {
1107                 int64_t last = s->user_specified_pts;
                /* pts must be strictly increasing */
1110                     av_log(s->avctx, AV_LOG_ERROR,
1111                            "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1113                     return AVERROR(EINVAL);
1116                 if (!s->low_delay && display_picture_number == 1)
1117                     s->dts_delta = pts - last;
1119             s->user_specified_pts = pts;
            /* no pts supplied: extrapolate from the previous one */
1121             if (s->user_specified_pts != AV_NOPTS_VALUE) {
1122                 s->user_specified_pts =
1123                 pts = s->user_specified_pts + 1;
1124                 av_log(s->avctx, AV_LOG_INFO,
1125                        "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1128                 pts = display_picture_number;
        /* direct (zero-copy) use requires matching strides and alignment */
1134         if (!pic_arg->buf[0] ||
1135             pic_arg->linesize[0] != s->linesize ||
1136             pic_arg->linesize[1] != s->uvlinesize ||
1137             pic_arg->linesize[2] != s->uvlinesize)
1139         if ((s->width & 15) || (s->height & 15))
1141         if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1143         if (s->linesize & (STRIDE_ALIGN-1))
1146         ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1147                 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1149         i = ff_find_unused_picture(s->avctx, s->picture, direct);
1153         pic = &s->picture[i];
1157             if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1160             ret = alloc_picture(s, pic, direct);
            /* already in place (e.g. user buffers from our own pool)? */
1165             if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1166                 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1167                 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1170                 int h_chroma_shift, v_chroma_shift;
1171                 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                /* plane-by-plane copy into the internal buffer */
1175                 for (i = 0; i < 3; i++) {
1176                     int src_stride = pic_arg->linesize[i];
1177                     int dst_stride = i ? s->uvlinesize : s->linesize;
1178                     int h_shift = i ? h_chroma_shift : 0;
1179                     int v_shift = i ? v_chroma_shift : 0;
1180                     int w = s->width  >> h_shift;
1181                     int h = s->height >> v_shift;
1182                     uint8_t *src = pic_arg->data[i];
1183                     uint8_t *dst = pic->f->data[i];
1186                     if (   s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1187                         && !s->progressive_sequence
1188                         && FFALIGN(s->height, 32) - s->height > 16)
1191                     if (!s->avctx->rc_buffer_size)
1192                         dst += INPLACE_OFFSET;
1194                     if (src_stride == dst_stride)
1195                         memcpy(dst, src, src_stride * h);
1198                         uint8_t *dst2 = dst;
1200                             memcpy(dst2, src, w);
                    /* pad edges when dimensions are not MB-aligned */
1205                     if ((s->width & 15) || (s->height & (vpad-1))) {
1206                         s->mpvencdsp.draw_edges(dst, dst_stride,
1215         ret = av_frame_copy_props(pic->f, pic_arg);
1219         pic->f->display_picture_number = display_picture_number;
1220         pic->f->pts = pts; // we set this here to avoid modifiying pic_arg
1223     /* shift buffer entries */
1224     for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1225         s->input_picture[i - 1] = s->input_picture[i];
1227     s->input_picture[encoding_delay] = (Picture*) pic;
/**
 * Decide whether the candidate input frame is similar enough to the last
 * reference frame to be dropped entirely (frame skipping).
 *
 * Every 8x8 block of each of the three planes of p is compared against the
 * co-located block of ref with mecc.frame_skip_cmp[1]; the per-block scores
 * are folded into one frame score using the metric selected by
 * FFABS(avctx->frame_skip_exp): 0 = max, 1 = sum of |v|, 2 = sum of v^2,
 * 3 = sum of |v^3|, 4 = sum of v^4.
 * NOTE(review): several lines of this function are elided in this excerpt
 * (loop closers, the score64 merge, return statements) — the visible code
 * shows the scoring and the two threshold tests only.
 */
1232 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1236 int64_t score64 = 0;
1238 for (plane = 0; plane < 3; plane++) {
1239 const int stride = p->f->linesize[plane];
     /* luma covers 2x2 blocks per MB dimension, chroma 1x1 (4:2:0 layout) */
1240 const int bw = plane ? 1 : 2;
1241 for (y = 0; y < s->mb_height * bw; y++) {
1242 for (x = 0; x < s->mb_width * bw; x++) {
     /* non-shared input frames carry an INPLACE offset of 16 bytes */
1243 int off = p->shared ? 0 : 16;
1244 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1245 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1246 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1248 switch (FFABS(s->avctx->frame_skip_exp)) {
1249 case 0: score = FFMAX(score, v); break;
1250 case 1: score += FFABS(v); break;
1251 case 2: score64 += v * (int64_t)v; break;
1252 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1253 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
     /* negative frame_skip_exp: normalize per-MB and take the inverse power */
1262 if (s->avctx->frame_skip_exp < 0)
1263 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1264 -1.0/s->avctx->frame_skip_exp);
     /* skip if below the absolute threshold ... */
1266 if (score64 < s->avctx->frame_skip_threshold)
     /* ... or below the lambda-scaled factor (factor is in 1/256 units) */
1268 if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
/**
 * Helper for estimate_best_b_count(): encode one frame with the scratch
 * encoder context c and (per the elided tail) report the produced size.
 * Uses the legacy avcodec_encode_video2() API; the packet is freed before
 * returning, so only its size survives.
 * NOTE(review): error handling and the return statement are elided here.
 */
1273 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1275 AVPacket pkt = { 0 };
1276 int ret, got_output;
1278 av_init_packet(&pkt);
1279 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1284 av_free_packet(&pkt);
1288 static int estimate_best_b_count(MpegEncContext *s)
1290 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1291 AVCodecContext *c = avcodec_alloc_context3(NULL);
1292 const int scale = s->avctx->brd_scale;
1293 int i, j, out_size, p_lambda, b_lambda, lambda2;
1294 int64_t best_rd = INT64_MAX;
1295 int best_b_count = -1;
1298 return AVERROR(ENOMEM);
1299 av_assert0(scale >= 0 && scale <= 3);
1302 //s->next_picture_ptr->quality;
1303 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1304 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1305 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1306 if (!b_lambda) // FIXME we should do this somewhere else
1307 b_lambda = p_lambda;
1308 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1311 c->width = s->width >> scale;
1312 c->height = s->height >> scale;
1313 c->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR;
1314 c->flags |= s->avctx->flags & CODEC_FLAG_QPEL;
1315 c->mb_decision = s->avctx->mb_decision;
1316 c->me_cmp = s->avctx->me_cmp;
1317 c->mb_cmp = s->avctx->mb_cmp;
1318 c->me_sub_cmp = s->avctx->me_sub_cmp;
1319 c->pix_fmt = AV_PIX_FMT_YUV420P;
1320 c->time_base = s->avctx->time_base;
1321 c->max_b_frames = s->max_b_frames;
1323 if (avcodec_open2(c, codec, NULL) < 0)
1326 for (i = 0; i < s->max_b_frames + 2; i++) {
1327 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1328 s->next_picture_ptr;
1331 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1332 pre_input = *pre_input_ptr;
1333 memcpy(data, pre_input_ptr->f->data, sizeof(data));
1335 if (!pre_input.shared && i) {
1336 data[0] += INPLACE_OFFSET;
1337 data[1] += INPLACE_OFFSET;
1338 data[2] += INPLACE_OFFSET;
1341 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1342 s->tmp_frames[i]->linesize[0],
1344 pre_input.f->linesize[0],
1345 c->width, c->height);
1346 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1347 s->tmp_frames[i]->linesize[1],
1349 pre_input.f->linesize[1],
1350 c->width >> 1, c->height >> 1);
1351 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1352 s->tmp_frames[i]->linesize[2],
1354 pre_input.f->linesize[2],
1355 c->width >> 1, c->height >> 1);
1359 for (j = 0; j < s->max_b_frames + 1; j++) {
1362 if (!s->input_picture[j])
1365 c->error[0] = c->error[1] = c->error[2] = 0;
1367 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1368 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1370 out_size = encode_frame(c, s->tmp_frames[0]);
1372 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1374 for (i = 0; i < s->max_b_frames + 1; i++) {
1375 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1377 s->tmp_frames[i + 1]->pict_type = is_p ?
1378 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1379 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1381 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1383 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1386 /* get the delayed frames */
1388 out_size = encode_frame(c, NULL);
1389 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1392 rd += c->error[0] + c->error[1] + c->error[2];
1403 return best_b_count;
/**
 * Pick the next picture to encode and reorder the input queue for B-frames.
 *
 * Responsibilities visible here: frame-skip check against the previous
 * reference, pass-2 picture-type replay from the rate-control log, the
 * three b_frame_strategy modes (fixed count / scene-score / brute force),
 * GOP and closed-GOP constraints, and finally setting up s->new_picture
 * and s->current_picture(_ptr) — allocating a fresh internal picture when
 * the input buffer is shared or VBV re-encoding may be needed.
 * NOTE(review): several branch bodies and loop closers are elided in this
 * excerpt; comments below describe only the visible statements.
 */
1406 static int select_input_picture(MpegEncContext *s)
     /* shift the reordered queue down by one slot */
1410 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1411 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1412 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1414 /* set next picture type & ordering */
1415 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1416 if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1417 if (s->picture_in_gop_number < s->gop_size &&
1418 s->next_picture_ptr &&
1419 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1420 // FIXME check that te gop check above is +-1 correct
1421 av_frame_unref(s->input_picture[0]->f);
     /* tell rate control the frame cost 0 bits */
1423 ff_vbv_update(s, 0);
     /* no reference yet (or intra-only codec): force an I picture */
1429 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1430 !s->next_picture_ptr || s->intra_only) {
1431 s->reordered_input_picture[0] = s->input_picture[0];
1432 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1433 s->reordered_input_picture[0]->f->coded_picture_number =
1434 s->coded_picture_number++;
     /* pass 2: replay the picture types recorded in the first pass */
1438 if (s->avctx->flags & CODEC_FLAG_PASS2) {
1439 for (i = 0; i < s->max_b_frames + 1; i++) {
1440 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1442 if (pict_num >= s->rc_context.num_entries)
1444 if (!s->input_picture[i]) {
1445 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1449 s->input_picture[i]->f->pict_type =
1450 s->rc_context.entry[pict_num].new_pict_type;
     /* strategy 0: always use the maximum available run of B-frames */
1454 if (s->avctx->b_frame_strategy == 0) {
1455 b_frames = s->max_b_frames;
1456 while (b_frames && !s->input_picture[b_frames])
     /* strategy 1: score inter-frame difference, stop at a scene change */
1458 } else if (s->avctx->b_frame_strategy == 1) {
1459 for (i = 1; i < s->max_b_frames + 1; i++) {
1460 if (s->input_picture[i] &&
1461 s->input_picture[i]->b_frame_score == 0) {
1462 s->input_picture[i]->b_frame_score =
1464 s->input_picture[i ]->f->data[0],
1465 s->input_picture[i - 1]->f->data[0],
1469 for (i = 0; i < s->max_b_frames + 1; i++) {
1470 if (!s->input_picture[i] ||
1471 s->input_picture[i]->b_frame_score - 1 >
1472 s->mb_num / s->avctx->b_sensitivity)
1476 b_frames = FFMAX(0, i - 1);
     /* reset the cached scores of the consumed pictures */
1479 for (i = 0; i < b_frames + 1; i++) {
1480 s->input_picture[i]->b_frame_score = 0;
     /* strategy 2: exhaustive low-resolution trial encodes */
1482 } else if (s->avctx->b_frame_strategy == 2) {
1483 b_frames = estimate_best_b_count(s);
1485 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
     /* a user-forced non-B type inside the run shortens it */
1491 for (i = b_frames - 1; i >= 0; i--) {
1492 int type = s->input_picture[i]->f->pict_type;
1493 if (type && type != AV_PICTURE_TYPE_B)
1496 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1497 b_frames == s->max_b_frames) {
1498 av_log(s->avctx, AV_LOG_ERROR,
1499 "warning, too many b frames in a row\n");
     /* GOP boundary handling: clamp for strict GOP, force I at the cut */
1502 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1503 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1504 s->gop_size > s->picture_in_gop_number) {
1505 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1507 if (s->avctx->flags & CODEC_FLAG_CLOSED_GOP)
1509 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1513 if ((s->avctx->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
1514 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
     /* the anchor (P or I) is emitted first, its B-frames follow */
1517 s->reordered_input_picture[0] = s->input_picture[b_frames];
1518 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1519 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1520 s->reordered_input_picture[0]->f->coded_picture_number =
1521 s->coded_picture_number++;
1522 for (i = 0; i < b_frames; i++) {
1523 s->reordered_input_picture[i + 1] = s->input_picture[i];
1524 s->reordered_input_picture[i + 1]->f->pict_type =
1526 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1527 s->coded_picture_number++;
1532 if (s->reordered_input_picture[0]) {
     /* B pictures are never referenced; others are (both fields => 3) */
1533 s->reordered_input_picture[0]->reference =
1534 s->reordered_input_picture[0]->f->pict_type !=
1535 AV_PICTURE_TYPE_B ? 3 : 0;
1537 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1538 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1541 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1542 // input is a shared pix, so we can't modifiy it -> alloc a new
1543 // one & ensure that the shared one is reuseable
1546 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1549 pic = &s->picture[i];
1551 pic->reference = s->reordered_input_picture[0]->reference;
1552 if (alloc_picture(s, pic, 0) < 0) {
1556 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1560 /* mark us unused / free shared pic */
1561 av_frame_unref(s->reordered_input_picture[0]->f);
1562 s->reordered_input_picture[0]->shared = 0;
1564 s->current_picture_ptr = pic;
1566 // input is not a shared pix -> reuse buffer for current_pix
1567 s->current_picture_ptr = s->reordered_input_picture[0];
1568 for (i = 0; i < 4; i++) {
1569 s->new_picture.f->data[i] += INPLACE_OFFSET;
1572 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1573 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1574 s->current_picture_ptr)) < 0)
1577 s->picture_number = s->new_picture.f->display_picture_number;
1579 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1584 static void frame_end(MpegEncContext *s)
1586 if (s->unrestricted_mv &&
1587 s->current_picture.reference &&
1589 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1590 int hshift = desc->log2_chroma_w;
1591 int vshift = desc->log2_chroma_h;
1592 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1593 s->current_picture.f->linesize[0],
1594 s->h_edge_pos, s->v_edge_pos,
1595 EDGE_WIDTH, EDGE_WIDTH,
1596 EDGE_TOP | EDGE_BOTTOM);
1597 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1598 s->current_picture.f->linesize[1],
1599 s->h_edge_pos >> hshift,
1600 s->v_edge_pos >> vshift,
1601 EDGE_WIDTH >> hshift,
1602 EDGE_WIDTH >> vshift,
1603 EDGE_TOP | EDGE_BOTTOM);
1604 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1605 s->current_picture.f->linesize[2],
1606 s->h_edge_pos >> hshift,
1607 s->v_edge_pos >> vshift,
1608 EDGE_WIDTH >> hshift,
1609 EDGE_WIDTH >> vshift,
1610 EDGE_TOP | EDGE_BOTTOM);
1615 s->last_pict_type = s->pict_type;
1616 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1617 if (s->pict_type!= AV_PICTURE_TYPE_B)
1618 s->last_non_b_pict_type = s->pict_type;
1620 s->avctx->coded_frame = s->current_picture_ptr->f;
/**
 * Refresh the DCT-domain noise-reduction offset tables.
 *
 * Maintains, separately for intra and inter blocks, a decaying average of
 * the per-coefficient quantization error: once the sample count exceeds
 * 2^16, both the error sums and the count are halved. The per-coefficient
 * offset is then noise_reduction * count / error — i.e. coefficients with
 * large historical error get a smaller offset.
 */
1624 static void update_noise_reduction(MpegEncContext *s)
1628 for (intra = 0; intra < 2; intra++) {
     /* halve sums and count to implement an exponentially decaying average */
1629 if (s->dct_count[intra] > (1 << 16)) {
1630 for (i = 0; i < 64; i++) {
1631 s->dct_error_sum[intra][i] >>= 1;
1633 s->dct_count[intra] >>= 1;
1636 for (i = 0; i < 64; i++) {
     /* +error/2 and +1 round the division and avoid division by zero */
1637 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1638 s->dct_count[intra] +
1639 s->dct_error_sum[intra][i] / 2) /
1640 (s->dct_error_sum[intra][i] + 1);
/**
 * Prepare encoder state before encoding the current picture.
 *
 * Releases the now-unneeded oldest reference, stamps pict_type/key_frame
 * on the current picture, rotates last/next reference pointers for non-B
 * pictures, re-references last/next/current working copies, doubles the
 * line sizes for field pictures, selects the dequantizer functions
 * matching the output format, and refreshes the noise-reduction tables.
 */
1645 static int frame_start(MpegEncContext *s)
1649 /* mark & release old frames */
1650 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1651 s->last_picture_ptr != s->next_picture_ptr &&
1652 s->last_picture_ptr->f->buf[0]) {
1653 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1656 s->current_picture_ptr->f->pict_type = s->pict_type;
1657 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1659 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1660 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1661 s->current_picture_ptr)) < 0)
     /* non-B pictures become the new forward reference */
1664 if (s->pict_type != AV_PICTURE_TYPE_B) {
1665 s->last_picture_ptr = s->next_picture_ptr;
1667 s->next_picture_ptr = s->current_picture_ptr;
1670 if (s->last_picture_ptr) {
1671 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1672 if (s->last_picture_ptr->f->buf[0] &&
1673 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1674 s->last_picture_ptr)) < 0)
1677 if (s->next_picture_ptr) {
1678 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1679 if (s->next_picture_ptr->f->buf[0] &&
1680 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1681 s->next_picture_ptr)) < 0)
     /* field pictures: address only every other line of the frame buffers */
1685 if (s->picture_structure!= PICT_FRAME) {
1687 for (i = 0; i < 4; i++) {
1688 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1689 s->current_picture.f->data[i] +=
1690 s->current_picture.f->linesize[i];
1692 s->current_picture.f->linesize[i] *= 2;
1693 s->last_picture.f->linesize[i] *= 2;
1694 s->next_picture.f->linesize[i] *= 2;
     /* select the dequantizer pair matching the bitstream syntax */
1698 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1699 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1700 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1701 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1702 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1703 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1705 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1706 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1709 if (s->dct_error_sum) {
1710 av_assert2(s->avctx->noise_reduction && s->encoding);
1711 update_noise_reduction(s);
/**
 * Main MPEG-video encoder entry point: encode one input frame into pkt.
 *
 * Flow visible here: buffer the input picture, select/reorder the next
 * picture to code, allocate the output packet (growing buffer for single
 * slice contexts), split the packet range across slice threads, encode,
 * then: export per-type bit statistics, handle VBV overflow by raising
 * lambda and re-encoding, write pass-1 stats, append stuffing bytes,
 * patch the MPEG-1/2 vbv_delay field for CBR, and set pts/dts/key flags
 * on the packet.
 * NOTE(review): a number of branch bodies, loop closers and the re-encode
 * loop structure are elided in this excerpt.
 */
1717 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1718 const AVFrame *pic_arg, int *got_packet)
1720 MpegEncContext *s = avctx->priv_data;
1721 int i, stuffing_count, ret;
1722 int context_count = s->slice_context_count;
1724 s->picture_in_gop_number++;
1726 if (load_input_picture(s, pic_arg) < 0)
1729 if (select_input_picture(s) < 0) {
1734 if (s->new_picture.f->data[0]) {
     /* single slice context without a caller buffer: let the internal
      * byte buffer grow instead of preallocating the worst case */
1735 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1736 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - FF_INPUT_BUFFER_PADDING_SIZE
1738 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1739 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size)) < 0)
1742 s->mb_info_ptr = av_packet_new_side_data(pkt,
1743 AV_PKT_DATA_H263_MB_INFO,
1744 s->mb_width*s->mb_height*12);
1745 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
     /* give each slice thread a proportional share of the packet buffer */
1748 for (i = 0; i < context_count; i++) {
1749 int start_y = s->thread_context[i]->start_mb_y;
1750 int end_y = s->thread_context[i]-> end_mb_y;
1751 int h = s->mb_height;
1752 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1753 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1755 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1758 s->pict_type = s->new_picture.f->pict_type;
1760 ret = frame_start(s);
1764 ret = encode_picture(s, s->picture_number);
     /* the internal buffer may have been reallocated while growing */
1765 if (growing_buffer) {
1766 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1767 pkt->data = s->pb.buf;
1768 pkt->size = avctx->internal->byte_buffer_size;
     /* export per-category bit statistics to the public context */
1773 avctx->header_bits = s->header_bits;
1774 avctx->mv_bits = s->mv_bits;
1775 avctx->misc_bits = s->misc_bits;
1776 avctx->i_tex_bits = s->i_tex_bits;
1777 avctx->p_tex_bits = s->p_tex_bits;
1778 avctx->i_count = s->i_count;
1779 // FIXME f/b_count in avctx
1780 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1781 avctx->skip_count = s->skip_count;
1785 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1786 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
     /* VBV overflow recovery: raise lambda and re-encode the frame */
1788 if (avctx->rc_buffer_size) {
1789 RateControlContext *rcc = &s->rc_context;
1790 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1792 if (put_bits_count(&s->pb) > max_size &&
1793 s->lambda < s->lmax) {
1794 s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1795 (s->qscale + 1) / s->qscale);
1796 if (s->adaptive_quant) {
1798 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1799 s->lambda_table[i] =
1800 FFMAX(s->lambda_table[i] + 1,
1801 s->lambda_table[i] * (s->qscale + 1) /
1804 s->mb_skipped = 0; // done in frame_start()
1805 // done in encode_picture() so we must undo it
1806 if (s->pict_type == AV_PICTURE_TYPE_P) {
1807 if (s->flipflop_rounding ||
1808 s->codec_id == AV_CODEC_ID_H263P ||
1809 s->codec_id == AV_CODEC_ID_MPEG4)
1810 s->no_rounding ^= 1;
1812 if (s->pict_type != AV_PICTURE_TYPE_B) {
1813 s->time_base = s->last_time_base;
1814 s->last_non_b_time = s->time - s->pp_time;
     /* rewind all slice bit writers before re-encoding */
1816 for (i = 0; i < context_count; i++) {
1817 PutBitContext *pb = &s->thread_context[i]->pb;
1818 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1820 av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1824 av_assert0(s->avctx->rc_max_rate);
1827 if (s->avctx->flags & CODEC_FLAG_PASS1)
1828 ff_write_pass1_stats(s);
1830 for (i = 0; i < 4; i++) {
1831 s->current_picture_ptr->f->error[i] =
1832 s->current_picture.f->error[i] =
1833 s->current_picture.error[i];
1834 avctx->error[i] += s->current_picture_ptr->f->error[i];
1837 if (s->avctx->flags & CODEC_FLAG_PASS1)
1838 assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1839 avctx->i_tex_bits + avctx->p_tex_bits ==
1840 put_bits_count(&s->pb));
1841 flush_put_bits(&s->pb);
1842 s->frame_bits = put_bits_count(&s->pb);
     /* rate control tells us how many stuffing bytes to append */
1844 stuffing_count = ff_vbv_update(s, s->frame_bits);
1845 s->stuffing_bits = 8*stuffing_count;
1846 if (stuffing_count) {
1847 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1848 stuffing_count + 50) {
1849 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
     /* codec-specific stuffing syntax */
1853 switch (s->codec_id) {
1854 case AV_CODEC_ID_MPEG1VIDEO:
1855 case AV_CODEC_ID_MPEG2VIDEO:
1856 while (stuffing_count--) {
1857 put_bits(&s->pb, 8, 0);
1860 case AV_CODEC_ID_MPEG4:
     /* MPEG-4 stuffing: a stuffing start code followed by 0xFF bytes */
1861 put_bits(&s->pb, 16, 0);
1862 put_bits(&s->pb, 16, 0x1C3);
1863 stuffing_count -= 4;
1864 while (stuffing_count--) {
1865 put_bits(&s->pb, 8, 0xFF);
1869 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1871 flush_put_bits(&s->pb);
1872 s->frame_bits = put_bits_count(&s->pb);
1875 /* update mpeg1/2 vbv_delay for CBR */
1876 if (s->avctx->rc_max_rate &&
1877 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1878 s->out_format == FMT_MPEG1 &&
1879 90000LL * (avctx->rc_buffer_size - 1) <=
1880 s->avctx->rc_max_rate * 0xFFFFLL) {
1881 int vbv_delay, min_delay;
1882 double inbits = s->avctx->rc_max_rate *
1883 av_q2d(s->avctx->time_base);
1884 int minbits = s->frame_bits - 8 *
1885 (s->vbv_delay_ptr - s->pb.buf - 1);
1886 double bits = s->rc_context.buffer_index + minbits - inbits;
1889 av_log(s->avctx, AV_LOG_ERROR,
1890 "Internal error, negative bits\n");
1892 assert(s->repeat_first_field == 0);
     /* vbv_delay is expressed in 90 kHz clock ticks */
1894 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1895 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1896 s->avctx->rc_max_rate;
1898 vbv_delay = FFMAX(vbv_delay, min_delay);
1900 av_assert0(vbv_delay < 0xFFFF);
     /* patch the 16-bit vbv_delay field in place, spread over 3 bytes */
1902 s->vbv_delay_ptr[0] &= 0xF8;
1903 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1904 s->vbv_delay_ptr[1] = vbv_delay >> 5;
1905 s->vbv_delay_ptr[2] &= 0x07;
1906 s->vbv_delay_ptr[2] |= vbv_delay << 3;
1907 avctx->vbv_delay = vbv_delay * 300;
1909 s->total_bits += s->frame_bits;
1910 avctx->frame_bits = s->frame_bits;
     /* dts: derived from the reordered pts chain for delayed output */
1912 pkt->pts = s->current_picture.f->pts;
1913 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
1914 if (!s->current_picture.f->coded_picture_number)
1915 pkt->dts = pkt->pts - s->dts_delta;
1917 pkt->dts = s->reordered_pts;
1918 s->reordered_pts = pkt->pts;
1920 pkt->dts = pkt->pts;
1921 if (s->current_picture.f->key_frame)
1922 pkt->flags |= AV_PKT_FLAG_KEY;
1924 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
1929 /* release non-reference frames */
1930 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1931 if (!s->picture[i].reference)
1932 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1935 av_assert1((s->frame_bits & 7) == 0);
1937 pkt->size = s->frame_bits / 8;
1938 *got_packet = !!pkt->size;
/**
 * Cheap coefficient-elimination heuristic for block n.
 *
 * If the block contains only a few small coefficients near DC (scored via
 * tab[], which weights low-frequency positions more), the whole block is
 * zeroed so it costs no bits. A negative threshold additionally keeps the
 * DC coefficient (skip_dc, per the elided setup). Scoring aborts early
 * (elided return paths) when any level exceeds 1 or the score passes the
 * threshold.
 */
1942 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1943 int n, int threshold)
     /* position weights in scan order: DC and first ACs count most */
1945 static const char tab[64] = {
1946 3, 2, 2, 1, 1, 1, 1, 1,
1947 1, 1, 1, 1, 1, 1, 1, 1,
1948 1, 1, 1, 1, 1, 1, 1, 1,
1949 0, 0, 0, 0, 0, 0, 0, 0,
1950 0, 0, 0, 0, 0, 0, 0, 0,
1951 0, 0, 0, 0, 0, 0, 0, 0,
1952 0, 0, 0, 0, 0, 0, 0, 0,
1953 0, 0, 0, 0, 0, 0, 0, 0
1958 int16_t *block = s->block[n];
1959 const int last_index = s->block_last_index[n];
     /* negative threshold means: keep DC, eliminate only the ACs */
1962 if (threshold < 0) {
1964 threshold = -threshold;
1968 /* Are all we could set to zero already zero? */
1969 if (last_index <= skip_dc - 1)
1972 for (i = 0; i <= last_index; i++) {
1973 const int j = s->intra_scantable.permutated[i];
1974 const int level = FFABS(block[j]);
1976 if (skip_dc && i == 0)
1980 } else if (level > 1) {
1986 if (score >= threshold)
     /* zero every coefficient we are allowed to drop */
1988 for (i = skip_dc; i <= last_index; i++) {
1989 const int j = s->intra_scantable.permutated[i];
     /* last_index: 0 if DC survives, -1 if the block is fully empty */
1993 s->block_last_index[n] = 0;
1995 s->block_last_index[n] = -1;
/**
 * Clamp quantized coefficients of one block into the codec's legal range
 * [min_qcoeff, max_qcoeff], counting how many overflowed. For intra
 * blocks (elided condition) the DC coefficient is exempt from clipping.
 * A diagnostic is printed only with simple MB decision, since RD modes
 * handle overflow differently.
 */
1998 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2002 const int maxlevel = s->max_qcoeff;
2003 const int minlevel = s->min_qcoeff;
2007 i = 1; // skip clipping of intra dc
2011 for (; i <= last_index; i++) {
2012 const int j = s->intra_scantable.permutated[i];
2013 int level = block[j];
2015 if (level > maxlevel) {
2018 } else if (level < minlevel) {
2026 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2027 av_log(s->avctx, AV_LOG_INFO,
2028 "warning, clipping %d dct coefficients to %d..%d\n",
2029 overflow, minlevel, maxlevel);
/**
 * Compute a perceptual weight for every pixel of an 8x8 block, used by the
 * quantizer noise shaping. For each pixel, sum/sum-of-squares statistics
 * are gathered over its up-to-3x3 neighbourhood (clamped at block edges)
 * and turned into 36 * sqrt(count*sqr - sum^2) / count — i.e. proportional
 * to the local standard deviation, so flat areas get low weights.
 * NOTE(review): the accumulation of sum/sqr/count is elided here; the
 * formula on the last line implies the usual variance statistics.
 */
2032 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2036 for (y = 0; y < 8; y++) {
2037 for (x = 0; x < 8; x++) {
2043 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2044 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2045 int v = ptr[x2 + y2 * stride];
2051 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
/**
 * Encode one macroblock: the heart of the encoder's per-MB path.
 *
 * Stages visible below:
 *   1. adaptive quantization (per-MB qscale/dquant with codec limits),
 *   2. source pointer setup with edge emulation at picture borders,
 *   3. intra path: interlaced-DCT decision and get_pixels into s->block,
 *   4. inter path: motion compensation into s->dest, interlaced-DCT
 *      decision on the residual, diff_pixels, and SAD-based pre-quantizer
 *      block skipping,
 *   5. optional quantizer noise shaping (visual weights + refinement),
 *   6. DCT + quantization + coefficient clipping per block,
 *   7. luma/chroma single-coefficient elimination,
 *   8. gray-only handling and alternate-scan last_index fixup,
 *   9. dispatch to the codec-specific MB bitstream writer.
 * The template parameters (elided in part) select the chroma block
 * geometry; see encode_mb() for the 420/422/444 instantiations.
 * NOTE(review): numerous lines are elided throughout; comments describe
 * only what the visible statements establish.
 */
2056 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2057 int motion_x, int motion_y,
2058 int mb_block_height,
2062 int16_t weight[12][64];
2063 int16_t orig[12][64];
2064 const int mb_x = s->mb_x;
2065 const int mb_y = s->mb_y;
2068 int dct_offset = s->linesize * 8; // default for progressive frames
2069 int uv_dct_offset = s->uvlinesize * 8;
2070 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2071 ptrdiff_t wrap_y, wrap_c;
2073 for (i = 0; i < mb_block_count; i++)
2074 skip_dct[i] = s->skipdct;
     /* ---- stage 1: adaptive quantization ---- */
2076 if (s->adaptive_quant) {
2077 const int last_qp = s->qscale;
2078 const int mb_xy = mb_x + mb_y * s->mb_stride;
2080 s->lambda = s->lambda_table[mb_xy];
2083 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2084 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2085 s->dquant = s->qscale - last_qp;
     /* H.263-family syntax only allows dquant in [-2, 2] */
2087 if (s->out_format == FMT_H263) {
2088 s->dquant = av_clip(s->dquant, -2, 2);
2090 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2092 if (s->pict_type == AV_PICTURE_TYPE_B) {
2093 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2096 if (s->mv_type == MV_TYPE_8X8)
2102 ff_set_qscale(s, last_qp + s->dquant);
2103 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2104 ff_set_qscale(s, s->qscale + s->dquant);
     /* ---- stage 2: source pointers, with border emulation ---- */
2106 wrap_y = s->linesize;
2107 wrap_c = s->uvlinesize;
2108 ptr_y = s->new_picture.f->data[0] +
2109 (mb_y * 16 * wrap_y) + mb_x * 16;
2110 ptr_cb = s->new_picture.f->data[1] +
2111 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2112 ptr_cr = s->new_picture.f->data[2] +
2113 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
     /* MB sticks out of the picture: copy into the edge buffer instead */
2115 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2116 uint8_t *ebuf = s->sc.edge_emu_buffer + 36 * wrap_y;
2117 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2118 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2119 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2121 16, 16, mb_x * 16, mb_y * 16,
2122 s->width, s->height);
2124 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2126 mb_block_width, mb_block_height,
2127 mb_x * mb_block_width, mb_y * mb_block_height,
2129 ptr_cb = ebuf + 16 * wrap_y;
2130 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2132 mb_block_width, mb_block_height,
2133 mb_x * mb_block_width, mb_y * mb_block_height,
2135 ptr_cr = ebuf + 16 * wrap_y + 16;
     /* ---- stage 3: intra path — frame vs field DCT decision ---- */
2139 if (s->avctx->flags & CODEC_FLAG_INTERLACED_DCT) {
2140 int progressive_score, interlaced_score;
2142 s->interlaced_dct = 0;
2143 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2144 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2145 NULL, wrap_y, 8) - 400;
2147 if (progressive_score > 0) {
2148 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2149 NULL, wrap_y * 2, 8) +
2150 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2151 NULL, wrap_y * 2, 8);
2152 if (progressive_score > interlaced_score) {
2153 s->interlaced_dct = 1;
     /* field DCT: second block row starts one line (not 8) below */
2155 dct_offset = wrap_y;
2156 uv_dct_offset = wrap_c;
2158 if (s->chroma_format == CHROMA_422 ||
2159 s->chroma_format == CHROMA_444)
     /* load the four luma and (unless gray) the chroma blocks */
2165 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2166 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2167 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2168 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2170 if (s->avctx->flags & CODEC_FLAG_GRAY) {
2174 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2175 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2176 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2177 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2178 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2179 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2180 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2181 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2182 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2183 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2184 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2185 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
     /* ---- stage 4: inter path — motion compensation + residual ---- */
2189 op_pixels_func (*op_pix)[4];
2190 qpel_mc_func (*op_qpix)[16];
2191 uint8_t *dest_y, *dest_cb, *dest_cr;
2193 dest_y = s->dest[0];
2194 dest_cb = s->dest[1];
2195 dest_cr = s->dest[2];
2197 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2198 op_pix = s->hdsp.put_pixels_tab;
2199 op_qpix = s->qdsp.put_qpel_pixels_tab;
2201 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2202 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2205 if (s->mv_dir & MV_DIR_FORWARD) {
2206 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2207 s->last_picture.f->data,
     /* second prediction is averaged in for bidirectional MBs */
2209 op_pix = s->hdsp.avg_pixels_tab;
2210 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2212 if (s->mv_dir & MV_DIR_BACKWARD) {
2213 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2214 s->next_picture.f->data,
     /* frame vs field DCT decision on the prediction residual */
2218 if (s->avctx->flags & CODEC_FLAG_INTERLACED_DCT) {
2219 int progressive_score, interlaced_score;
2221 s->interlaced_dct = 0;
2222 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2223 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2227 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2228 progressive_score -= 400;
2230 if (progressive_score > 0) {
2231 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2233 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2237 if (progressive_score > interlaced_score) {
2238 s->interlaced_dct = 1;
2240 dct_offset = wrap_y;
2241 uv_dct_offset = wrap_c;
2243 if (s->chroma_format == CHROMA_422)
     /* residual = source - prediction, per 8x8 block */
2249 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2250 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2251 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2252 dest_y + dct_offset, wrap_y);
2253 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2254 dest_y + dct_offset + 8, wrap_y);
2256 if (s->avctx->flags & CODEC_FLAG_GRAY) {
2260 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2261 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2262 if (!s->chroma_y_shift) { /* 422 */
2263 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2264 dest_cb + uv_dct_offset, wrap_c);
2265 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2266 dest_cr + uv_dct_offset, wrap_c);
2269 /* pre quantization */
     /* low-variance MBs: mark blocks with tiny SAD as skippable */
2270 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2271 2 * s->qscale * s->qscale) {
2273 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2275 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2277 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2278 wrap_y, 8) < 20 * s->qscale)
2280 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2281 wrap_y, 8) < 20 * s->qscale)
2283 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2285 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2287 if (!s->chroma_y_shift) { /* 422 */
2288 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2289 dest_cb + uv_dct_offset,
2290 wrap_c, 8) < 20 * s->qscale)
2292 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2293 dest_cr + uv_dct_offset,
2294 wrap_c, 8) < 20 * s->qscale)
     /* ---- stage 5: quantizer noise shaping prep ---- */
2300 if (s->quantizer_noise_shaping) {
2302 get_visual_weight(weight[0], ptr_y , wrap_y);
2304 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2306 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2308 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2310 get_visual_weight(weight[4], ptr_cb , wrap_c);
2312 get_visual_weight(weight[5], ptr_cr , wrap_c);
2313 if (!s->chroma_y_shift) { /* 422 */
2315 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2318 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
     /* keep the unquantized coefficients for the refinement pass */
2321 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2324 /* DCT & quantize */
2325 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2327 for (i = 0; i < mb_block_count; i++) {
2330 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2331 // FIXME we could decide to change to quantizer instead of
2333 // JS: I don't think that would be a good idea it could lower
2334 // quality instead of improve it. Just INTRADC clipping
2335 // deserves changes in quantizer
2337 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2339 s->block_last_index[i] = -1;
2341 if (s->quantizer_noise_shaping) {
2342 for (i = 0; i < mb_block_count; i++) {
2344 s->block_last_index[i] =
2345 dct_quantize_refine(s, s->block[i], weight[i],
2346 orig[i], i, s->qscale);
     /* ---- stage 7: per-block elimination (inter only) ---- */
2351 if (s->luma_elim_threshold && !s->mb_intra)
2352 for (i = 0; i < 4; i++)
2353 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2354 if (s->chroma_elim_threshold && !s->mb_intra)
2355 for (i = 4; i < mb_block_count; i++)
2356 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2358 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2359 for (i = 0; i < mb_block_count; i++) {
2360 if (s->block_last_index[i] == -1)
2361 s->coded_score[i] = INT_MAX / 256;
     /* gray-only intra: replace chroma with a flat mid-gray DC */
2366 if ((s->avctx->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
2367 s->block_last_index[4] =
2368 s->block_last_index[5] = 0;
2370 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2371 if (!s->chroma_y_shift) { /* 422 / 444 */
2372 for (i=6; i<12; i++) {
2373 s->block_last_index[i] = 0;
2374 s->block[i][0] = s->block[4][0];
2379 // non c quantize code returns incorrect block_last_index FIXME
2380 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2381 for (i = 0; i < mb_block_count; i++) {
2383 if (s->block_last_index[i] > 0) {
2384 for (j = 63; j > 0; j--) {
2385 if (s->block[i][s->intra_scantable.permutated[j]])
2388 s->block_last_index[i] = j;
2393 /* huffman encode */
2394 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2395 case AV_CODEC_ID_MPEG1VIDEO:
2396 case AV_CODEC_ID_MPEG2VIDEO:
2397 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2398 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2400 case AV_CODEC_ID_MPEG4:
2401 if (CONFIG_MPEG4_ENCODER)
2402 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2404 case AV_CODEC_ID_MSMPEG4V2:
2405 case AV_CODEC_ID_MSMPEG4V3:
2406 case AV_CODEC_ID_WMV1:
2407 if (CONFIG_MSMPEG4_ENCODER)
2408 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2410 case AV_CODEC_ID_WMV2:
2411 if (CONFIG_WMV2_ENCODER)
2412 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2414 case AV_CODEC_ID_H261:
2415 if (CONFIG_H261_ENCODER)
2416 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2418 case AV_CODEC_ID_H263:
2419 case AV_CODEC_ID_H263P:
2420 case AV_CODEC_ID_FLV1:
2421 case AV_CODEC_ID_RV10:
2422 case AV_CODEC_ID_RV20:
2423 if (CONFIG_H263_ENCODER)
2424 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2426 case AV_CODEC_ID_MJPEG:
2427 case AV_CODEC_ID_AMV:
2428 if (CONFIG_MJPEG_ENCODER)
2429 ff_mjpeg_encode_mb(s, s->block);
/* Encode one macroblock: dispatch to encode_mb_internal() with the chroma
 * block geometry implied by the picture's chroma format.
 * Args are (s, motion_x, motion_y, mb_block_height, mb_block_width?, mb_block_count):
 * 4:2:0 -> 8x8 chroma, 6 blocks; 4:2:2 -> 16x8, 8 blocks; 4:4:4 -> 16x16, 12 blocks. */
2436 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2438 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2439 else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2440 else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/* Snapshot the encoder state that encode_mb() may mutate, so a candidate
 * MB encoding can later be rolled back (used by the RD mb_decision loop).
 * 'type' selects which candidate MB type this copy is for (-1 = all). */
2443 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2446     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
     /* MPEG-1/2 specific: pending skip run must be restored on rollback */
2449     d->mb_skip_run= s->mb_skip_run;
2451         d->last_dc[i] = s->last_dc[i];
     /* statistics — needed so a discarded candidate doesn't pollute rate stats */
2454     d->mv_bits= s->mv_bits;
2455     d->i_tex_bits= s->i_tex_bits;
2456     d->p_tex_bits= s->p_tex_bits;
2457     d->i_count= s->i_count;
2458     d->f_count= s->f_count;
2459     d->b_count= s->b_count;
2460     d->skip_count= s->skip_count;
2461     d->misc_bits= s->misc_bits;
2465     d->qscale= s->qscale;
2466     d->dquant= s->dquant;
     /* MPEG-4 escape-3 coding state */
2468     d->esc3_level_length= s->esc3_level_length;
/* Counterpart of copy_context_before_encode(): after a candidate MB has been
 * encoded, copy the resulting encoder state from 's' into 'd' (either to
 * record the best candidate, or to commit the winner back into the context).
 * Copies strictly more than the "before" variant: MVs, MB mode flags,
 * per-block last indices and the partitioned-bitstream writers. */
2471 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2474     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2475     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2478     d->mb_skip_run= s->mb_skip_run;
2480         d->last_dc[i] = s->last_dc[i];
     /* statistics */
2483     d->mv_bits= s->mv_bits;
2484     d->i_tex_bits= s->i_tex_bits;
2485     d->p_tex_bits= s->p_tex_bits;
2486     d->i_count= s->i_count;
2487     d->f_count= s->f_count;
2488     d->b_count= s->b_count;
2489     d->skip_count= s->skip_count;
2490     d->misc_bits= s->misc_bits;
     /* the MB coding decision itself */
2492     d->mb_intra= s->mb_intra;
2493     d->mb_skipped= s->mb_skipped;
2494     d->mv_type= s->mv_type;
2495     d->mv_dir= s->mv_dir;
2497     if(s->data_partitioning){
     /* partitioned streams (MPEG-4): texture/secondary PutBitContexts travel too */
2499         d->tex_pb= s->tex_pb;
2503         d->block_last_index[i]= s->block_last_index[i];
2504     d->interlaced_dct= s->interlaced_dct;
2505     d->qscale= s->qscale;
2507     d->esc3_level_length= s->esc3_level_length;
/* Encode one macroblock as candidate 'type' for rate-distortion decision:
 * restore the pristine context from 'backup', encode into the scratch
 * PutBitContext selected by *next_block, score the result (bits, or
 * lambda-weighted bits+SSE for full RD), and if it beats *dmin record the
 * state in 'best'. Pixel output is redirected to the rd scratchpad so the
 * real destination is untouched until the winner is committed. */
2510 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2511                            PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2512                            int *dmin, int *next_block, int motion_x, int motion_y)
2515     uint8_t *dest_backup[3];
2517     copy_context_before_encode(s, backup, type);
     /* double-buffered bit writers: candidate goes into blocks[*next_block] */
2519     s->block= s->blocks[*next_block];
2520     s->pb= pb[*next_block];
2521     if(s->data_partitioning){
2522         s->pb2   = pb2   [*next_block];
2523         s->tex_pb= tex_pb[*next_block];
     /* redirect reconstruction into the scratchpad (restored below) */
2527         memcpy(dest_backup, s->dest, sizeof(s->dest));
2528         s->dest[0] = s->sc.rd_scratchpad;
2529         s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2530         s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2531         av_assert0(s->linesize >= 32); //FIXME
2534     encode_mb(s, motion_x, motion_y);
2536     score= put_bits_count(&s->pb);
2537     if(s->data_partitioning){
2538         score+= put_bits_count(&s->pb2);
2539         score+= put_bits_count(&s->tex_pb);
2542     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
     /* full RD: decode the candidate and add lambda2-weighted distortion */
2543         ff_mpv_decode_mb(s, s->block);
2545         score *= s->lambda2;
2546         score += sse_mb(s) << FF_LAMBDA_SHIFT;
2550         memcpy(s->dest, dest_backup, sizeof(s->dest));
2557         copy_context_after_encode(best, s, type);
/* Sum of squared errors between two pixel rectangles.
 * Fast SIMD paths via mecc.sse for the common 16x16 and 8x8 cases;
 * otherwise a scalar loop using the squared-value LUT (ff_square_tab is
 * biased by 256 so negative pixel differences index it correctly). */
2561 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2562     uint32_t *sq = ff_square_tab + 256;
2567         return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2568     else if(w==8 && h==8)
2569         return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2573             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/* SSE (or NSSE, per mb_cmp) of the current macroblock: reconstructed
 * s->dest[] planes vs. the source frame s->new_picture. Width/height are
 * clipped at the right/bottom picture edge; the clipped (partial-MB) case
 * falls through to the generic sse() helper. */
2582 static int sse_mb(MpegEncContext *s){
2586     if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2587     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
     /* full 16x16 MB: use the optimized comparators directly */
2590         if(s->avctx->mb_cmp == FF_CMP_NSSE){
2591             return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize   * 16, s->dest[0], s->linesize,   16) +
2592                    s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[1], s->uvlinesize,  8) +
2593                    s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[2], s->uvlinesize,  8);
2595             return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize   * 16, s->dest[0], s->linesize,   16) +
2596                    s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[1], s->uvlinesize,  8) +
2597                    s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[2], s->uvlinesize,  8);
     /* edge MB: scalar path handles arbitrary w/h (chroma halved, 4:2:0 assumed here) */
2600         return  sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2601                +sse(s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2602                +sse(s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/* Slice-thread worker: coarse pre-pass motion estimation (uses pre_dia_size).
 * Iterates the slice's MB rows BOTTOM-UP and right-to-left so the pre-pass
 * seeds predictors for the later full estimation pass. */
2605 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2606     MpegEncContext *s= *(void**)arg;
2610     s->me.dia_size= s->avctx->pre_dia_size;
2611     s->first_slice_line=1;
2612     for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2613         for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2614             ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2616         s->first_slice_line=0;
/* Slice-thread worker: full motion estimation for every MB of the slice.
 * Chooses B- or P-frame estimation per picture type; resulting MVs and MB
 * types are stored in the context's tables for the encode pass. */
2624 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2625     MpegEncContext *s= *(void**)arg;
2627     ff_check_alignment();
2629     s->me.dia_size= s->avctx->dia_size;
2630     s->first_slice_line=1;
2631     for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2632         s->mb_x=0; //for block init below
2633         ff_init_block_index(s);
2634         for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
             /* advance the luma/chroma block indices by one MB */
2635             s->block_index[0]+=2;
2636             s->block_index[1]+=2;
2637             s->block_index[2]+=2;
2638             s->block_index[3]+=2;
2640             /* compute motion vector & mb_type and store in context */
2641             if(s->pict_type==AV_PICTURE_TYPE_B)
2642                 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2644                 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2646         s->first_slice_line=0;
/* Slice-thread worker: per-MB luma variance and mean for adaptive
 * quantization / scene-change detection. variance = E[x^2] - E[x]^2 with
 * rounding (+500+128 then >>8); results land in current_picture.mb_var /
 * mb_mean and the slice-local sum in me.mb_var_sum_temp. */
2651 static int mb_var_thread(AVCodecContext *c, void *arg){
2652     MpegEncContext *s= *(void**)arg;
2655     ff_check_alignment();
2657     for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2658         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2661             uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2663             int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2665             varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2666                     (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2668             s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2669             s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2670             s->me.mb_var_sum_temp    += varc;
/* Finish the current slice's bitstream: merge MPEG-4 partitions, write the
 * codec's stuffing, byte-align and flush the PutBitContext, and account the
 * trailing bits as misc_bits for 2-pass rate control. */
2676 static void write_slice_end(MpegEncContext *s){
2677     if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2678         if(s->partitioned_frame){
2679             ff_mpeg4_merge_partitions(s);
2682         ff_mpeg4_stuffing(&s->pb);
2683     }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2684         ff_mjpeg_encode_stuffing(s);
2687     avpriv_align_put_bits(&s->pb);
2688     flush_put_bits(&s->pb);
2690     if ((s->avctx->flags & CODEC_FLAG_PASS1) && !s->partitioned_frame)
2691         s->misc_bits+= get_bits_diff(s);
/* Fill the last 12-byte slot of the H.263 mb_info side-data buffer with the
 * resync info for the current MB: bit offset, qscale, GOB number, MB address
 * within the GOB, and the horizontal/vertical MV predictors (4MV unsupported,
 * so the second predictor pair is written as 0). */
2694 static void write_mb_info(MpegEncContext *s)
2696     uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2697     int offset = put_bits_count(&s->pb);
2698     int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2699     int gobn = s->mb_y / s->gob_index;
2701     if (CONFIG_H263_ENCODER)
2702         ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2703     bytestream_put_le32(&ptr, offset);
2704     bytestream_put_byte(&ptr, s->qscale);
2705     bytestream_put_byte(&ptr, gobn);
2706     bytestream_put_le16(&ptr, mba);
2707     bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2708     bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2709     /* 4MV not implemented */
2710     bytestream_put_byte(&ptr, 0); /* hmv2 */
2711     bytestream_put_byte(&ptr, 0); /* vmv2 */
/* Maintain the mb_info side-data stream: once at least mb_info bytes have
 * been written since the last recorded entry, reserve a new 12-byte slot
 * (filled by write_mb_info at the following MB/startcode boundary).
 * 'startcode' marks that we are positioned right after a resync marker. */
2714 static void update_mb_info(MpegEncContext *s, int startcode)
2718     if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2719         s->mb_info_size += 12;
2720         s->prev_mb_info = s->last_mb_info;
2723         s->prev_mb_info = put_bits_count(&s->pb)/8;
2724         /* This might have incremented mb_info_size above, and we return without
2725          * actually writing any info into that slot yet. But in that case,
2726          * this will be called again at the start of the after writing the
2727          * start code, actually writing the mb info. */
2731     s->last_mb_info = put_bits_count(&s->pb)/8;
2732     if (!s->mb_info_size)
2733         s->mb_info_size += 12;
/* Grow the shared output bitstream buffer when fewer than 'threshold' bytes
 * remain. Only possible with a single slice context writing into
 * avctx->internal->byte_buffer; rebases the PutBitContext and fixes up the
 * raw pointers (ptr_lastgob, vbv_delay_ptr) that point into the old buffer.
 * Returns 0 on success, AVERROR(ENOMEM)/AVERROR(EINVAL) on failure. */
2737 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2739     if (   s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2740         && s->slice_context_count == 1
2741         && s->pb.buf == s->avctx->internal->byte_buffer) {
         /* offsets survive the realloc; raw pointers do not */
2742         int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2743         int vbv_pos     = s->vbv_delay_ptr - s->pb.buf;
2745         uint8_t *new_buffer = NULL;
2746         int new_buffer_size = 0;
2748         av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2749                               s->avctx->internal->byte_buffer_size + size_increase);
2751             return AVERROR(ENOMEM);
2753         memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2754         av_free(s->avctx->internal->byte_buffer);
2755         s->avctx->internal->byte_buffer      = new_buffer;
2756         s->avctx->internal->byte_buffer_size = new_buffer_size;
2757         rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2758         s->ptr_lastgob   = s->pb.buf + lastgob_pos;
2759         s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2761     if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2762         return AVERROR(EINVAL);
/* Slice-thread worker: encode all macroblocks of this slice into s->pb.
 * For each MB it either (a) runs the rate-distortion loop over every
 * candidate MB type via encode_mb_hq() when more than one type is possible
 * (or QP_RD is on), or (b) directly encodes the single possible type.
 * Also handles resync/GOB/video-packet headers, output buffer growth,
 * RTP callbacks, PSNR accumulation and the H.263 loop filter. */
2766 static int encode_thread(AVCodecContext *c, void *arg){
2767     MpegEncContext *s= *(void**)arg;
2768     int mb_x, mb_y, pdif = 0;
2769     int chr_h= 16>>s->chroma_y_shift;
2771     MpegEncContext best_s = { 0 }, backup_s;
     /* double-buffered scratch bitstreams for the RD candidate encodings */
2772     uint8_t bit_buf[2][MAX_MB_BYTES];
2773     uint8_t bit_buf2[2][MAX_MB_BYTES];
2774     uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2775     PutBitContext pb[2], pb2[2], tex_pb[2];
2777     ff_check_alignment();
2780         init_put_bits(&pb    [i], bit_buf    [i], MAX_MB_BYTES);
2781         init_put_bits(&pb2   [i], bit_buf2   [i], MAX_MB_BYTES);
2782         init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2785     s->last_bits= put_bits_count(&s->pb);
2796     /* init last dc values */
2797     /* note: quant matrix value (8) is implied here */
2798     s->last_dc[i] = 128 << s->intra_dc_precision;
2800     s->current_picture.error[i] = 0;
2802     if(s->codec_id==AV_CODEC_ID_AMV){
         /* AMV uses non-standard DC predictors */
2803         s->last_dc[0] = 128*8/13;
2804         s->last_dc[1] = 128*8/14;
2805         s->last_dc[2] = 128*8/14;
2808     memset(s->last_mv, 0, sizeof(s->last_mv));
2812     switch(s->codec_id){
2813     case AV_CODEC_ID_H263:
2814     case AV_CODEC_ID_H263P:
2815     case AV_CODEC_ID_FLV1:
2816         if (CONFIG_H263_ENCODER)
2817             s->gob_index = H263_GOB_HEIGHT(s->height);
2819     case AV_CODEC_ID_MPEG4:
2820         if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2821             ff_mpeg4_init_partitions(s);
2827     s->first_slice_line = 1;
2828     s->ptr_lastgob = s->pb.buf;
     /* ---- main MB loop ---- */
2829     for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2833         ff_set_qscale(s, s->qscale);
2834         ff_init_block_index(s);
2836         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2837             int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2838             int mb_type= s->mb_type[xy];
             /* make sure the output buffer can hold a worst-case MB */
2842             int size_increase =  s->avctx->internal->byte_buffer_size/4
2843                                + s->mb_width*MAX_MB_BYTES;
2845             ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
2846             if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2847                 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2850             if(s->data_partitioning){
2851                 if(   s->pb2   .buf_end - s->pb2   .buf - (put_bits_count(&s->    pb2)>>3) < MAX_MB_BYTES
2852                    || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2853                     av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2859             s->mb_y = mb_y;  // moved into loop, can get changed by H.261
2860             ff_update_block_index(s);
2862             if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2863                 ff_h261_reorder_mb_index(s);
2864                 xy= s->mb_y*s->mb_stride + s->mb_x;
2865                 mb_type= s->mb_type[xy];
2868             /* write gob / video packet header  */
2870                 int current_packet_size, is_gob_start;
2872                 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2874                 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2876                 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
                 /* codec-specific constraints on where a resync point may start */
2878                 switch(s->codec_id){
2879                 case AV_CODEC_ID_H263:
2880                 case AV_CODEC_ID_H263P:
2881                     if(!s->h263_slice_structured)
2882                         if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2884                 case AV_CODEC_ID_MPEG2VIDEO:
2885                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2886                 case AV_CODEC_ID_MPEG1VIDEO:
2887                     if(s->mb_skip_run) is_gob_start=0;
2889                 case AV_CODEC_ID_MJPEG:
2890                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2895                     if(s->start_mb_y != mb_y || mb_x!=0){
2898                         if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2899                             ff_mpeg4_init_partitions(s);
2903                     av_assert2((put_bits_count(&s->pb)&7) == 0);
2904                     current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
                     /* optional deliberate error injection for testing */
2906                     if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
2907                         int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2908                         int d = 100 / s->error_rate;
2910                             current_packet_size=0;
2911                             s->pb.buf_ptr= s->ptr_lastgob;
2912                             assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2916                     if (s->avctx->rtp_callback){
2917                         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2918                         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2920                     update_mb_info(s, 1);
                     /* emit the codec's resync / slice / GOB header */
2922                     switch(s->codec_id){
2923                     case AV_CODEC_ID_MPEG4:
2924                         if (CONFIG_MPEG4_ENCODER) {
2925                             ff_mpeg4_encode_video_packet_header(s);
2926                             ff_mpeg4_clean_buffers(s);
2929                     case AV_CODEC_ID_MPEG1VIDEO:
2930                     case AV_CODEC_ID_MPEG2VIDEO:
2931                         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2932                             ff_mpeg1_encode_slice_header(s);
2933                             ff_mpeg1_clean_buffers(s);
2936                     case AV_CODEC_ID_H263:
2937                     case AV_CODEC_ID_H263P:
2938                         if (CONFIG_H263_ENCODER)
2939                             ff_h263_encode_gob_header(s, mb_y);
2943                     if (s->avctx->flags & CODEC_FLAG_PASS1) {
2944                         int bits= put_bits_count(&s->pb);
2945                         s->misc_bits+= bits - s->last_bits;
2949                     s->ptr_lastgob += current_packet_size;
2950                     s->first_slice_line=1;
2951                     s->resync_mb_x=mb_x;
2952                     s->resync_mb_y=mb_y;
2956             if(  (s->resync_mb_x   == s->mb_x)
2957                && s->resync_mb_y+1 == s->mb_y){
2958                 s->first_slice_line=0;
2962             s->dquant=0; //only for QP_RD
2964             update_mb_info(s, 0);
2966             if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
                 /* ---- RD decision: try every candidate MB type, keep the cheapest ---- */
2968                 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2970                 copy_context_before_encode(&backup_s, s, -1);
2972                 best_s.data_partitioning= s->data_partitioning;
2973                 best_s.partitioned_frame= s->partitioned_frame;
2974                 if(s->data_partitioning){
2975                     backup_s.pb2= s->pb2;
2976                     backup_s.tex_pb= s->tex_pb;
2979                 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2980                     s->mv_dir = MV_DIR_FORWARD;
2981                     s->mv_type = MV_TYPE_16X16;
2983                     s->mv[0][0][0] = s->p_mv_table[xy][0];
2984                     s->mv[0][0][1] = s->p_mv_table[xy][1];
2985                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2986                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2988                 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2989                     s->mv_dir = MV_DIR_FORWARD;
2990                     s->mv_type = MV_TYPE_FIELD;
2993                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2994                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2995                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2997                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2998                                  &dmin, &next_block, 0, 0);
3000                 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3001                     s->mv_dir = MV_DIR_FORWARD;
3002                     s->mv_type = MV_TYPE_16X16;
3006                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3007                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3009                 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3010                     s->mv_dir = MV_DIR_FORWARD;
3011                     s->mv_type = MV_TYPE_8X8;
3014                         s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3015                         s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3017                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3018                                  &dmin, &next_block, 0, 0);
3020                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3021                     s->mv_dir = MV_DIR_FORWARD;
3022                     s->mv_type = MV_TYPE_16X16;
3024                     s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3025                     s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3026                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3027                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3029                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3030                     s->mv_dir = MV_DIR_BACKWARD;
3031                     s->mv_type = MV_TYPE_16X16;
3033                     s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3034                     s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3035                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3036                                  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3038                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3039                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3040                     s->mv_type = MV_TYPE_16X16;
3042                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3043                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3044                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3045                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3046                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3047                                  &dmin, &next_block, 0, 0);
3049                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3050                     s->mv_dir = MV_DIR_FORWARD;
3051                     s->mv_type = MV_TYPE_FIELD;
3054                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3055                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3056                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3058                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3059                                  &dmin, &next_block, 0, 0);
3061                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3062                     s->mv_dir = MV_DIR_BACKWARD;
3063                     s->mv_type = MV_TYPE_FIELD;
3066                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3067                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3068                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3070                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3071                                  &dmin, &next_block, 0, 0);
3073                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3074                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3075                     s->mv_type = MV_TYPE_FIELD;
3077                     for(dir=0; dir<2; dir++){
3079                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3080                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3081                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3084                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3085                                  &dmin, &next_block, 0, 0);
3087                 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3089                     s->mv_type = MV_TYPE_16X16;
3093                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3094                                  &dmin, &next_block, 0, 0);
3095                     if(s->h263_pred || s->h263_aic){
3097                             s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3099                             ff_clean_intra_table_entries(s); //old mode?
                 /* ---- optional QP_RD: re-encode best type at neighboring qscales ---- */
3103                 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3104                     if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3105                         const int last_qp= backup_s.qscale;
3108                         const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3109                         static const int dquant_tab[4]={-1,1,-2,2};
3110                         int storecoefs = s->mb_intra && s->dc_val[0];
3112                         av_assert2(backup_s.dquant == 0);
3115                         s->mv_dir= best_s.mv_dir;
3116                         s->mv_type = MV_TYPE_16X16;
3117                         s->mb_intra= best_s.mb_intra;
3118                         s->mv[0][0][0] = best_s.mv[0][0][0];
3119                         s->mv[0][0][1] = best_s.mv[0][0][1];
3120                         s->mv[1][0][0] = best_s.mv[1][0][0];
3121                         s->mv[1][0][1] = best_s.mv[1][0][1];
3123                         qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3124                         for(; qpi<4; qpi++){
3125                             int dquant= dquant_tab[qpi];
3126                             qp= last_qp + dquant;
3127                             if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3129                             backup_s.dquant= dquant;
                             /* save DC/AC prediction state so a losing qp can be undone */
3132                                     dc[i]= s->dc_val[0][ s->block_index[i] ];
3133                                     memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3137                             encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3138                                          &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3139                             if(best_s.qscale != qp){
3142                                         s->dc_val[0][ s->block_index[i] ]= dc[i];
3143                                         memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3150                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3151                     int mx= s->b_direct_mv_table[xy][0];
3152                     int my= s->b_direct_mv_table[xy][1];
3154                     backup_s.dquant = 0;
3155                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3157                     ff_mpeg4_set_direct_mv(s, mx, my);
3158                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3159                                  &dmin, &next_block, mx, my);
3161                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3162                     backup_s.dquant = 0;
3163                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3165                     ff_mpeg4_set_direct_mv(s, 0, 0);
3166                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3167                                  &dmin, &next_block, 0, 0);
                 /* ---- optional SKIP_RD: also try coding the winner as skipped ---- */
3169                 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3172                         coded |= s->block_last_index[i];
3175                         memcpy(s->mv, best_s.mv, sizeof(s->mv));
3176                         if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3177                             mx=my=0; //FIXME find the one we actually used
3178                             ff_mpeg4_set_direct_mv(s, mx, my);
3179                         }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3187                         s->mv_dir= best_s.mv_dir;
3188                         s->mv_type = best_s.mv_type;
3190 /*                        s->mv[0][0][0] = best_s.mv[0][0][0];
3191                         s->mv[0][0][1] = best_s.mv[0][0][1];
3192                         s->mv[1][0][0] = best_s.mv[1][0][0];
3193                         s->mv[1][0][1] = best_s.mv[1][0][1];*/
3196                         encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3197                                      &dmin, &next_block, mx, my);
                 /* ---- commit the winning candidate into the real bitstream ---- */
3202                 s->current_picture.qscale_table[xy] = best_s.qscale;
3204                 copy_context_after_encode(s, &best_s, -1);
3206                 pb_bits_count= put_bits_count(&s->pb);
3207                 flush_put_bits(&s->pb);
3208                 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3211                 if(s->data_partitioning){
3212                     pb2_bits_count= put_bits_count(&s->pb2);
3213                     flush_put_bits(&s->pb2);
3214                     avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3215                     s->pb2= backup_s.pb2;
3217                     tex_pb_bits_count= put_bits_count(&s->tex_pb);
3218                     flush_put_bits(&s->tex_pb);
3219                     avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3220                     s->tex_pb= backup_s.tex_pb;
3222                 s->last_bits= put_bits_count(&s->pb);
3224                 if (CONFIG_H263_ENCODER &&
3225                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3226                     ff_h263_update_motion_val(s);
3228                 if(next_block==0){ //FIXME 16 vs linesize16
                     /* winner's reconstruction lives in the scratchpad; copy it out */
3229                     s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad                     , s->linesize  ,16);
3230                     s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize    , s->uvlinesize, 8);
3231                     s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3234                 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3235                     ff_mpv_decode_mb(s, s->block);
             /* ---- deterministic path: exactly one MB type possible ---- */
3237                 int motion_x = 0, motion_y = 0;
3238                 s->mv_type=MV_TYPE_16X16;
3239                 // only one MB-Type possible
3242                 case CANDIDATE_MB_TYPE_INTRA:
3245                     motion_x= s->mv[0][0][0] = 0;
3246                     motion_y= s->mv[0][0][1] = 0;
3248                 case CANDIDATE_MB_TYPE_INTER:
3249                     s->mv_dir = MV_DIR_FORWARD;
3251                     motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3252                     motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3254                 case CANDIDATE_MB_TYPE_INTER_I:
3255                     s->mv_dir = MV_DIR_FORWARD;
3256                     s->mv_type = MV_TYPE_FIELD;
3259                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3260                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3261                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3264                 case CANDIDATE_MB_TYPE_INTER4V:
3265                     s->mv_dir = MV_DIR_FORWARD;
3266                     s->mv_type = MV_TYPE_8X8;
3269                         s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3270                         s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3273                 case CANDIDATE_MB_TYPE_DIRECT:
3274                     if (CONFIG_MPEG4_ENCODER) {
3275                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3277                         motion_x=s->b_direct_mv_table[xy][0];
3278                         motion_y=s->b_direct_mv_table[xy][1];
3279                         ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3282                 case CANDIDATE_MB_TYPE_DIRECT0:
3283                     if (CONFIG_MPEG4_ENCODER) {
3284                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3286                         ff_mpeg4_set_direct_mv(s, 0, 0);
3289                 case CANDIDATE_MB_TYPE_BIDIR:
3290                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3292                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3293                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3294                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3295                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3297                 case CANDIDATE_MB_TYPE_BACKWARD:
3298                     s->mv_dir = MV_DIR_BACKWARD;
3300                     motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3301                     motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3303                 case CANDIDATE_MB_TYPE_FORWARD:
3304                     s->mv_dir = MV_DIR_FORWARD;
3306                     motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3307                     motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3309                 case CANDIDATE_MB_TYPE_FORWARD_I:
3310                     s->mv_dir = MV_DIR_FORWARD;
3311                     s->mv_type = MV_TYPE_FIELD;
3314                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3315                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3316                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3319                 case CANDIDATE_MB_TYPE_BACKWARD_I:
3320                     s->mv_dir = MV_DIR_BACKWARD;
3321                     s->mv_type = MV_TYPE_FIELD;
3324                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3325                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3326                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3329                 case CANDIDATE_MB_TYPE_BIDIR_I:
3330                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3331                     s->mv_type = MV_TYPE_FIELD;
3333                     for(dir=0; dir<2; dir++){
3335                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3336                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3337                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3342                     av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3345                 encode_mb(s, motion_x, motion_y);
3347                 // RAL: Update last macroblock type
3348                 s->last_mv_dir = s->mv_dir;
3350                 if (CONFIG_H263_ENCODER &&
3351                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3352                     ff_h263_update_motion_val(s);
3354                 ff_mpv_decode_mb(s, s->block);
3357             /* clean the MV table in IPS frames for direct mode in B frames */
3358             if(s->mb_intra /* && I,P,S_TYPE */){
3359                 s->p_mv_table[xy][0]=0;
3360                 s->p_mv_table[xy][1]=0;
             /* accumulate per-plane SSE for the PSNR report */
3363             if (s->avctx->flags & CODEC_FLAG_PSNR) {
3367                 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3368                 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3370                 s->current_picture.error[0] += sse(
3371                     s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3372                     s->dest[0], w, h, s->linesize);
3373                 s->current_picture.error[1] += sse(
3374                     s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
3375                     s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3376                 s->current_picture.error[2] += sse(
3377                     s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
3378                     s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3381             if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3382                 ff_h263_loop_filter(s);
3384             ff_dlog(s->avctx, "MB %d %d bits\n",
3385                     s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3389     //not beautiful here but we must write it before flushing so it has to be here
3390     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3391         ff_msmpeg4_encode_ext_header(s);
3395     /* Send the last GOB if RTP */
3396     if (s->avctx->rtp_callback) {
3397         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3398         pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3399         /* Call the RTP callback to send the last GOB */
3401         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
/* MERGE(field): fold src's accumulator into dst and zero src, so repeated
 * merges never double-count. Used by the two merge_context_* helpers below
 * to combine per-slice-thread statistics into the main context. */
3407 #define MERGE(field) dst->field += src->field; src->field=0
/* Merge motion-estimation statistics gathered by a slice thread. */
3408 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3409     MERGE(me.scene_change_score);
3410     MERGE(me.mc_mb_var_sum_temp);
3411     MERGE(me.mb_var_sum_temp);
/* Merge a slice thread's encode results into the main context: bit-usage
 * counters, error counts, PSNR error sums, optional noise-reduction DCT
 * error tables, and finally the thread's bitstream (which must be
 * byte-aligned) appended to the main PutBitContext. */
3414 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3417     MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3418     MERGE(dct_count[1]);
3427     MERGE(er.error_count);
3428     MERGE(padding_bug_score);
3429     MERGE(current_picture.error[0]);
3430     MERGE(current_picture.error[1]);
3431     MERGE(current_picture.error[2]);
3433     if(dst->avctx->noise_reduction){
3434         for(i=0; i<64; i++){
3435             MERGE(dct_error_sum[0][i]);
3436             MERGE(dct_error_sum[1][i]);
     /* bitstream concatenation requires both streams byte-aligned */
3440     assert(put_bits_count(&src->pb) % 8 ==0);
3441     assert(put_bits_count(&dst->pb) % 8 ==0);
3442     avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3443     flush_put_bits(&dst->pb);
/* Determine the frame's quality/lambda before encoding: a pending
 * next_lambda overrides, otherwise the rate controller decides (unless the
 * qscale is fixed). With adaptive quantization the per-MB qscale table is
 * cleaned per codec rules and initialized; 'dry_run' avoids consuming
 * next_lambda so the estimate can be repeated. */
3446 static int estimate_qp(MpegEncContext *s, int dry_run){
3447     if (s->next_lambda){
3448         s->current_picture_ptr->f->quality =
3449         s->current_picture.f->quality = s->next_lambda;
3450         if(!dry_run) s->next_lambda= 0;
3451     } else if (!s->fixed_qscale) {
3452         s->current_picture_ptr->f->quality =
3453         s->current_picture.f->quality = ff_rate_estimate_qscale(s, dry_run);
3454         if (s->current_picture.f->quality < 0)
3458     if(s->adaptive_quant){
3459         switch(s->codec_id){
3460         case AV_CODEC_ID_MPEG4:
3461             if (CONFIG_MPEG4_ENCODER)
3462                 ff_clean_mpeg4_qscales(s);
3464         case AV_CODEC_ID_H263:
3465         case AV_CODEC_ID_H263P:
3466         case AV_CODEC_ID_FLV1:
3467             if (CONFIG_H263_ENCODER)
3468                 ff_clean_h263_qscales(s);
3471         ff_init_qscale_tab(s);
3474         s->lambda= s->lambda_table[0];
3477         s->lambda = s->current_picture.f->quality;
3482 /* must be called before writing the header */
/* Update temporal distances from the frame PTS: pp_time = distance between
 * the two surrounding non-B frames, pb_time = distance from the previous
 * non-B frame to this B frame. These feed direct-mode MV scaling. */
3483 static void set_frame_distances(MpegEncContext * s){
3484     av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3485     s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3487     if(s->pict_type==AV_PICTURE_TYPE_B){
3488         s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3489         assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3491         s->pp_time= s->time - s->last_non_b_time;
3492         s->last_non_b_time= s->time;
3493         assert(s->picture_number==0 || s->pp_time > 0);
3497 static int encode_picture(MpegEncContext *s, int picture_number)
/* Encode one picture: set up timing/rounding, run motion estimation across
 * slice threads, pick f_code/b_code, estimate qp, build quant matrices for
 * MJPEG/AMV, write the codec-specific picture header, then run the slice
 * encode threads and merge their contexts/bitstreams. */
3501 int context_count = s->slice_context_count;
3503 s->picture_number = picture_number;
3505 /* Reset the average MB variance */
3506 s->me.mb_var_sum_temp =
3507 s->me.mc_mb_var_sum_temp = 0;
3509 /* we need to initialize some time vars before we can encode b-frames */
3510 // RAL: Condition added for MPEG1VIDEO
3511 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3512 set_frame_distances(s);
3513 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3514 ff_set_mpeg4_time(s);
3516 s->me.scene_change_score=0;
3518 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* Rounding control: I frames reset it; P/S frames toggle it when the codec
 * uses flipflop rounding (H.263+/MPEG-4). */
3520 if(s->pict_type==AV_PICTURE_TYPE_I){
3521 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3522 else s->no_rounding=0;
3523 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3524 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3525 s->no_rounding ^= 1;
/* Second pass of 2-pass encoding reuses the stats; otherwise seed lambda
 * from the last frame of the same (or last non-B) type. */
3528 if (s->avctx->flags & CODEC_FLAG_PASS2) {
3529 if (estimate_qp(s,1) < 0)
3531 ff_get_2pass_fcode(s);
3532 } else if (!(s->avctx->flags & CODEC_FLAG_QSCALE)) {
3533 if(s->pict_type==AV_PICTURE_TYPE_B)
3534 s->lambda= s->last_lambda_for[s->pict_type];
3536 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* For non-MJPEG codecs the chroma intra matrix aliases the luma one;
 * free any separately-allocated copy first to avoid a leak. */
3540 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3541 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3542 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3543 s->q_chroma_intra_matrix = s->q_intra_matrix;
3544 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3547 s->mb_intra=0; //for the rate distortion & bit compare functions
3548 for(i=1; i<context_count; i++){
3549 ret = ff_update_duplicate_context(s->thread_context[i], s);
3557 /* Estimate motion for every MB */
3558 if(s->pict_type != AV_PICTURE_TYPE_I){
/* me_penalty_compensation scales lambda (fixed point, 8 fractional bits) */
3559 s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3560 s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3561 if (s->pict_type != AV_PICTURE_TYPE_B) {
3562 if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3563 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3567 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3568 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3570 for(i=0; i<s->mb_stride*s->mb_height; i++)
3571 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3573 if(!s->fixed_qscale){
3574 /* finding spatial complexity for I-frame rate control */
3575 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3578 for(i=1; i<context_count; i++){
3579 merge_context_after_me(s, s->thread_context[i]);
3581 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3582 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* Scene-change detection: force an I frame when ME says the picture
 * changed too much to predict. */
3585 if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3586 s->pict_type= AV_PICTURE_TYPE_I;
3587 for(i=0; i<s->mb_stride*s->mb_height; i++)
3588 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3589 if(s->msmpeg4_version >= 3)
3591 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3592 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* Choose f_code from the P motion vectors and clip overly long MVs */
3596 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3597 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3599 if (s->avctx->flags & CODEC_FLAG_INTERLACED_ME) {
3601 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3602 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3603 s->f_code= FFMAX3(s->f_code, a, b);
3606 ff_fix_long_p_mvs(s);
3607 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3608 if (s->avctx->flags & CODEC_FLAG_INTERLACED_ME) {
3612 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3613 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
/* B frames: f_code from forward tables, b_code from backward tables */
3618 if(s->pict_type==AV_PICTURE_TYPE_B){
3621 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3622 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3623 s->f_code = FFMAX(a, b);
3625 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3626 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3627 s->b_code = FFMAX(a, b);
3629 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3630 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3631 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3632 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3633 if (s->avctx->flags & CODEC_FLAG_INTERLACED_ME) {
3635 for(dir=0; dir<2; dir++){
3638 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3639 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3640 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3641 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3649 if (estimate_qp(s, 0) < 0)
3652 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3653 s->pict_type == AV_PICTURE_TYPE_I &&
3654 !(s->avctx->flags & CODEC_FLAG_QSCALE))
3655 s->qscale= 3; //reduce clipping problems
/* MJPEG: bake qscale into the (user-supplied or default) quant matrices */
3657 if (s->out_format == FMT_MJPEG) {
3658 const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3659 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3661 if (s->avctx->intra_matrix) {
3663 luma_matrix = s->avctx->intra_matrix;
3665 if (s->avctx->chroma_intra_matrix)
3666 chroma_matrix = s->avctx->chroma_intra_matrix;
3668 /* for mjpeg, we do include qscale in the matrix */
3670 int j = s->idsp.idct_permutation[i];
3672 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3673 s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3675 s->y_dc_scale_table=
3676 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3677 s->chroma_intra_matrix[0] =
3678 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3679 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3680 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3681 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3682 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV uses fixed sp5x quant tables and fixed DC scales (13 luma / 14 chroma) */
3685 if(s->codec_id == AV_CODEC_ID_AMV){
3686 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3687 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3689 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3691 s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3692 s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3694 s->y_dc_scale_table= y;
3695 s->c_dc_scale_table= c;
3696 s->intra_matrix[0] = 13;
3697 s->chroma_intra_matrix[0] = 14;
3698 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3699 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3700 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3701 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3705 //FIXME var duplication
3706 s->current_picture_ptr->f->key_frame =
3707 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3708 s->current_picture_ptr->f->pict_type =
3709 s->current_picture.f->pict_type = s->pict_type;
3711 if (s->current_picture.f->key_frame)
3712 s->picture_in_gop_number=0;
3714 s->mb_x = s->mb_y = 0;
3715 s->last_bits= put_bits_count(&s->pb);
/* Write the codec-specific picture header */
3716 switch(s->out_format) {
3718 if (CONFIG_MJPEG_ENCODER)
3719 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3720 s->intra_matrix, s->chroma_intra_matrix);
3723 if (CONFIG_H261_ENCODER)
3724 ff_h261_encode_picture_header(s, picture_number);
3727 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3728 ff_wmv2_encode_picture_header(s, picture_number);
3729 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3730 ff_msmpeg4_encode_picture_header(s, picture_number);
3731 else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
3732 ff_mpeg4_encode_picture_header(s, picture_number);
3733 else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3734 ret = ff_rv10_encode_picture_header(s, picture_number);
3738 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3739 ff_rv20_encode_picture_header(s, picture_number);
3740 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3741 ff_flv_encode_picture_header(s, picture_number);
3742 else if (CONFIG_H263_ENCODER)
3743 ff_h263_encode_picture_header(s, picture_number);
3746 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3747 ff_mpeg1_encode_picture_header(s, picture_number);
3752 bits= put_bits_count(&s->pb);
3753 s->header_bits= bits - s->last_bits;
/* Run the slice encode threads and merge their bitstreams back into s->pb */
3755 for(i=1; i<context_count; i++){
3756 update_duplicate_context_after_me(s->thread_context[i], s);
3758 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3759 for(i=1; i<context_count; i++){
3760 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3761 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
3762 merge_context_after_encode(s, s->thread_context[i]);
3768 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
/* DCT-domain noise reduction: accumulate each coefficient's magnitude into
 * dct_error_sum[intra] and shrink the coefficient toward zero by the
 * per-coefficient dct_offset, clamping at zero so the sign never flips. */
3769 const int intra= s->mb_intra;
3772 s->dct_count[intra]++;
3774 for(i=0; i<64; i++){
3775 int level= block[i];
/* positive branch: record magnitude, subtract offset, clamp at 0 */
3779 s->dct_error_sum[intra][i] += level;
3780 level -= s->dct_offset[intra][i];
3781 if(level<0) level=0;
/* negative branch: mirror of the above */
3783 s->dct_error_sum[intra][i] -= level;
3784 level += s->dct_offset[intra][i];
3785 if(level>0) level=0;
3792 static int dct_quantize_trellis_c(MpegEncContext *s,
3793 int16_t *block, int n,
3794 int qscale, int *overflow){
/* Rate-distortion-optimal (trellis) quantization of one 8x8 block.
 * For each coefficient up to two candidate quantized levels are kept
 * (coeff[0..1]); a Viterbi-style search over "survivor" positions then
 * minimizes distortion + lambda * bits (VLC lengths from length[] /
 * last_length[]). Returns the index of the last nonzero coefficient in
 * scan order, or a value < start_i if the block quantizes to zero.
 * overflow is set when a level exceeds s->max_qcoeff. */
3796 const uint16_t *matrix;
3797 const uint8_t *scantable= s->intra_scantable.scantable;
3798 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3800 unsigned int threshold1, threshold2;
3812 int coeff_count[64];
3813 int qmul, qadd, start_i, last_non_zero, i, dc;
3814 const int esc_length= s->ac_esc_length;
3816 uint8_t * last_length;
3817 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3819 s->fdsp.fdct(block);
3821 if(s->dct_error_sum)
3822 s->denoise_dct(s, block);
3824 qadd= ((qscale-1)|1)*8;
3835 /* For AIC we skip quant/dequant of INTRADC */
3840 /* note: block[0] is assumed to be positive */
3841 block[0] = (block[0] + (q >> 1)) / q;
/* Select quant matrix and VLC length tables (intra luma/chroma vs inter) */
3844 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
3845 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
3846 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
3847 bias= 1<<(QMAT_SHIFT-1);
3849 if (n > 3 && s->intra_chroma_ac_vlc_length) {
3850 length = s->intra_chroma_ac_vlc_length;
3851 last_length= s->intra_chroma_ac_vlc_last_length;
3853 length = s->intra_ac_vlc_length;
3854 last_length= s->intra_ac_vlc_last_length;
3859 qmat = s->q_inter_matrix[qscale];
3860 matrix = s->inter_matrix;
3861 length = s->inter_ac_vlc_length;
3862 last_length= s->inter_ac_vlc_last_length;
3866 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3867 threshold2= (threshold1<<1);
/* Scan backwards to find the last coefficient that survives quantization */
3869 for(i=63; i>=start_i; i--) {
3870 const int j = scantable[i];
3871 int level = block[j] * qmat[j];
3873 if(((unsigned)(level+threshold1))>threshold2){
/* Forward pass: compute up to 2 candidate levels per coefficient */
3879 for(i=start_i; i<=last_non_zero; i++) {
3880 const int j = scantable[i];
3881 int level = block[j] * qmat[j];
3883 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3884 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
3885 if(((unsigned)(level+threshold1))>threshold2){
3887 level= (bias + level)>>QMAT_SHIFT;
3889 coeff[1][i]= level-1;
3890 // coeff[2][k]= level-2;
3892 level= (bias - level)>>QMAT_SHIFT;
3893 coeff[0][i]= -level;
3894 coeff[1][i]= -level+1;
3895 // coeff[2][k]= -level+2;
3897 coeff_count[i]= FFMIN(level, 2);
3898 av_assert2(coeff_count[i]);
3901 coeff[0][i]= (level>>31)|1;
3906 *overflow= s->max_qcoeff < max; //overflow might have happened
3908 if(last_non_zero < start_i){
3909 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3910 return last_non_zero;
/* Trellis search: minimize distortion + lambda*bits over survivor states */
3913 score_tab[start_i]= 0;
3914 survivor[0]= start_i;
3917 for(i=start_i; i<=last_non_zero; i++){
3918 int level_index, j, zero_distortion;
3919 int dct_coeff= FFABS(block[ scantable[i] ]);
3920 int best_score=256*256*256*120;
/* ifast FDCT output is AAN-scaled; undo the scaling for distortion calc */
3922 if (s->fdsp.fdct == ff_fdct_ifast)
3923 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
3924 zero_distortion= dct_coeff*dct_coeff;
3926 for(level_index=0; level_index < coeff_count[i]; level_index++){
3928 int level= coeff[level_index][i];
3929 const int alevel= FFABS(level);
/* Reconstruct the dequantized value the decoder would see */
3934 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
3935 unquant_coeff= alevel*qmul + qadd;
3936 } else if(s->out_format == FMT_MJPEG) {
3937 j = s->idsp.idct_permutation[scantable[i]];
3938 unquant_coeff = alevel * matrix[j] * 8;
3940 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
3942 unquant_coeff = (int)( alevel * qscale * matrix[j]) >> 3;
3943 unquant_coeff = (unquant_coeff - 1) | 1;
3945 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) matrix[j])) >> 4;
3946 unquant_coeff = (unquant_coeff - 1) | 1;
3951 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* Non-escape levels: try continuing each surviving run */
3953 if((level&(~127)) == 0){
3954 for(j=survivor_count-1; j>=0; j--){
3955 int run= i - survivor[j];
3956 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3957 score += score_tab[i-run];
3959 if(score < best_score){
3962 level_tab[i+1]= level-64;
3966 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
3967 for(j=survivor_count-1; j>=0; j--){
3968 int run= i - survivor[j];
3969 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3970 score += score_tab[i-run];
3971 if(score < last_score){
3974 last_level= level-64;
/* Escape-coded levels: fixed esc_length cost */
3980 distortion += esc_length*lambda;
3981 for(j=survivor_count-1; j>=0; j--){
3982 int run= i - survivor[j];
3983 int score= distortion + score_tab[i-run];
3985 if(score < best_score){
3988 level_tab[i+1]= level-64;
3992 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
3993 for(j=survivor_count-1; j>=0; j--){
3994 int run= i - survivor[j];
3995 int score= distortion + score_tab[i-run];
3996 if(score < last_score){
3999 last_level= level-64;
4007 score_tab[i+1]= best_score;
/* Prune survivors that can no longer win */
4009 //Note: there is a vlc code in mpeg4 which is 1 bit shorter than another one with a shorter run and the same level
4010 if(last_non_zero <= 27){
4011 for(; survivor_count; survivor_count--){
4012 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4016 for(; survivor_count; survivor_count--){
4017 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4022 survivor[ survivor_count++ ]= i+1;
/* For non-H.263/H.261: pick the best endpoint among survivors */
4025 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4026 last_score= 256*256*256*120;
4027 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4028 int score= score_tab[i];
4029 if(i) score += lambda*2; //FIXME exacter?
4031 if(score < last_score){
4034 last_level= level_tab[i];
4035 last_run= run_tab[i];
4040 s->coded_score[n] = last_score;
4042 dc= FFABS(block[0]);
4043 last_non_zero= last_i - 1;
4044 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4046 if(last_non_zero < start_i)
4047 return last_non_zero;
/* Special case: only the first coefficient survived — decide if it is
 * cheaper to drop it entirely (compare against dc*dc distortion). */
4049 if(last_non_zero == 0 && start_i == 0){
4051 int best_score= dc * dc;
4053 for(i=0; i<coeff_count[0]; i++){
4054 int level= coeff[i][0];
4055 int alevel= FFABS(level);
4056 int unquant_coeff, score, distortion;
4058 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4059 unquant_coeff= (alevel*qmul + qadd)>>3;
4061 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) matrix[0])) >> 4;
4062 unquant_coeff = (unquant_coeff - 1) | 1;
4064 unquant_coeff = (unquant_coeff + 4) >> 3;
4065 unquant_coeff<<= 3 + 3;
4067 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4069 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4070 else score= distortion + esc_length*lambda;
4072 if(score < best_score){
4074 best_level= level - 64;
4077 block[0]= best_level;
4078 s->coded_score[n] = best_score - dc*dc;
4079 if(best_level == 0) return -1;
4080 else return last_non_zero;
/* Back-track the winning path and write the chosen levels into block[] */
4084 av_assert2(last_level);
4086 block[ perm_scantable[last_non_zero] ]= last_level;
4089 for(; i>start_i; i -= run_tab[i] + 1){
4090 block[ perm_scantable[i-1] ]= level_tab[i];
4093 return last_non_zero;
4096 //#define REFINE_STATS 1
/* 64 DCT basis vectors (fixed point, BASIS_SHIFT), indexed by permuted
 * coefficient position; lazily filled by build_basis(). */
4097 static int16_t basis[64][64];
4099 static void build_basis(uint8_t *perm){
/* Build the 2-D DCT-II basis table used by dct_quantize_refine():
 * basis[perm[index]][8*x+y] = scaled cos product, with the usual 1/sqrt(2)
 * normalization of the first row/column. perm maps coefficient positions
 * to the IDCT's permuted order. */
4106 double s= 0.25*(1<<BASIS_SHIFT);
4108 int perm_index= perm[index];
4109 if(i==0) s*= sqrt(0.5);
4110 if(j==0) s*= sqrt(0.5);
4111 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4118 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4119 int16_t *block, int16_t *weight, int16_t *orig,
/* Iteratively refine an already-quantized 8x8 block (quantizer noise
 * shaping): rem[] holds the weighted reconstruction error in the pixel
 * domain; each iteration tries +/-1 changes to coefficients via the
 * precomputed DCT basis (try_8x8basis/add_8x8basis) and keeps the change
 * that lowers distortion + lambda*bit-cost. Returns the new index of the
 * last nonzero coefficient. */
4122 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4123 const uint8_t *scantable= s->intra_scantable.scantable;
4124 const uint8_t *perm_scantable= s->intra_scantable.permutated;
4125 // unsigned int threshold1, threshold2;
4130 int qmul, qadd, start_i, last_non_zero, i, dc;
4132 uint8_t * last_length;
4134 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* REFINE_STATS counters (debug only) */
4137 static int after_last=0;
4138 static int to_zero=0;
4139 static int from_zero=0;
4142 static int messed_sign=0;
4145 if(basis[0][0] == 0)
4146 build_basis(s->idsp.idct_permutation);
4157 /* For AIC we skip quant/dequant of INTRADC */
4161 q <<= RECON_SHIFT-3;
4162 /* note: block[0] is assumed to be positive */
4164 // block[0] = (block[0] + (q >> 1)) / q;
4166 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4167 // bias= 1<<(QMAT_SHIFT-1);
/* Select VLC length tables (intra luma/chroma vs inter) */
4168 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4169 length = s->intra_chroma_ac_vlc_length;
4170 last_length= s->intra_chroma_ac_vlc_last_length;
4172 length = s->intra_ac_vlc_length;
4173 last_length= s->intra_ac_vlc_last_length;
4178 length = s->inter_ac_vlc_length;
4179 last_length= s->inter_ac_vlc_last_length;
4181 last_non_zero = s->block_last_index[n];
/* Initialize rem[] = current reconstruction minus the original pixels */
4186 dc += (1<<(RECON_SHIFT-1));
4187 for(i=0; i<64; i++){
4188 rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig directly instead of copying to rem[]
4191 STOP_TIMER("memset rem[]")}
/* Map the caller's weights into the 16..63 range used by try_8x8basis */
4194 for(i=0; i<64; i++){
4199 w= FFABS(weight[i]) + qns*one;
4200 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4203 // w=weight[i] = (63*qns + (w/2)) / w;
4206 av_assert2(w<(1<<6));
4209 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* Add the currently-coded coefficients into rem[] and record runs */
4215 for(i=start_i; i<=last_non_zero; i++){
4216 int j= perm_scantable[i];
4217 const int level= block[j];
4221 if(level<0) coeff= qmul*level - qadd;
4222 else coeff= qmul*level + qadd;
4223 run_tab[rle_index++]=run;
4226 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
4232 if(last_non_zero>0){
4233 STOP_TIMER("init rem[]")
/* Main refinement loop: best_score is the cost of changing nothing */
4240 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4243 int run2, best_unquant_change=0, analyze_gradient;
4247 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
4249 if(analyze_gradient){
4253 for(i=0; i<64; i++){
4256 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
4259 STOP_TIMER("rem*w*w")}
/* Try +/-1 on the intra DC coefficient */
4269 const int level= block[0];
4270 int change, old_coeff;
4272 av_assert2(s->mb_intra);
4276 for(change=-1; change<=1; change+=2){
4277 int new_level= level + change;
4278 int score, new_coeff;
4280 new_coeff= q*new_level;
4281 if(new_coeff >= 2048 || new_coeff < 0)
4284 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4285 new_coeff - old_coeff);
4286 if(score<best_score){
4289 best_change= change;
4290 best_unquant_change= new_coeff - old_coeff;
4297 run2= run_tab[rle_index++];
/* Try +/-1 on every AC coefficient, accounting for the VLC bit-cost
 * change of the affected (run, level) pairs */
4301 for(i=start_i; i<64; i++){
4302 int j= perm_scantable[i];
4303 const int level= block[j];
4304 int change, old_coeff;
4306 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4310 if(level<0) old_coeff= qmul*level - qadd;
4311 else old_coeff= qmul*level + qadd;
4312 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4316 av_assert2(run2>=0 || i >= last_non_zero );
4319 for(change=-1; change<=1; change+=2){
4320 int new_level= level + change;
4321 int score, new_coeff, unquant_change;
4324 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4328 if(new_level<0) new_coeff= qmul*new_level - qadd;
4329 else new_coeff= qmul*new_level + qadd;
4330 if(new_coeff >= 2048 || new_coeff <= -2048)
4332 //FIXME check for overflow
/* nonzero -> nonzero: only the level part of the VLC cost changes */
4335 if(level < 63 && level > -63){
4336 if(i < last_non_zero)
4337 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4338 - length[UNI_AC_ENC_INDEX(run, level+64)];
4340 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4341 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* zero -> +/-1: a new (run, level) pair splits an existing run */
4344 av_assert2(FFABS(new_level)==1);
4346 if(analyze_gradient){
4347 int g= d1[ scantable[i] ];
4348 if(g && (g^new_level) >= 0)
4352 if(i < last_non_zero){
4353 int next_i= i + run2 + 1;
4354 int next_level= block[ perm_scantable[next_i] ] + 64;
4356 if(next_level&(~127))
4359 if(next_i < last_non_zero)
4360 score += length[UNI_AC_ENC_INDEX(run, 65)]
4361 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4362 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4364 score += length[UNI_AC_ENC_INDEX(run, 65)]
4365 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4366 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4368 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4370 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4371 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* +/-1 -> zero: the two adjacent runs merge into one */
4377 av_assert2(FFABS(level)==1);
4379 if(i < last_non_zero){
4380 int next_i= i + run2 + 1;
4381 int next_level= block[ perm_scantable[next_i] ] + 64;
4383 if(next_level&(~127))
4386 if(next_i < last_non_zero)
4387 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4388 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4389 - length[UNI_AC_ENC_INDEX(run, 65)];
4391 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4392 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4393 - length[UNI_AC_ENC_INDEX(run, 65)];
4395 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4397 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4398 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4405 unquant_change= new_coeff - old_coeff;
4406 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4408 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4410 if(score<best_score){
4413 best_change= change;
4414 best_unquant_change= unquant_change;
4418 prev_level= level + 64;
4419 if(prev_level&(~127))
4428 STOP_TIMER("iterative step")}
/* Apply the best single-coefficient change found this iteration */
4432 int j= perm_scantable[ best_coeff ];
4434 block[j] += best_change;
4436 if(best_coeff > last_non_zero){
4437 last_non_zero= best_coeff;
4438 av_assert2(block[j]);
4445 if(block[j] - best_change){
4446 if(FFABS(block[j]) > FFABS(block[j] - best_change)){
/* If the last coefficient went to zero, shrink last_non_zero */
4458 for(; last_non_zero>=start_i; last_non_zero--){
4459 if(block[perm_scantable[last_non_zero]])
4465 if(256*256*256*64 % count == 0){
4466 av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
/* Rebuild run_tab[] and fold the applied change into rem[] */
4471 for(i=start_i; i<=last_non_zero; i++){
4472 int j= perm_scantable[i];
4473 const int level= block[j];
4476 run_tab[rle_index++]=run;
4483 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4489 if(last_non_zero>0){
4490 STOP_TIMER("iterative search")
4495 return last_non_zero;
4499 * Permute an 8x8 block according to permutation.
4500 * @param block the block which will be permuted according to
4501 * the given permutation vector
4502 * @param permutation the permutation vector
4503 * @param last the last non zero coefficient in scantable order, used to
4504 * speed the permutation up
4505 * @param scantable the used scantable, this is only used to speed the
4506 * permutation up, the block is not (inverse) permutated
4507 * to scantable order!
4509 static void block_permute(int16_t *block, uint8_t *permutation,
4510 const uint8_t *scantable, int last)
/* In-place permutation of the nonzero part of an 8x8 block: gather the
 * coefficients up to index `last` (scan order) into a temporary, then
 * scatter them to their permuted positions. Only scanned positions are
 * touched, which is why `last` speeds this up. */
4517 //FIXME it is ok but not clean and might fail for some permutations
4518 // if (permutation[1] == 1)
/* gather pass */
4521 for (i = 0; i <= last; i++) {
4522 const int j = scantable[i];
/* scatter pass into permuted positions */
4527 for (i = 0; i <= last; i++) {
4528 const int j = scantable[i];
4529 const int perm_j = permutation[j];
4530 block[perm_j] = temp[j];
4534 int ff_dct_quantize_c(MpegEncContext *s,
4535 int16_t *block, int n,
4536 int qscale, int *overflow)
/* Plain (non-trellis) quantization of one 8x8 block: forward DCT, optional
 * DCT-domain denoising, fixed-point quantization with rounding bias, then
 * permutation of the nonzero coefficients into IDCT order.
 * n selects the block (n < 4: luma / intra matrix path), qscale the
 * quantizer. Returns the last nonzero index in scan order; *overflow is
 * set when a quantized level exceeds s->max_qcoeff. */
4538 int i, j, level, last_non_zero, q, start_i;
4540 const uint8_t *scantable= s->intra_scantable.scantable;
4543 unsigned int threshold1, threshold2;
4545 s->fdsp.fdct(block);
4547 if(s->dct_error_sum)
4548 s->denoise_dct(s, block);
4558 /* For AIC we skip quant/dequant of INTRADC */
4561 /* note: block[0] is assumed to be positive */
4562 block[0] = (block[0] + (q >> 1)) / q;
/* Pick quant matrix and bias (QMAT_SHIFT fixed point) */
4565 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4566 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4570 qmat = s->q_inter_matrix[qscale];
4571 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4573 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4574 threshold2= (threshold1<<1);
/* Backward scan: find the last coefficient surviving quantization */
4575 for(i=63;i>=start_i;i--) {
4577 level = block[j] * qmat[j];
4579 if(((unsigned)(level+threshold1))>threshold2){
/* Forward pass: quantize every surviving coefficient in place */
4586 for(i=start_i; i<=last_non_zero; i++) {
4588 level = block[j] * qmat[j];
4590 // if( bias+level >= (1<<QMAT_SHIFT)
4591 // || bias-level >= (1<<QMAT_SHIFT)){
4592 if(((unsigned)(level+threshold1))>threshold2){
4594 level= (bias + level)>>QMAT_SHIFT;
4597 level= (bias - level)>>QMAT_SHIFT;
4605 *overflow= s->max_qcoeff < max; //overflow might have happened
4607 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4608 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4609 block_permute(block, s->idsp.idct_permutation,
4610 scantable, last_non_zero);
4612 return last_non_zero;
/* Helpers for the AVOption tables below: OFFSET gives the field offset
 * inside MpegEncContext, VE marks options as video+encoding parameters. */
4615 #define OFFSET(x) offsetof(MpegEncContext, x)
4616 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options of the H.263 encoder. */
4617 static const AVOption h263_options[] = {
4618 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4619 { "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4620 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass exposing h263_options through the AVOption API. */
4625 static const AVClass h263_class = {
4626 .class_name = "H.263 encoder",
4627 .item_name = av_default_item_name,
4628 .option = h263_options,
4629 .version = LIBAVUTIL_VERSION_INT,
/* H.263 encoder registration; uses the generic mpegvideo encode entry points. */
4632 AVCodec ff_h263_encoder = {
4634 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4635 .type = AVMEDIA_TYPE_VIDEO,
4636 .id = AV_CODEC_ID_H263,
4637 .priv_data_size = sizeof(MpegEncContext),
4638 .init = ff_mpv_encode_init,
4639 .encode2 = ff_mpv_encode_picture,
4640 .close = ff_mpv_encode_end,
4641 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4642 .priv_class = &h263_class,
/* Private options of the H.263+ encoder. */
4645 static const AVOption h263p_options[] = {
4646 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4647 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4648 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4649 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
/* AVClass exposing h263p_options through the AVOption API. */
4653 static const AVClass h263p_class = {
4654 .class_name = "H.263p encoder",
4655 .item_name = av_default_item_name,
4656 .option = h263p_options,
4657 .version = LIBAVUTIL_VERSION_INT,
/* H.263+ encoder registration; slice threading is supported. */
4660 AVCodec ff_h263p_encoder = {
4662 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4663 .type = AVMEDIA_TYPE_VIDEO,
4664 .id = AV_CODEC_ID_H263P,
4665 .priv_data_size = sizeof(MpegEncContext),
4666 .init = ff_mpv_encode_init,
4667 .encode2 = ff_mpv_encode_picture,
4668 .close = ff_mpv_encode_end,
4669 .capabilities = CODEC_CAP_SLICE_THREADS,
4670 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4671 .priv_class = &h263p_class,
/* AVClass for MS-MPEG4 v2; only the generic mpegvideo options apply. */
4674 static const AVClass msmpeg4v2_class = {
4675 .class_name = "msmpeg4v2 encoder",
4676 .item_name = av_default_item_name,
4677 .option = ff_mpv_generic_options,
4678 .version = LIBAVUTIL_VERSION_INT,
/* MS-MPEG4 v2 encoder registration. */
4681 AVCodec ff_msmpeg4v2_encoder = {
4682 .name = "msmpeg4v2",
4683 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4684 .type = AVMEDIA_TYPE_VIDEO,
4685 .id = AV_CODEC_ID_MSMPEG4V2,
4686 .priv_data_size = sizeof(MpegEncContext),
4687 .init = ff_mpv_encode_init,
4688 .encode2 = ff_mpv_encode_picture,
4689 .close = ff_mpv_encode_end,
4690 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4691 .priv_class = &msmpeg4v2_class,
/* AVClass for MS-MPEG4 v3; only the generic mpegvideo options apply. */
4694 static const AVClass msmpeg4v3_class = {
4695 .class_name = "msmpeg4v3 encoder",
4696 .item_name = av_default_item_name,
4697 .option = ff_mpv_generic_options,
4698 .version = LIBAVUTIL_VERSION_INT,
/* MS-MPEG4 v3 encoder registration. */
4701 AVCodec ff_msmpeg4v3_encoder = {
4703 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4704 .type = AVMEDIA_TYPE_VIDEO,
4705 .id = AV_CODEC_ID_MSMPEG4V3,
4706 .priv_data_size = sizeof(MpegEncContext),
4707 .init = ff_mpv_encode_init,
4708 .encode2 = ff_mpv_encode_picture,
4709 .close = ff_mpv_encode_end,
4710 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4711 .priv_class = &msmpeg4v3_class,
/* AVClass for WMV1; only the generic mpegvideo options apply. */
4714 static const AVClass wmv1_class = {
4715 .class_name = "wmv1 encoder",
4716 .item_name = av_default_item_name,
4717 .option = ff_mpv_generic_options,
4718 .version = LIBAVUTIL_VERSION_INT,
4721 AVCodec ff_wmv1_encoder = {
4723 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4724 .type = AVMEDIA_TYPE_VIDEO,
4725 .id = AV_CODEC_ID_WMV1,
4726 .priv_data_size = sizeof(MpegEncContext),
4727 .init = ff_mpv_encode_init,
4728 .encode2 = ff_mpv_encode_picture,
4729 .close = ff_mpv_encode_end,
4730 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4731 .priv_class = &wmv1_class,