2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
32 #include "libavutil/internal.h"
33 #include "libavutil/intmath.h"
34 #include "libavutil/mathematics.h"
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/timer.h"
42 #include "mpegvideo.h"
43 #include "mpegvideodata.h"
47 #include "mjpegenc_common.h"
49 #include "mpegutils.h"
52 #include "pixblockdsp.h"
56 #include "aandcttab.h"
58 #include "mpeg4video.h"
60 #include "bytestream.h"
/* NOTE(review): this file is an elided excerpt of FFmpeg's mpegvideo_enc.c;
 * the leading number on each line is the original line number and many
 * intermediate lines (braces, declarations) are missing from this view. */
/* Fixed-point fractional bits used for the quantizer bias values below. */
66 #define QUANT_BIAS_SHIFT 8
/* Shift used when building the 16-bit (SIMD) quantization matrices. */
68 #define QMAT_SHIFT_MMX 16
/* Forward declarations for encoder-internal helpers defined later in the file. */
71 static int encode_picture(MpegEncContext *s, int picture_number);
72 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
73 static int sse_mb(MpegEncContext *s);
74 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
75 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
/* Shared default ME penalty / fcode tables; default_fcode_tab entries are
 * filled in by mpv_encode_defaults() below. */
77 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
78 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
/* Generic AVOption table shared by the mpegvideo-based encoders
 * (array initializer elided in this excerpt). */
80 const AVOption ff_mpv_generic_options[] = {
/**
 * Build per-qscale quantization multiplier tables from a quant matrix.
 *
 * For each qscale in [qmin, qmax], fills qmat (32-bit reciprocals) and, in
 * the generic branch, qmat16 (16-bit reciprocals plus a bias term) from
 * quant_matrix, applying the IDCT coefficient permutation.  The scaling
 * chosen depends on which forward-DCT implementation is in use
 * (islow/faandct vs. ifast vs. the generic path).
 *
 * @param s            encoder context (fdsp/idsp are consulted)
 * @param qmat         output: [qscale][64] 32-bit quant multipliers
 * @param qmat16       output: [qscale][0..1][64] 16-bit multipliers + bias
 * @param quant_matrix input quantization matrix (64 entries)
 * @param bias         rounding bias, in QUANT_BIAS_SHIFT fixed point
 * @param qmin,qmax    inclusive qscale range to fill
 * @param intra        nonzero for intra tables (skips the DC coefficient in
 *                     the overflow check loop below)
 */
85 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
86 uint16_t (*qmat16)[2][64],
87 const uint16_t *quant_matrix,
88 int bias, int qmin, int qmax, int intra)
90 FDCTDSPContext *fdsp = &s->fdsp;
94 for (qscale = qmin; qscale <= qmax; qscale++) {
        /* islow / faandct DCTs: plain reciprocal of qscale*matrix. */
96 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
98 fdsp->fdct == ff_faandct ||
99 #endif /* CONFIG_FAANDCT */
100 fdsp->fdct == ff_jpeg_fdct_islow_10) {
101 for (i = 0; i < 64; i++) {
102 const int j = s->idsp.idct_permutation[i];
103 int64_t den = (int64_t) qscale * quant_matrix[j];
104 /* 16 <= qscale * quant_matrix[i] <= 7905
105 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
106 * 19952 <= x <= 249205026
107 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
108 * 3444240 >= (1 << 36) / (x) >= 275 */
110 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / den);
        /* ifast DCT: the AAN post-scales are folded into the divisor. */
112 } else if (fdsp->fdct == ff_fdct_ifast) {
113 for (i = 0; i < 64; i++) {
114 const int j = s->idsp.idct_permutation[i];
115 int64_t den = ff_aanscales[i] * (int64_t) qscale * quant_matrix[j];
116 /* 16 <= qscale * quant_matrix[i] <= 7905
117 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
118 * 19952 <= x <= 249205026
119 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
120 * 3444240 >= (1 << 36) / (x) >= 275 */
122 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) / den);
        /* Generic path: also build the 16-bit tables used by SIMD quantizers. */
125 for (i = 0; i < 64; i++) {
126 const int j = s->idsp.idct_permutation[i];
127 int64_t den = (int64_t) qscale * quant_matrix[j];
128 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
129 * Assume x = qscale * quant_matrix[i]
131 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
132 * so 32768 >= (1 << 19) / (x) >= 67 */
133 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / den);
134 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
135 // (qscale * quant_matrix[i]);
136 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) / den;
            /* Clamp to keep the 16-bit multiplier nonzero and below 128*256. */
138 if (qmat16[qscale][0][i] == 0 ||
139 qmat16[qscale][0][i] == 128 * 256)
140 qmat16[qscale][0][i] = 128 * 256 - 1;
141 qmat16[qscale][1][i] =
142 ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
143 qmat16[qscale][0][i]);
        /* Overflow check: shrink the table (raise shift) until max*qmat fits
         * in an int; warns below if any shifting was required. */
147 for (i = intra; i < 64; i++) {
149 if (fdsp->fdct == ff_fdct_ifast) {
150 max = (8191LL * ff_aanscales[i]) >> 14;
152 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
158 av_log(NULL, AV_LOG_INFO,
159 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/* Derive the current qscale (and lambda2) from s->lambda.
 * qscale = (lambda * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7),
 * then clamped to the user's [qmin, qmax]; the lambda2 expression is
 * truncated in this excerpt (shift amount elided). */
164 static inline void update_qscale(MpegEncContext *s)
166 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
167 (FF_LAMBDA_SHIFT + 7);
168 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
170 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/* Write a 64-entry quantization matrix to the bitstream, 8 bits per
 * coefficient, in zigzag scan order. */
174 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
180 for (i = 0; i < 64; i++) {
181 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
188 * init s->current_picture.qscale_table from s->lambda_table
190 void ff_init_qscale_tab(MpegEncContext *s)
192 int8_t * const qscale_table = s->current_picture.qscale_table;
    /* Convert each macroblock's lambda to a qp (same mapping as
     * update_qscale()) and clamp it; mb_index2xy maps mb index -> table pos. */
195 for (i = 0; i < s->mb_num; i++) {
196 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
197 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
198 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/* Copy the per-picture state listed below from src to dst; used to sync a
 * duplicated (slice-thread) context after motion estimation. */
203 static void update_duplicate_context_after_me(MpegEncContext *dst,
206 #define COPY(a) dst->a= src->a
208 COPY(current_picture);
214 COPY(picture_in_gop_number);
215 COPY(gop_picture_number);
216 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
217 COPY(progressive_frame); // FIXME don't set in encode_header
218 COPY(partitioned_frame); // FIXME don't set in encode_header
223 * Set the given MpegEncContext to defaults for encoding.
224 * the changed fields will not depend upon the prior state of the MpegEncContext.
226 static void mpv_encode_defaults(MpegEncContext *s)
229 ff_mpv_common_defaults(s);
    /* Mark the central fcode table entries (-16..15 around MAX_MV) as 1. */
231 for (i = -16; i < 16; i++) {
232 default_fcode_tab[i + MAX_MV] = 1;
    /* Point the context at the shared static default tables. */
234 s->me.mv_penalty = default_mv_penalty;
235 s->fcode_tab = default_fcode_tab;
237 s->input_picture_number = 0;
238 s->picture_in_gop_number = 0;
/* Select the DCT quantization routines: x86 overrides first, then the C
 * fallback; trellis quantization replaces dct_quantize when requested
 * (fast_dct_quantize keeps the non-trellis version). */
241 av_cold int ff_dct_encode_init(MpegEncContext *s) {
243 ff_dct_encode_init_x86(s);
245 if (CONFIG_H263_ENCODER)
246 ff_h263dsp_init(&s->h263dsp);
247 if (!s->dct_quantize)
248 s->dct_quantize = ff_dct_quantize_c;
250 s->denoise_dct = denoise_dct_c;
251 s->fast_dct_quantize = s->dct_quantize;
252 if (s->avctx->trellis)
253 s->dct_quantize = dct_quantize_trellis_c;
258 /* init video encoder */
/**
 * Initialize the mpegvideo encoder from the user's AVCodecContext settings:
 * validates pixel format / dimensions / rate-control options per codec,
 * copies parameters into the MpegEncContext, selects the output format,
 * allocates quantization tables and picture lists, and initializes the DSP,
 * rate-control and per-codec sub-encoders.
 * Returns 0 on success or a negative AVERROR code (many early-return error
 * paths are elided in this excerpt).
 */
259 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
261 MpegEncContext *s = avctx->priv_data;
262 int i, ret, format_supported;
264 mpv_encode_defaults(s);
    /* --- per-codec pixel format validation --- */
266 switch (avctx->codec_id) {
267 case AV_CODEC_ID_MPEG2VIDEO:
268 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
269 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
270 av_log(avctx, AV_LOG_ERROR,
271 "only YUV420 and YUV422 are supported\n");
275 case AV_CODEC_ID_MJPEG:
276 case AV_CODEC_ID_AMV:
277 format_supported = 0;
278 /* JPEG color space */
279 if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
280 avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
281 avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
282 (avctx->color_range == AVCOL_RANGE_JPEG &&
283 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
284 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
285 avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
286 format_supported = 1;
287 /* MPEG color space */
288 else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
289 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
290 avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
291 avctx->pix_fmt == AV_PIX_FMT_YUV444P))
292 format_supported = 1;
294 if (!format_supported) {
295 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
300 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
301 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
    /* --- derive chroma subsampling format from pix_fmt --- */
306 switch (avctx->pix_fmt) {
307 case AV_PIX_FMT_YUVJ444P:
308 case AV_PIX_FMT_YUV444P:
309 s->chroma_format = CHROMA_444;
311 case AV_PIX_FMT_YUVJ422P:
312 case AV_PIX_FMT_YUV422P:
313 s->chroma_format = CHROMA_422;
315 case AV_PIX_FMT_YUVJ420P:
316 case AV_PIX_FMT_YUV420P:
318 s->chroma_format = CHROMA_420;
    /* --- copy basic parameters from the AVCodecContext --- */
322 s->bit_rate = avctx->bit_rate;
323 s->width = avctx->width;
324 s->height = avctx->height;
325 if (avctx->gop_size > 600 &&
326 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
327 av_log(avctx, AV_LOG_WARNING,
328 "keyframe interval too large!, reducing it from %d to %d\n",
329 avctx->gop_size, 600);
330 avctx->gop_size = 600;
332 s->gop_size = avctx->gop_size;
334 if (avctx->max_b_frames > MAX_B_FRAMES) {
335 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
336 "is %d.\n", MAX_B_FRAMES);
337 avctx->max_b_frames = MAX_B_FRAMES;
339 s->max_b_frames = avctx->max_b_frames;
340 s->codec_id = avctx->codec->id;
341 s->strict_std_compliance = avctx->strict_std_compliance;
342 s->quarter_sample = (avctx->flags & CODEC_FLAG_QPEL) != 0;
343 s->mpeg_quant = avctx->mpeg_quant;
344 s->rtp_mode = !!avctx->rtp_payload_size;
345 s->intra_dc_precision = avctx->intra_dc_precision;
347 // workaround some differences between how applications specify dc precision
348 if (s->intra_dc_precision < 0) {
349 s->intra_dc_precision += 8;
350 } else if (s->intra_dc_precision >= 8)
351 s->intra_dc_precision -= 8;
353 if (s->intra_dc_precision < 0) {
354 av_log(avctx, AV_LOG_ERROR,
355 "intra dc precision must be positive, note some applications use"
356 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
357 return AVERROR(EINVAL);
    /* Only MPEG-2 supports a nonzero intra DC precision (up to 3). */
360 if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
361 av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
362 return AVERROR(EINVAL);
364 s->user_specified_pts = AV_NOPTS_VALUE;
366 if (s->gop_size <= 1) {
373 s->me_method = avctx->me_method;
376 s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
379 FF_DISABLE_DEPRECATION_WARNINGS
380 if (avctx->border_masking != 0.0)
381 s->border_masking = avctx->border_masking;
382 FF_ENABLE_DEPRECATION_WARNINGS
    /* Adaptive quantization is enabled by any masking option or QP_RD. */
385 s->adaptive_quant = (s->avctx->lumi_masking ||
386 s->avctx->dark_masking ||
387 s->avctx->temporal_cplx_masking ||
388 s->avctx->spatial_cplx_masking ||
389 s->avctx->p_masking ||
391 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
394 s->loop_filter = !!(s->avctx->flags & CODEC_FLAG_LOOP_FILTER);
    /* --- rate-control / VBV sanity checks --- */
    /* Pick a default VBV buffer size when only max rate was given. */
396 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
397 switch(avctx->codec_id) {
398 case AV_CODEC_ID_MPEG1VIDEO:
399 case AV_CODEC_ID_MPEG2VIDEO:
400 avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
402 case AV_CODEC_ID_MPEG4:
403 case AV_CODEC_ID_MSMPEG4V1:
404 case AV_CODEC_ID_MSMPEG4V2:
405 case AV_CODEC_ID_MSMPEG4V3:
406 if (avctx->rc_max_rate >= 15000000) {
407 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
408 } else if(avctx->rc_max_rate >= 2000000) {
409 avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
410 } else if(avctx->rc_max_rate >= 384000) {
411 avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
413 avctx->rc_buffer_size = 40;
414 avctx->rc_buffer_size *= 16384;
417 if (avctx->rc_buffer_size) {
418 av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
422 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
423 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
427 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
428 av_log(avctx, AV_LOG_INFO,
429 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
432 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
433 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
437 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
438 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
442 if (avctx->rc_max_rate &&
443 avctx->rc_max_rate == avctx->bit_rate &&
444 avctx->rc_max_rate != avctx->rc_min_rate) {
445 av_log(avctx, AV_LOG_INFO,
446 "impossible bitrate constraints, this will fail\n");
449 if (avctx->rc_buffer_size &&
450 avctx->bit_rate * (int64_t)avctx->time_base.num >
451 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
452 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
456 if (!s->fixed_qscale &&
457 avctx->bit_rate * av_q2d(avctx->time_base) >
458 avctx->bit_rate_tolerance) {
459 av_log(avctx, AV_LOG_WARNING,
460 "bitrate tolerance %d too small for bitrate %d, overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
461 avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
    /* MPEG-1/2 CBR: warn if the VBV buffer exceeds the 16-bit vbv_delay range. */
464 if (s->avctx->rc_max_rate &&
465 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
466 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
467 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
468 90000LL * (avctx->rc_buffer_size - 1) >
469 s->avctx->rc_max_rate * 0xFFFFLL) {
470 av_log(avctx, AV_LOG_INFO,
471 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
472 "specified vbv buffer is too large for the given bitrate!\n");
    /* --- per-codec feature compatibility checks --- */
475 if ((s->avctx->flags & CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
476 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
477 s->codec_id != AV_CODEC_ID_FLV1) {
478 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
482 if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
483 av_log(avctx, AV_LOG_ERROR,
484 "OBMC is only supported with simple mb decision\n");
488 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
489 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
493 if (s->max_b_frames &&
494 s->codec_id != AV_CODEC_ID_MPEG4 &&
495 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
496 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
497 av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
500 if (s->max_b_frames < 0) {
501 av_log(avctx, AV_LOG_ERROR,
502 "max b frames must be 0 or positive for mpegvideo based encoders\n");
506 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
507 s->codec_id == AV_CODEC_ID_H263 ||
508 s->codec_id == AV_CODEC_ID_H263P) &&
509 (avctx->sample_aspect_ratio.num > 255 ||
510 avctx->sample_aspect_ratio.den > 255)) {
511 av_log(avctx, AV_LOG_WARNING,
512 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
513 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
514 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
515 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
518 if ((s->codec_id == AV_CODEC_ID_H263 ||
519 s->codec_id == AV_CODEC_ID_H263P) &&
520 (avctx->width > 2048 ||
521 avctx->height > 1152 )) {
522 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
525 if ((s->codec_id == AV_CODEC_ID_H263 ||
526 s->codec_id == AV_CODEC_ID_H263P) &&
527 ((avctx->width &3) ||
528 (avctx->height&3) )) {
529 av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
533 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
534 (avctx->width > 4095 ||
535 avctx->height > 4095 )) {
536 av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
540 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
541 (avctx->width > 16383 ||
542 avctx->height > 16383 )) {
543 av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
547 if (s->codec_id == AV_CODEC_ID_RV10 &&
549 avctx->height&15 )) {
550 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
551 return AVERROR(EINVAL);
554 if (s->codec_id == AV_CODEC_ID_RV20 &&
557 av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
558 return AVERROR(EINVAL);
561 if ((s->codec_id == AV_CODEC_ID_WMV1 ||
562 s->codec_id == AV_CODEC_ID_WMV2) &&
564 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
568 if ((s->avctx->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
569 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
570 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
574 // FIXME mpeg2 uses that too
575 if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
576 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
577 av_log(avctx, AV_LOG_ERROR,
578 "mpeg2 style quantization not supported by codec\n");
582 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
583 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
587 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
588 s->avctx->mb_decision != FF_MB_DECISION_RD) {
589 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
593 if (s->avctx->scenechange_threshold < 1000000000 &&
594 (s->avctx->flags & CODEC_FLAG_CLOSED_GOP)) {
595 av_log(avctx, AV_LOG_ERROR,
596 "closed gop with scene change detection are not supported yet, "
597 "set threshold to 1000000000\n");
601 if (s->avctx->flags & CODEC_FLAG_LOW_DELAY) {
602 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
603 av_log(avctx, AV_LOG_ERROR,
604 "low delay forcing is only available for mpeg2\n");
607 if (s->max_b_frames != 0) {
608 av_log(avctx, AV_LOG_ERROR,
609 "b frames cannot be used with low delay\n");
614 if (s->q_scale_type == 1) {
615 if (avctx->qmax > 12) {
616 av_log(avctx, AV_LOG_ERROR,
617 "non linear quant only supports qmax <= 12 currently\n");
    /* --- threading constraints --- */
622 if (s->avctx->thread_count > 1 &&
623 s->codec_id != AV_CODEC_ID_MPEG4 &&
624 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
625 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
626 s->codec_id != AV_CODEC_ID_MJPEG &&
627 (s->codec_id != AV_CODEC_ID_H263P)) {
628 av_log(avctx, AV_LOG_ERROR,
629 "multi threaded encoding not supported by codec\n");
633 if (s->avctx->thread_count < 1) {
634 av_log(avctx, AV_LOG_ERROR,
635 "automatic thread number detection not supported by codec, "
640 if (s->avctx->slices > 1 || s->avctx->thread_count > 1)
643 if (s->avctx->thread_count > 1 && s->codec_id == AV_CODEC_ID_H263P)
644 s->h263_slice_structured = 1;
646 if (!avctx->time_base.den || !avctx->time_base.num) {
647 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
651 if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
652 av_log(avctx, AV_LOG_INFO,
653 "notice: b_frame_strategy only affects the first pass\n");
654 avctx->b_frame_strategy = 0;
    /* Reduce the time base by its gcd. */
657 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
659 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
660 avctx->time_base.den /= i;
661 avctx->time_base.num /= i;
    /* --- default quantizer bias per codec family --- */
665 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) {
666 // (a + x * 3 / 8) / x
667 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
668 s->inter_quant_bias = 0;
670 s->intra_quant_bias = 0;
672 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
675 if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
676 av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
677 return AVERROR(EINVAL);
    /* Honor the deprecated AVCodecContext bias fields if the user set them. */
680 #if FF_API_QUANT_BIAS
681 FF_DISABLE_DEPRECATION_WARNINGS
682 if (s->intra_quant_bias == FF_DEFAULT_QUANT_BIAS &&
683 avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
684 s->intra_quant_bias = avctx->intra_quant_bias;
685 if (s->inter_quant_bias == FF_DEFAULT_QUANT_BIAS &&
686 avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
687 s->inter_quant_bias = avctx->inter_quant_bias;
688 FF_ENABLE_DEPRECATION_WARNINGS
691 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
    /* MPEG-4 time base denominator must fit in 16 bits. */
693 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
694 s->avctx->time_base.den > (1 << 16) - 1) {
695 av_log(avctx, AV_LOG_ERROR,
696 "timebase %d/%d not supported by MPEG 4 standard, "
697 "the maximum admitted value for the timebase denominator "
698 "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
702 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
    /* --- per-codec output format setup --- */
704 switch (avctx->codec->id) {
705 case AV_CODEC_ID_MPEG1VIDEO:
706 s->out_format = FMT_MPEG1;
707 s->low_delay = !!(s->avctx->flags & CODEC_FLAG_LOW_DELAY);
708 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
710 case AV_CODEC_ID_MPEG2VIDEO:
711 s->out_format = FMT_MPEG1;
712 s->low_delay = !!(s->avctx->flags & CODEC_FLAG_LOW_DELAY);
713 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
716 case AV_CODEC_ID_MJPEG:
717 case AV_CODEC_ID_AMV:
718 s->out_format = FMT_MJPEG;
719 s->intra_only = 1; /* force intra only for jpeg */
720 if (!CONFIG_MJPEG_ENCODER ||
721 ff_mjpeg_encode_init(s) < 0)
726 case AV_CODEC_ID_H261:
727 if (!CONFIG_H261_ENCODER)
729 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
730 av_log(avctx, AV_LOG_ERROR,
731 "The specified picture size of %dx%d is not valid for the "
732 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
733 s->width, s->height);
736 s->out_format = FMT_H261;
739 s->rtp_mode = 0; /* Sliced encoding not supported */
741 case AV_CODEC_ID_H263:
742 if (!CONFIG_H263_ENCODER)
744 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
745 s->width, s->height) == 8) {
746 av_log(avctx, AV_LOG_ERROR,
747 "The specified picture size of %dx%d is not valid for "
748 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
749 "352x288, 704x576, and 1408x1152. "
750 "Try H.263+.\n", s->width, s->height);
753 s->out_format = FMT_H263;
757 case AV_CODEC_ID_H263P:
758 s->out_format = FMT_H263;
761 s->h263_aic = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
762 s->modified_quant = s->h263_aic;
763 s->loop_filter = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
764 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
767 /* These are just to be sure */
771 case AV_CODEC_ID_FLV1:
772 s->out_format = FMT_H263;
773 s->h263_flv = 2; /* format = 1; 11-bit codes */
774 s->unrestricted_mv = 1;
775 s->rtp_mode = 0; /* don't allow GOB */
779 case AV_CODEC_ID_RV10:
780 s->out_format = FMT_H263;
784 case AV_CODEC_ID_RV20:
785 s->out_format = FMT_H263;
788 s->modified_quant = 1;
792 s->unrestricted_mv = 0;
794 case AV_CODEC_ID_MPEG4:
795 s->out_format = FMT_H263;
797 s->unrestricted_mv = 1;
798 s->low_delay = s->max_b_frames ? 0 : 1;
799 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
801 case AV_CODEC_ID_MSMPEG4V2:
802 s->out_format = FMT_H263;
804 s->unrestricted_mv = 1;
805 s->msmpeg4_version = 2;
809 case AV_CODEC_ID_MSMPEG4V3:
810 s->out_format = FMT_H263;
812 s->unrestricted_mv = 1;
813 s->msmpeg4_version = 3;
814 s->flipflop_rounding = 1;
818 case AV_CODEC_ID_WMV1:
819 s->out_format = FMT_H263;
821 s->unrestricted_mv = 1;
822 s->msmpeg4_version = 4;
823 s->flipflop_rounding = 1;
827 case AV_CODEC_ID_WMV2:
828 s->out_format = FMT_H263;
830 s->unrestricted_mv = 1;
831 s->msmpeg4_version = 5;
832 s->flipflop_rounding = 1;
840 avctx->has_b_frames = !s->low_delay;
844 s->progressive_frame =
845 s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
846 CODEC_FLAG_INTERLACED_ME) ||
    /* --- common init, DSP contexts, table allocations --- */
851 if (ff_mpv_common_init(s) < 0)
854 ff_fdctdsp_init(&s->fdsp, avctx);
855 ff_me_cmp_init(&s->mecc, avctx);
856 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
857 ff_pixblockdsp_init(&s->pdsp, avctx);
858 ff_qpeldsp_init(&s->qdsp);
860 if (s->msmpeg4_version) {
861 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
862 2 * 2 * (MAX_LEVEL + 1) *
863 (MAX_RUN + 1) * 2 * sizeof(int), fail);
865 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
867 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
868 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail);
869 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
870 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
871 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
872 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
873 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
874 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
875 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
876 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
878 if (s->avctx->noise_reduction) {
879 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
880 2 * 64 * sizeof(uint16_t), fail);
883 ff_dct_encode_init(s);
885 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
886 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
888 s->quant_precision = 5;
890 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, s->avctx->ildct_cmp);
891 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->avctx->frame_skip_cmp);
    /* Per-format sub-encoder init. */
893 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
894 ff_h261_encode_init(s);
895 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
896 ff_h263_encode_init(s);
897 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
898 if ((ret = ff_msmpeg4_encode_init(s)) < 0)
900 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
901 && s->out_format == FMT_MPEG1)
902 ff_mpeg1_encode_init(s);
    /* --- choose default quant matrices (permuted), user overrides last --- */
905 for (i = 0; i < 64; i++) {
906 int j = s->idsp.idct_permutation[i];
907 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
909 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
910 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
911 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
913 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
916 s->chroma_intra_matrix[j] =
917 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
918 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
920 if (s->avctx->intra_matrix)
921 s->intra_matrix[j] = s->avctx->intra_matrix[i];
922 if (s->avctx->inter_matrix)
923 s->inter_matrix[j] = s->avctx->inter_matrix[i];
926 /* precompute matrix */
927 /* for mjpeg, we do include qscale in the matrix */
928 if (s->out_format != FMT_MJPEG) {
929 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
930 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
932 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
933 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
937 if (ff_rate_control_init(s) < 0)
    /* --- import deprecated rate-control options when set --- */
940 #if FF_API_ERROR_RATE
941 FF_DISABLE_DEPRECATION_WARNINGS
942 if (avctx->error_rate)
943 s->error_rate = avctx->error_rate;
944 FF_ENABLE_DEPRECATION_WARNINGS;
947 #if FF_API_NORMALIZE_AQP
948 FF_DISABLE_DEPRECATION_WARNINGS
949 if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP)
950 s->mpv_flags |= FF_MPV_FLAG_NAQ;
951 FF_ENABLE_DEPRECATION_WARNINGS;
955 FF_DISABLE_DEPRECATION_WARNINGS
956 if (avctx->flags & CODEC_FLAG_MV0)
957 s->mpv_flags |= FF_MPV_FLAG_MV0;
958 FF_ENABLE_DEPRECATION_WARNINGS
962 FF_DISABLE_DEPRECATION_WARNINGS
963 if (avctx->rc_qsquish != 0.0)
964 s->rc_qsquish = avctx->rc_qsquish;
965 if (avctx->rc_qmod_amp != 0.0)
966 s->rc_qmod_amp = avctx->rc_qmod_amp;
967 if (avctx->rc_qmod_freq)
968 s->rc_qmod_freq = avctx->rc_qmod_freq;
969 if (avctx->rc_buffer_aggressivity != 1.0)
970 s->rc_buffer_aggressivity = avctx->rc_buffer_aggressivity;
971 if (avctx->rc_initial_cplx != 0.0)
972 s->rc_initial_cplx = avctx->rc_initial_cplx;
974 s->lmin = avctx->lmin;
976 s->lmax = avctx->lmax;
980 s->rc_eq = av_strdup(avctx->rc_eq);
982 return AVERROR(ENOMEM);
984 FF_ENABLE_DEPRECATION_WARNINGS
    /* --- allocate downscaled temp frames for b_frame_strategy == 2 --- */
987 if (avctx->b_frame_strategy == 2) {
988 for (i = 0; i < s->max_b_frames + 2; i++) {
989 s->tmp_frames[i] = av_frame_alloc();
990 if (!s->tmp_frames[i])
991 return AVERROR(ENOMEM);
993 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
994 s->tmp_frames[i]->width = s->width >> avctx->brd_scale;
995 s->tmp_frames[i]->height = s->height >> avctx->brd_scale;
997 ret = av_frame_get_buffer(s->tmp_frames[i], 32);
    /* fail: allocation error path — tear down everything and report. */
1005 ff_mpv_encode_end(avctx);
1006 return AVERROR_UNKNOWN;
/**
 * Free all encoder-side resources: rate control, common mpegvideo state,
 * the MJPEG sub-encoder if active, extradata, temp frames, the new_picture,
 * stats buffers and all quantization matrix tables.  Safe to call from the
 * init failure path (av_freep/av_frame_free tolerate NULL).
 */
1009 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1011 MpegEncContext *s = avctx->priv_data;
1014 ff_rate_control_uninit(s);
1016 ff_mpv_common_end(s);
1017 if (CONFIG_MJPEG_ENCODER &&
1018 s->out_format == FMT_MJPEG)
1019 ff_mjpeg_encode_close(s);
1021 av_freep(&avctx->extradata);
1023 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1024 av_frame_free(&s->tmp_frames[i]);
1026 ff_free_picture_tables(&s->new_picture);
1027 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1029 av_freep(&s->avctx->stats_out);
1030 av_freep(&s->ac_stats);
    /* The chroma tables may alias the luma tables; only free them separately
     * when they are distinct allocations, then clear the pointers. */
1032 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1033 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1034 s->q_chroma_intra_matrix= NULL;
1035 s->q_chroma_intra_matrix16= NULL;
1036 av_freep(&s->q_intra_matrix);
1037 av_freep(&s->q_inter_matrix);
1038 av_freep(&s->q_intra_matrix16);
1039 av_freep(&s->q_inter_matrix16);
1040 av_freep(&s->input_picture);
1041 av_freep(&s->reordered_input_picture);
1042 av_freep(&s->dct_offset);
/* Sum of absolute errors of a 16x16 block against a constant reference
 * value (typically the block mean — see get_intra_count() below). */
1047 static int get_sae(uint8_t *src, int ref, int stride)
1052 for (y = 0; y < 16; y++) {
1053 for (x = 0; x < 16; x++) {
1054 acc += FFABS(src[x + y * stride] - ref);
/* Count 16x16 blocks that look cheaper to code as intra: for each block,
 * compare the SAD against the reference frame with the SAE around the
 * block's own mean; a block counts when sae + 500 < sad. */
1061 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1062 uint8_t *ref, int stride)
    /* Round height down to a multiple of 16 (partial rows are skipped). */
1068 h = s->height & ~15;
1070 for (y = 0; y < h; y += 16) {
1071 for (x = 0; x < w; x += 16) {
1072 int offset = x + y * stride;
1073 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
1075 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1076 int sae = get_sae(src + offset, mean, stride);
1078 acc += sae + 500 < sad;
/* Thin wrapper around ff_alloc_picture() that fills in all the encoder
 * context geometry (strides, mb dimensions, chroma shifts); 'shared'
 * requests a picture that references caller-owned buffers. */
1084 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1086 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1087 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1088 s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1089 &s->linesize, &s->uvlinesize);
/**
 * Queue one user-supplied frame for encoding: validate/guess its pts,
 * either reference the frame directly (when strides and alignment match)
 * or copy it into an internal picture with edge padding, then append it to
 * s->input_picture[] at position encoding_delay.
 *
 * @param pic_arg input frame (not modified; pts is copied, not written back)
 * @return 0 on success, negative AVERROR on failure (some paths elided here)
 */
1092 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1094 Picture *pic = NULL;
1096 int i, display_picture_number = 0, ret;
    /* Delay (in frames) before a loaded picture is encoded. */
1097 const int encoding_delay = s->max_b_frames ? s->max_b_frames :
1098 (s->low_delay ? 0 : 1);
1103 display_picture_number = s->input_picture_number++;
    /* pts handling: enforce monotonicity, or guess when the user gave none. */
1105 if (pts != AV_NOPTS_VALUE) {
1106 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1107 int64_t last = s->user_specified_pts;
1110 av_log(s->avctx, AV_LOG_ERROR,
1111 "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1113 return AVERROR(EINVAL);
1116 if (!s->low_delay && display_picture_number == 1)
1117 s->dts_delta = pts - last;
1119 s->user_specified_pts = pts;
1121 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1122 s->user_specified_pts =
1123 pts = s->user_specified_pts + 1;
1124 av_log(s->avctx, AV_LOG_INFO,
1125 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1128 pts = display_picture_number;
    /* Direct mode requires matching strides, 16-aligned dimensions and
     * STRIDE_ALIGN-aligned data pointers; otherwise the frame is copied. */
1134 if (!pic_arg->buf[0] ||
1135 pic_arg->linesize[0] != s->linesize ||
1136 pic_arg->linesize[1] != s->uvlinesize ||
1137 pic_arg->linesize[2] != s->uvlinesize)
1139 if ((s->width & 15) || (s->height & 15))
1141 if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1143 if (s->linesize & (STRIDE_ALIGN-1))
1146 ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1147 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1149 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1153 pic = &s->picture[i];
1157 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1160 ret = alloc_picture(s, pic, direct);
    /* If the caller handed us our own buffer back, no copy is needed. */
1165 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1166 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1167 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1170 int h_chroma_shift, v_chroma_shift;
1171 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
    /* Copy each plane into the internal picture, row by row if the
     * strides differ, then pad the edges when dimensions aren't aligned. */
1175 for (i = 0; i < 3; i++) {
1176 int src_stride = pic_arg->linesize[i];
1177 int dst_stride = i ? s->uvlinesize : s->linesize;
1178 int h_shift = i ? h_chroma_shift : 0;
1179 int v_shift = i ? v_chroma_shift : 0;
1180 int w = s->width >> h_shift;
1181 int h = s->height >> v_shift;
1182 uint8_t *src = pic_arg->data[i];
1183 uint8_t *dst = pic->f->data[i];
1186 if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1187 && !s->progressive_sequence
1188 && FFALIGN(s->height, 32) - s->height > 16)
1191 if (!s->avctx->rc_buffer_size)
1192 dst += INPLACE_OFFSET;
1194 if (src_stride == dst_stride)
1195 memcpy(dst, src, src_stride * h);
1198 uint8_t *dst2 = dst;
1200 memcpy(dst2, src, w);
1205 if ((s->width & 15) || (s->height & (vpad-1))) {
1206 s->mpvencdsp.draw_edges(dst, dst_stride,
1215 ret = av_frame_copy_props(pic->f, pic_arg);
1219 pic->f->display_picture_number = display_picture_number;
1220 pic->f->pts = pts; // we set this here to avoid modifiying pic_arg
1223 /* shift buffer entries */
1224 for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1225 s->input_picture[i - 1] = s->input_picture[i];
1227 s->input_picture[encoding_delay] = (Picture*) pic;
/*
 * Frame-skip decision: compare the candidate input picture p against the
 * reference picture ref, block by block, and decide whether the encoder
 * may drop p entirely.
 * The distortion of each 8x8 block (all three planes; luma has a 2x denser
 * grid via bw) is measured with mecc.frame_skip_cmp and accumulated
 * according to |avctx->frame_skip_exp|:
 *   0: max, 1: sum of |v|, 2: sum of v^2, 3: sum of |v^3|, 4: sum of v^4.
 * The visible thresholds are avctx->frame_skip_threshold and
 * (frame_skip_factor * lambda) >> 8.
 */
1232 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1236 int64_t score64 = 0;
1238 for (plane = 0; plane < 3; plane++) {
1239 const int stride = p->f->linesize[plane];
/* plane 0 (luma) covers twice the block grid of the chroma planes */
1240 const int bw = plane ? 1 : 2;
1241 for (y = 0; y < s->mb_height * bw; y++) {
1242 for (x = 0; x < s->mb_width * bw; x++) {
/* non-shared input pictures carry a 16-byte in-place offset */
1243 int off = p->shared ? 0 : 16;
1244 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1245 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1246 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1248 switch (FFABS(s->avctx->frame_skip_exp)) {
1249 case 0: score = FFMAX(score, v); break;
1250 case 1: score += FFABS(v); break;
1251 case 2: score64 += v * (int64_t)v; break;
1252 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1253 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
/* negative frame_skip_exp: normalize by picture area, then invert exponent */
1262 if (s->avctx->frame_skip_exp < 0)
1263 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1264 -1.0/s->avctx->frame_skip_exp);
/* below either threshold -> frame may be skipped */
1266 if (score64 < s->avctx->frame_skip_threshold)
1268 if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
/*
 * Encode a single frame with the given (throwaway) encoder context into a
 * temporary packet and release the packet afterwards.
 * Used by estimate_best_b_count() to measure the coded size of candidate
 * frame orderings; passing frame == NULL flushes delayed frames.
 * NOTE(review): the return value is produced outside the visible lines —
 * presumably the packet size or a negative error code; confirm upstream.
 */
1273 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1275 AVPacket pkt = { 0 };
1276 int ret, got_output;
1278 av_init_packet(&pkt);
1279 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
/* packet payload is only needed for its size; free it immediately */
1284 av_free_packet(&pkt);
/*
 * B-frame strategy 2: estimate the best number of consecutive B frames by
 * actually encoding downscaled (by brd_scale) copies of the queued input
 * pictures with a temporary encoder and comparing the rate-distortion cost
 * of each candidate B-run length j.
 * Returns the best B count found (initialised to -1).
 */
1288 static int estimate_best_b_count(MpegEncContext *s)
1290 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1291 AVCodecContext *c = avcodec_alloc_context3(NULL);
1292 const int scale = s->avctx->brd_scale;
1293 int i, j, out_size, p_lambda, b_lambda, lambda2;
1294 int64_t best_rd = INT64_MAX;
1295 int best_b_count = -1;
1298 return AVERROR(ENOMEM);
1299 av_assert0(scale >= 0 && scale <= 3);
/* reuse the lambdas of the last P/B pictures as quality targets */
1302 //s->next_picture_ptr->quality;
1303 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1304 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1305 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1306 if (!b_lambda) // FIXME we should do this somewhere else
1307 b_lambda = p_lambda;
1308 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* configure the scratch encoder: downscaled size, mirrored cmp settings */
1311 c->width = s->width >> scale;
1312 c->height = s->height >> scale;
1313 c->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR;
1314 c->flags |= s->avctx->flags & CODEC_FLAG_QPEL;
1315 c->mb_decision = s->avctx->mb_decision;
1316 c->me_cmp = s->avctx->me_cmp;
1317 c->mb_cmp = s->avctx->mb_cmp;
1318 c->me_sub_cmp = s->avctx->me_sub_cmp;
1319 c->pix_fmt = AV_PIX_FMT_YUV420P;
1320 c->time_base = s->avctx->time_base;
1321 c->max_b_frames = s->max_b_frames;
1323 if (avcodec_open2(c, codec, NULL) < 0)
/* shrink the last reference plus the queued inputs into tmp_frames[] */
1326 for (i = 0; i < s->max_b_frames + 2; i++) {
1327 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1328 s->next_picture_ptr;
1331 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1332 pre_input = *pre_input_ptr;
1333 memcpy(data, pre_input_ptr->f->data, sizeof(data));
/* non-shared inputs carry the in-place edge offset */
1335 if (!pre_input.shared && i) {
1336 data[0] += INPLACE_OFFSET;
1337 data[1] += INPLACE_OFFSET;
1338 data[2] += INPLACE_OFFSET;
1341 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1342 s->tmp_frames[i]->linesize[0],
1344 pre_input.f->linesize[0],
1345 c->width, c->height);
1346 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1347 s->tmp_frames[i]->linesize[1],
1349 pre_input.f->linesize[1],
1350 c->width >> 1, c->height >> 1);
1351 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1352 s->tmp_frames[i]->linesize[2],
1354 pre_input.f->linesize[2],
1355 c->width >> 1, c->height >> 1);
/* try each candidate B-run length j and accumulate its RD cost */
1359 for (j = 0; j < s->max_b_frames + 1; j++) {
1362 if (!s->input_picture[j])
1365 c->error[0] = c->error[1] = c->error[2] = 0;
/* frame 0 is always coded as a high-quality I frame */
1367 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1368 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1370 out_size = encode_frame(c, s->tmp_frames[0]);
1372 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1374 for (i = 0; i < s->max_b_frames + 1; i++) {
/* every (j+1)-th frame (and the last) is a P frame, the rest are B */
1375 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1377 s->tmp_frames[i + 1]->pict_type = is_p ?
1378 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1379 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1381 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1383 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1386 /* get the delayed frames */
1388 out_size = encode_frame(c, NULL);
1389 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* add the accumulated PSNR error terms as the distortion part */
1392 rd += c->error[0] + c->error[1] + c->error[2];
1403 return best_b_count;
/*
 * Pick the next picture to encode and establish coding order.
 * Shifts the reorder buffer, optionally drops the frame via skip_check(),
 * decides the picture type (I when no reference exists or intra_only;
 * otherwise P preceded by a run of B frames chosen per b_frame_strategy
 * 0/1/2), then publishes the selection in s->new_picture /
 * s->current_picture_ptr. For shared or VBV-constrained inputs the frame
 * data is copied into a freshly allocated internal picture.
 */
1406 static int select_input_picture(MpegEncContext *s)
/* shift the reordered-output queue down by one slot */
1410 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1411 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1412 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1414 /* set next picture type & ordering */
1415 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
/* optional frame skipping against the previous reference */
1416 if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1417 if (s->picture_in_gop_number < s->gop_size &&
1418 s->next_picture_ptr &&
1419 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1420 // FIXME check that te gop check above is +-1 correct
1421 av_frame_unref(s->input_picture[0]->f);
1423 ff_vbv_update(s, 0);
/* no reference available (or intra-only codec): force an I frame */
1429 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1430 !s->next_picture_ptr || s->intra_only) {
1431 s->reordered_input_picture[0] = s->input_picture[0];
1432 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1433 s->reordered_input_picture[0]->f->coded_picture_number =
1434 s->coded_picture_number++;
/* two-pass mode: take picture types from the first-pass log */
1438 if (s->avctx->flags & CODEC_FLAG_PASS2) {
1439 for (i = 0; i < s->max_b_frames + 1; i++) {
1440 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1442 if (pict_num >= s->rc_context.num_entries)
1444 if (!s->input_picture[i]) {
1445 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1449 s->input_picture[i]->f->pict_type =
1450 s->rc_context.entry[pict_num].new_pict_type;
/* strategy 0: always use the maximum available B-run */
1454 if (s->avctx->b_frame_strategy == 0) {
1455 b_frames = s->max_b_frames;
1456 while (b_frames && !s->input_picture[b_frames])
/* strategy 1: score frame differences and stop when motion is too high */
1458 } else if (s->avctx->b_frame_strategy == 1) {
1459 for (i = 1; i < s->max_b_frames + 1; i++) {
1460 if (s->input_picture[i] &&
1461 s->input_picture[i]->b_frame_score == 0) {
1462 s->input_picture[i]->b_frame_score =
1464 s->input_picture[i ]->f->data[0],
1465 s->input_picture[i - 1]->f->data[0],
1469 for (i = 0; i < s->max_b_frames + 1; i++) {
1470 if (!s->input_picture[i] ||
1471 s->input_picture[i]->b_frame_score - 1 >
1472 s->mb_num / s->avctx->b_sensitivity)
1476 b_frames = FFMAX(0, i - 1);
/* reset the cached scores for the frames we are about to consume */
1479 for (i = 0; i < b_frames + 1; i++) {
1480 s->input_picture[i]->b_frame_score = 0;
/* strategy 2: full RD search over downscaled re-encodes */
1482 } else if (s->avctx->b_frame_strategy == 2) {
1483 b_frames = estimate_best_b_count(s);
1485 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
/* a user-forced non-B type inside the run shortens it */
1491 for (i = b_frames - 1; i >= 0; i--) {
1492 int type = s->input_picture[i]->f->pict_type;
1493 if (type && type != AV_PICTURE_TYPE_B)
1496 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1497 b_frames == s->max_b_frames) {
1498 av_log(s->avctx, AV_LOG_ERROR,
1499 "warning, too many b frames in a row\n");
/* GOP boundary handling: clamp the run and/or force an I frame */
1502 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1503 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1504 s->gop_size > s->picture_in_gop_number) {
1505 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1507 if (s->avctx->flags & CODEC_FLAG_CLOSED_GOP)
1509 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1513 if ((s->avctx->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
1514 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
/* emit the anchor (P/I) first, then the B frames, in coding order */
1517 s->reordered_input_picture[0] = s->input_picture[b_frames];
1518 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1519 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1520 s->reordered_input_picture[0]->f->coded_picture_number =
1521 s->coded_picture_number++;
1522 for (i = 0; i < b_frames; i++) {
1523 s->reordered_input_picture[i + 1] = s->input_picture[i];
1524 s->reordered_input_picture[i + 1]->f->pict_type =
1526 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1527 s->coded_picture_number++;
1532 if (s->reordered_input_picture[0]) {
/* reference flag: 3 for I/P (both fields), 0 for B */
1533 s->reordered_input_picture[0]->reference =
1534 s->reordered_input_picture[0]->f->pict_type !=
1535 AV_PICTURE_TYPE_B ? 3 : 0;
1537 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1538 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1541 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1542 // input is a shared pix, so we can't modifiy it -> alloc a new
1543 // one & ensure that the shared one is reuseable
1546 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1549 pic = &s->picture[i];
1551 pic->reference = s->reordered_input_picture[0]->reference;
1552 if (alloc_picture(s, pic, 0) < 0) {
1556 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1560 /* mark us unused / free shared pic */
1561 av_frame_unref(s->reordered_input_picture[0]->f);
1562 s->reordered_input_picture[0]->shared = 0;
1564 s->current_picture_ptr = pic;
1566 // input is not a shared pix -> reuse buffer for current_pix
1567 s->current_picture_ptr = s->reordered_input_picture[0];
1568 for (i = 0; i < 4; i++) {
1569 s->new_picture.f->data[i] += INPLACE_OFFSET;
1572 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1573 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1574 s->current_picture_ptr)) < 0)
1577 s->picture_number = s->new_picture.f->display_picture_number;
1579 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
/*
 * Post-encode bookkeeping for the just-coded frame.
 * For reference pictures with unrestricted MVs, pads the picture edges of
 * all three planes (chroma shifted by the pixel format's subsampling) so
 * later motion search may read outside the frame. Records the picture
 * type and its lambda for future rate-control decisions, and mirrors the
 * frame properties into the deprecated avctx->coded_frame.
 */
1584 static void frame_end(MpegEncContext *s)
1586 if (s->unrestricted_mv &&
1587 s->current_picture.reference &&
1589 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1590 int hshift = desc->log2_chroma_w;
1591 int vshift = desc->log2_chroma_h;
/* luma plane: full resolution edges */
1592 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1593 s->current_picture.f->linesize[0],
1594 s->h_edge_pos, s->v_edge_pos,
1595 EDGE_WIDTH, EDGE_WIDTH,
1596 EDGE_TOP | EDGE_BOTTOM);
/* chroma planes: dimensions and edge width scaled by subsampling */
1597 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1598 s->current_picture.f->linesize[1],
1599 s->h_edge_pos >> hshift,
1600 s->v_edge_pos >> vshift,
1601 EDGE_WIDTH >> hshift,
1602 EDGE_WIDTH >> vshift,
1603 EDGE_TOP | EDGE_BOTTOM);
1604 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1605 s->current_picture.f->linesize[2],
1606 s->h_edge_pos >> hshift,
1607 s->v_edge_pos >> vshift,
1608 EDGE_WIDTH >> hshift,
1609 EDGE_WIDTH >> vshift,
1610 EDGE_TOP | EDGE_BOTTOM);
/* remember type/lambda for the next frame's rate control */
1615 s->last_pict_type = s->pict_type;
1616 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1617 if (s->pict_type!= AV_PICTURE_TYPE_B)
1618 s->last_non_b_pict_type = s->pict_type;
1620 #if FF_API_CODED_FRAME
1621 FF_DISABLE_DEPRECATION_WARNINGS
1622 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1623 FF_ENABLE_DEPRECATION_WARNINGS
/*
 * Refresh the per-coefficient DCT noise-reduction offsets from the running
 * error statistics, separately for intra and inter blocks.
 * When the sample count exceeds 2^16 both the error sums and the count are
 * halved (exponential forgetting) to keep the statistics adaptive.
 */
1627 static void update_noise_reduction(MpegEncContext *s)
1631 for (intra = 0; intra < 2; intra++) {
1632 if (s->dct_count[intra] > (1 << 16)) {
1633 for (i = 0; i < 64; i++) {
1634 s->dct_error_sum[intra][i] >>= 1;
1636 s->dct_count[intra] >>= 1;
/* offset[i] = noise_reduction * count / error_sum[i], rounded */
1639 for (i = 0; i < 64; i++) {
1640 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1641 s->dct_count[intra] +
1642 s->dct_error_sum[intra][i] / 2) /
1643 (s->dct_error_sum[intra][i] + 1);
/*
 * Per-frame setup before encoding starts.
 * Rotates the last/next reference pictures (non-B frames become the new
 * "next" reference), re-references current/last/next into the context,
 * adjusts data pointers and linesizes for field pictures, selects the
 * dequantizer set matching the output format, and updates the DCT
 * noise-reduction tables when enabled.
 */
1648 static int frame_start(MpegEncContext *s)
1652 /* mark & release old frames */
1653 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1654 s->last_picture_ptr != s->next_picture_ptr &&
1655 s->last_picture_ptr->f->buf[0]) {
1656 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1659 s->current_picture_ptr->f->pict_type = s->pict_type;
1660 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1662 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1663 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1664 s->current_picture_ptr)) < 0)
/* non-B frames advance the reference chain */
1667 if (s->pict_type != AV_PICTURE_TYPE_B) {
1668 s->last_picture_ptr = s->next_picture_ptr;
1670 s->next_picture_ptr = s->current_picture_ptr;
1673 if (s->last_picture_ptr) {
1674 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1675 if (s->last_picture_ptr->f->buf[0] &&
1676 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1677 s->last_picture_ptr)) < 0)
1680 if (s->next_picture_ptr) {
1681 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1682 if (s->next_picture_ptr->f->buf[0] &&
1683 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1684 s->next_picture_ptr)) < 0)
/* field pictures: double the stride, bottom field starts one line down */
1688 if (s->picture_structure!= PICT_FRAME) {
1690 for (i = 0; i < 4; i++) {
1691 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1692 s->current_picture.f->data[i] +=
1693 s->current_picture.f->linesize[i];
1695 s->current_picture.f->linesize[i] *= 2;
1696 s->last_picture.f->linesize[i] *= 2;
1697 s->next_picture.f->linesize[i] *= 2;
/* pick the dequantizer pair matching the bitstream syntax */
1701 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1702 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1703 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1704 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1705 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1706 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1708 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1709 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1712 if (s->dct_error_sum) {
1713 av_assert2(s->avctx->noise_reduction && s->encoding);
1714 update_noise_reduction(s);
/*
 * Top-level encode entry point: take one input frame (pic_arg), select and
 * encode the next picture in coding order, and emit the coded packet.
 * Flow: load input -> select_input_picture() -> allocate packet ->
 * per-slice-thread bitstream setup -> frame_start()/encode_picture() ->
 * stats, VBV handling (possible re-encode at higher lambda), stuffing,
 * vbv_delay patching for CBR MPEG-1/2, pts/dts assignment.
 * Returns 0 on success; *got_packet signals whether pkt holds output.
 */
1720 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1721 const AVFrame *pic_arg, int *got_packet)
1723 MpegEncContext *s = avctx->priv_data;
1724 int i, stuffing_count, ret;
1725 int context_count = s->slice_context_count;
1727 s->picture_in_gop_number++;
1729 if (load_input_picture(s, pic_arg) < 0)
1732 if (select_input_picture(s) < 0) {
/* output? */
1737 if (s->new_picture.f->data[0]) {
/* single-slice encodes without a preset buffer may grow the packet */
1739 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1740 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - FF_INPUT_BUFFER_PADDING_SIZE
1742 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1743 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size)) < 0)
1746 s->mb_info_ptr = av_packet_new_side_data(pkt,
1747 AV_PKT_DATA_H263_MB_INFO,
1748 s->mb_width*s->mb_height*12);
1749 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* split the packet buffer among slice threads by mb-row share */
1752 for (i = 0; i < context_count; i++) {
1753 int start_y = s->thread_context[i]->start_mb_y;
1754 int end_y = s->thread_context[i]-> end_mb_y;
1755 int h = s->mb_height;
1756 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1757 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1759 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1762 s->pict_type = s->new_picture.f->pict_type;
1764 ret = frame_start(s);
1768 ret = encode_picture(s, s->picture_number);
1769 if (growing_buffer) {
1770 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1771 pkt->data = s->pb.buf;
1772 pkt->size = avctx->internal->byte_buffer_size;
/* export per-category bit statistics */
1777 avctx->header_bits = s->header_bits;
1778 avctx->mv_bits = s->mv_bits;
1779 avctx->misc_bits = s->misc_bits;
1780 avctx->i_tex_bits = s->i_tex_bits;
1781 avctx->p_tex_bits = s->p_tex_bits;
1782 avctx->i_count = s->i_count;
1783 // FIXME f/b_count in avctx
1784 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1785 avctx->skip_count = s->skip_count;
1789 sd = av_packet_new_side_data(pkt, AV_PKT_DATA_QUALITY_FACTOR,
1792 return AVERROR(ENOMEM);
1793 *(int *)sd = s->current_picture.f->quality;
1795 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1796 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
/* VBV overflow check: if the frame is too big, raise lambda and re-encode */
1798 if (avctx->rc_buffer_size) {
1799 RateControlContext *rcc = &s->rc_context;
1800 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1802 if (put_bits_count(&s->pb) > max_size &&
1803 s->lambda < s->lmax) {
1804 s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1805 (s->qscale + 1) / s->qscale);
1806 if (s->adaptive_quant) {
1808 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1809 s->lambda_table[i] =
1810 FFMAX(s->lambda_table[i] + 1,
1811 s->lambda_table[i] * (s->qscale + 1) /
1814 s->mb_skipped = 0; // done in frame_start()
1815 // done in encode_picture() so we must undo it
1816 if (s->pict_type == AV_PICTURE_TYPE_P) {
1817 if (s->flipflop_rounding ||
1818 s->codec_id == AV_CODEC_ID_H263P ||
1819 s->codec_id == AV_CODEC_ID_MPEG4)
1820 s->no_rounding ^= 1;
1822 if (s->pict_type != AV_PICTURE_TYPE_B) {
1823 s->time_base = s->last_time_base;
1824 s->last_non_b_time = s->time - s->pp_time;
1826 for (i = 0; i < context_count; i++) {
1827 PutBitContext *pb = &s->thread_context[i]->pb;
1828 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1830 av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1834 av_assert0(s->avctx->rc_max_rate);
1837 if (s->avctx->flags & CODEC_FLAG_PASS1)
1838 ff_write_pass1_stats(s);
/* propagate per-plane error (PSNR) numbers */
1840 for (i = 0; i < 4; i++) {
1841 s->current_picture_ptr->f->error[i] =
1842 s->current_picture.f->error[i] =
1843 s->current_picture.error[i];
1844 avctx->error[i] += s->current_picture_ptr->f->error[i];
1847 if (s->avctx->flags & CODEC_FLAG_PASS1)
1848 assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1849 avctx->i_tex_bits + avctx->p_tex_bits ==
1850 put_bits_count(&s->pb));
1851 flush_put_bits(&s->pb);
1852 s->frame_bits = put_bits_count(&s->pb);
1854 stuffing_count = ff_vbv_update(s, s->frame_bits);
1855 s->stuffing_bits = 8*stuffing_count;
1856 if (stuffing_count) {
1857 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1858 stuffing_count + 50) {
1859 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
/* codec-specific stuffing byte patterns */
1863 switch (s->codec_id) {
1864 case AV_CODEC_ID_MPEG1VIDEO:
1865 case AV_CODEC_ID_MPEG2VIDEO:
1866 while (stuffing_count--) {
1867 put_bits(&s->pb, 8, 0);
1870 case AV_CODEC_ID_MPEG4:
/* MPEG-4 uses a stuffing start code 0x1C3 followed by 0xFF bytes */
1871 put_bits(&s->pb, 16, 0);
1872 put_bits(&s->pb, 16, 0x1C3);
1873 stuffing_count -= 4;
1874 while (stuffing_count--) {
1875 put_bits(&s->pb, 8, 0xFF);
1879 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1881 flush_put_bits(&s->pb);
1882 s->frame_bits = put_bits_count(&s->pb);
1885 /* update mpeg1/2 vbv_delay for CBR */
1886 if (s->avctx->rc_max_rate &&
1887 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1888 s->out_format == FMT_MPEG1 &&
1889 90000LL * (avctx->rc_buffer_size - 1) <=
1890 s->avctx->rc_max_rate * 0xFFFFLL) {
1891 int vbv_delay, min_delay;
1892 double inbits = s->avctx->rc_max_rate *
1893 av_q2d(s->avctx->time_base);
1894 int minbits = s->frame_bits - 8 *
1895 (s->vbv_delay_ptr - s->pb.buf - 1);
1896 double bits = s->rc_context.buffer_index + minbits - inbits;
1899 av_log(s->avctx, AV_LOG_ERROR,
1900 "Internal error, negative bits\n");
1902 assert(s->repeat_first_field == 0);
/* vbv_delay is expressed in 90 kHz clock ticks */
1904 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1905 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1906 s->avctx->rc_max_rate;
1908 vbv_delay = FFMAX(vbv_delay, min_delay);
1910 av_assert0(vbv_delay < 0xFFFF);
/* patch the 16-bit vbv_delay field straddling 3 header bytes in place */
1912 s->vbv_delay_ptr[0] &= 0xF8;
1913 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1914 s->vbv_delay_ptr[1] = vbv_delay >> 5;
1915 s->vbv_delay_ptr[2] &= 0x07;
1916 s->vbv_delay_ptr[2] |= vbv_delay << 3;
/* avctx->vbv_delay is in 27 MHz units -> multiply 90 kHz ticks by 300 */
1917 avctx->vbv_delay = vbv_delay * 300;
1919 s->total_bits += s->frame_bits;
1920 avctx->frame_bits = s->frame_bits;
/* dts: delay by dts_delta for the first frame, otherwise previous pts */
1922 pkt->pts = s->current_picture.f->pts;
1923 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
1924 if (!s->current_picture.f->coded_picture_number)
1925 pkt->dts = pkt->pts - s->dts_delta;
1927 pkt->dts = s->reordered_pts;
1928 s->reordered_pts = pkt->pts;
1930 pkt->dts = pkt->pts;
1931 if (s->current_picture.f->key_frame)
1932 pkt->flags |= AV_PKT_FLAG_KEY;
1934 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
1939 /* release non-reference frames */
1940 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1941 if (!s->picture[i].reference)
1942 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1945 av_assert1((s->frame_bits & 7) == 0);
1947 pkt->size = s->frame_bits / 8;
1948 *got_packet = !!pkt->size;
/*
 * Zero out block n entirely when it holds only a few small coefficients.
 * Each nonzero small coefficient contributes a position-dependent weight
 * from tab[] (low frequencies cost more); any coefficient > 1 disables
 * elimination. If the total score stays below |threshold| the block is
 * cleared. A negative threshold means the DC coefficient (index 0) is
 * preserved (skip_dc path), leaving block_last_index at 0 instead of -1.
 */
1952 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1953 int n, int threshold)
1955 static const char tab[64] = {
1956 3, 2, 2, 1, 1, 1, 1, 1,
1957 1, 1, 1, 1, 1, 1, 1, 1,
1958 1, 1, 1, 1, 1, 1, 1, 1,
1959 0, 0, 0, 0, 0, 0, 0, 0,
1960 0, 0, 0, 0, 0, 0, 0, 0,
1961 0, 0, 0, 0, 0, 0, 0, 0,
1962 0, 0, 0, 0, 0, 0, 0, 0,
1963 0, 0, 0, 0, 0, 0, 0, 0
1968 int16_t *block = s->block[n];
1969 const int last_index = s->block_last_index[n];
/* negative threshold => keep DC, eliminate only AC coefficients */
1972 if (threshold < 0) {
1974 threshold = -threshold;
1978 /* Are all we could set to zero already zero? */
1979 if (last_index <= skip_dc - 1)
/* score the nonzero coefficients in scan order */
1982 for (i = 0; i <= last_index; i++) {
1983 const int j = s->intra_scantable.permutated[i];
1984 const int level = FFABS(block[j]);
1986 if (skip_dc && i == 0)
/* any coefficient with |level| > 1 makes the block worth keeping */
1990 } else if (level > 1) {
1996 if (score >= threshold)
/* clear the (AC) coefficients */
1998 for (i = skip_dc; i <= last_index; i++) {
1999 const int j = s->intra_scantable.permutated[i];
2003 s->block_last_index[n] = 0;
2005 s->block_last_index[n] = -1;
/*
 * Clamp quantized coefficients of one block into the codec's representable
 * range [min_qcoeff, max_qcoeff], counting how many overflowed.
 * The intra DC coefficient is skipped (i starts at 1 on the visible path).
 * Emits a warning when clipping occurred and the encoder runs with
 * mb_decision == FF_MB_DECISION_SIMPLE (no RD fallback available).
 */
2008 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2012 const int maxlevel = s->max_qcoeff;
2013 const int minlevel = s->min_qcoeff;
2017 i = 1; // skip clipping of intra dc
2021 for (; i <= last_index; i++) {
2022 const int j = s->intra_scantable.permutated[i];
2023 int level = block[j];
2025 if (level > maxlevel) {
2028 } else if (level < minlevel) {
2036 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2037 av_log(s->avctx, AV_LOG_INFO,
2038 "warning, clipping %d dct coefficients to %d..%d\n",
2039 overflow, minlevel, maxlevel);
/*
 * Compute a perceptual weight for each pixel of an 8x8 block for
 * noise-shaping quantization: for every pixel, examine its 3x3
 * neighbourhood (clamped at the block edges) and derive the weight from
 * the local standard deviation — flat areas get larger weights, so
 * quantization error is pushed into textured regions where it is less
 * visible.
 */
2042 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2046 for (y = 0; y < 8; y++) {
2047 for (x = 0; x < 8; x++) {
/* 3x3 neighbourhood, clamped to the 8x8 block */
2053 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2054 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2055 int v = ptr[x2 + y2 * stride];
/* weight ~ 36 * sqrt(count*sum(v^2) - sum(v)^2) / count (local stddev) */
2061 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
/*
 * Encode one macroblock: fetch/compute the pixel data (intra: get_pixels,
 * inter: motion compensate then diff_pixels), optionally choose interlaced
 * DCT, quantize all blocks, apply coefficient elimination / noise shaping,
 * and finally emit the bitstream via the codec-specific encode_mb function.
 * mb_block_height/mb_block_count parametrize the chroma layout
 * (6 blocks for 4:2:0, 8 for 4:2:2, 12 for 4:4:4).
 */
2066 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2067 int motion_x, int motion_y,
2068 int mb_block_height,
2072 int16_t weight[12][64];
2073 int16_t orig[12][64];
2074 const int mb_x = s->mb_x;
2075 const int mb_y = s->mb_y;
2078 int dct_offset = s->linesize * 8; // default for progressive frames
2079 int uv_dct_offset = s->uvlinesize * 8;
2080 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2081 ptrdiff_t wrap_y, wrap_c;
2083 for (i = 0; i < mb_block_count; i++)
2084 skip_dct[i] = s->skipdct;
/* adaptive quantization: pick this MB's lambda/qscale from the tables */
2086 if (s->adaptive_quant) {
2087 const int last_qp = s->qscale;
2088 const int mb_xy = mb_x + mb_y * s->mb_stride;
2090 s->lambda = s->lambda_table[mb_xy];
2093 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2094 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2095 s->dquant = s->qscale - last_qp;
/* H.263-family syntax limits dquant to [-2,2]; MPEG-4 B/8x8 restrictions */
2097 if (s->out_format == FMT_H263) {
2098 s->dquant = av_clip(s->dquant, -2, 2);
2100 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2102 if (s->pict_type == AV_PICTURE_TYPE_B) {
2103 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2106 if (s->mv_type == MV_TYPE_8X8)
2112 ff_set_qscale(s, last_qp + s->dquant);
2113 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2114 ff_set_qscale(s, s->qscale + s->dquant);
2116 wrap_y = s->linesize;
2117 wrap_c = s->uvlinesize;
/* source pointers into the picture being encoded */
2118 ptr_y = s->new_picture.f->data[0] +
2119 (mb_y * 16 * wrap_y) + mb_x * 16;
2120 ptr_cb = s->new_picture.f->data[1] +
2121 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2122 ptr_cr = s->new_picture.f->data[2] +
2123 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
/* edge macroblocks: replicate pixels into the emu buffer */
2125 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2126 uint8_t *ebuf = s->sc.edge_emu_buffer + 36 * wrap_y;
2127 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2128 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2129 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2131 16, 16, mb_x * 16, mb_y * 16,
2132 s->width, s->height);
2134 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2136 mb_block_width, mb_block_height,
2137 mb_x * mb_block_width, mb_y * mb_block_height,
2139 ptr_cb = ebuf + 16 * wrap_y;
2140 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2142 mb_block_width, mb_block_height,
2143 mb_x * mb_block_width, mb_y * mb_block_height,
2145 ptr_cr = ebuf + 16 * wrap_y + 16;
/* intra path: decide frame vs field DCT, then read source pixels */
2149 if (s->avctx->flags & CODEC_FLAG_INTERLACED_DCT) {
2150 int progressive_score, interlaced_score;
2152 s->interlaced_dct = 0;
2153 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2154 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2155 NULL, wrap_y, 8) - 400;
2157 if (progressive_score > 0) {
2158 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2159 NULL, wrap_y * 2, 8) +
2160 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2161 NULL, wrap_y * 2, 8);
2162 if (progressive_score > interlaced_score) {
2163 s->interlaced_dct = 1;
/* field DCT: bottom blocks are one line (not 8 lines) below */
2165 dct_offset = wrap_y;
2166 uv_dct_offset = wrap_c;
2168 if (s->chroma_format == CHROMA_422 ||
2169 s->chroma_format == CHROMA_444)
2175 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2176 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2177 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2178 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2180 if (s->avctx->flags & CODEC_FLAG_GRAY) {
2184 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2185 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2186 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2187 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2188 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2189 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2190 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2191 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2192 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2193 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2194 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2195 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
/* inter path: motion-compensate the prediction, then diff against source */
2199 op_pixels_func (*op_pix)[4];
2200 qpel_mc_func (*op_qpix)[16];
2201 uint8_t *dest_y, *dest_cb, *dest_cr;
2203 dest_y = s->dest[0];
2204 dest_cb = s->dest[1];
2205 dest_cr = s->dest[2];
2207 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2208 op_pix = s->hdsp.put_pixels_tab;
2209 op_qpix = s->qdsp.put_qpel_pixels_tab;
2211 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2212 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2215 if (s->mv_dir & MV_DIR_FORWARD) {
2216 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2217 s->last_picture.f->data,
/* second direction averages on top of the first (bidirectional) */
2219 op_pix = s->hdsp.avg_pixels_tab;
2220 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2222 if (s->mv_dir & MV_DIR_BACKWARD) {
2223 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2224 s->next_picture.f->data,
/* interlaced DCT decision for the inter (residual) case */
2228 if (s->avctx->flags & CODEC_FLAG_INTERLACED_DCT) {
2229 int progressive_score, interlaced_score;
2231 s->interlaced_dct = 0;
2232 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2233 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2237 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2238 progressive_score -= 400;
2240 if (progressive_score > 0) {
2241 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2243 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2247 if (progressive_score > interlaced_score) {
2248 s->interlaced_dct = 1;
2250 dct_offset = wrap_y;
2251 uv_dct_offset = wrap_c;
2253 if (s->chroma_format == CHROMA_422)
2259 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2260 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2261 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2262 dest_y + dct_offset, wrap_y);
2263 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2264 dest_y + dct_offset + 8, wrap_y);
2266 if (s->avctx->flags & CODEC_FLAG_GRAY) {
2270 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2271 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2272 if (!s->chroma_y_shift) { /* 422 */
2273 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2274 dest_cb + uv_dct_offset, wrap_c);
2275 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2276 dest_cr + uv_dct_offset, wrap_c);
2279 /* pre quantization */
2280 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2281 2 * s->qscale * s->qscale) {
/* cheap SAD tests mark near-zero residual blocks to skip the DCT */
2283 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2285 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2287 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2288 wrap_y, 8) < 20 * s->qscale)
2290 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2291 wrap_y, 8) < 20 * s->qscale)
2293 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2295 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2297 if (!s->chroma_y_shift) { /* 422 */
2298 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2299 dest_cb + uv_dct_offset,
2300 wrap_c, 8) < 20 * s->qscale)
2302 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2303 dest_cr + uv_dct_offset,
2304 wrap_c, 8) < 20 * s->qscale)
/* noise shaping: compute perceptual weights and keep original blocks */
2310 if (s->quantizer_noise_shaping) {
2312 get_visual_weight(weight[0], ptr_y , wrap_y);
2314 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2316 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2318 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2320 get_visual_weight(weight[4], ptr_cb , wrap_c);
2322 get_visual_weight(weight[5], ptr_cr , wrap_c);
2323 if (!s->chroma_y_shift) { /* 422 */
2325 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2328 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2331 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2334 /* DCT & quantize */
2335 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2337 for (i = 0; i < mb_block_count; i++) {
2340 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2341 // FIXME we could decide to change to quantizer instead of
2343 // JS: I don't think that would be a good idea it could lower
2344 // quality instead of improve it. Just INTRADC clipping
2345 // deserves changes in quantizer
2347 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2349 s->block_last_index[i] = -1;
2351 if (s->quantizer_noise_shaping) {
2352 for (i = 0; i < mb_block_count; i++) {
2354 s->block_last_index[i] =
2355 dct_quantize_refine(s, s->block[i], weight[i],
2356 orig[i], i, s->qscale);
/* single-coefficient elimination for inter blocks */
2361 if (s->luma_elim_threshold && !s->mb_intra)
2362 for (i = 0; i < 4; i++)
2363 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2364 if (s->chroma_elim_threshold && !s->mb_intra)
2365 for (i = 4; i < mb_block_count; i++)
2366 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2368 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2369 for (i = 0; i < mb_block_count; i++) {
2370 if (s->block_last_index[i] == -1)
2371 s->coded_score[i] = INT_MAX / 256;
/* gray mode: force mid-gray chroma DC in intra blocks */
2376 if ((s->avctx->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
2377 s->block_last_index[4] =
2378 s->block_last_index[5] = 0;
2380 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2381 if (!s->chroma_y_shift) { /* 422 / 444 */
2382 for (i=6; i<12; i++) {
2383 s->block_last_index[i] = 0;
2384 s->block[i][0] = s->block[4][0];
2389 // non c quantize code returns incorrect block_last_index FIXME
2390 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2391 for (i = 0; i < mb_block_count; i++) {
2393 if (s->block_last_index[i] > 0) {
2394 for (j = 63; j > 0; j--) {
2395 if (s->block[i][s->intra_scantable.permutated[j]])
2398 s->block_last_index[i] = j;
2403 /* huffman encode */
2404 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2405 case AV_CODEC_ID_MPEG1VIDEO:
2406 case AV_CODEC_ID_MPEG2VIDEO:
2407 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2408 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2410 case AV_CODEC_ID_MPEG4:
2411 if (CONFIG_MPEG4_ENCODER)
2412 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2414 case AV_CODEC_ID_MSMPEG4V2:
2415 case AV_CODEC_ID_MSMPEG4V3:
2416 case AV_CODEC_ID_WMV1:
2417 if (CONFIG_MSMPEG4_ENCODER)
2418 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2420 case AV_CODEC_ID_WMV2:
2421 if (CONFIG_WMV2_ENCODER)
2422 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2424 case AV_CODEC_ID_H261:
2425 if (CONFIG_H261_ENCODER)
2426 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2428 case AV_CODEC_ID_H263:
2429 case AV_CODEC_ID_H263P:
2430 case AV_CODEC_ID_FLV1:
2431 case AV_CODEC_ID_RV10:
2432 case AV_CODEC_ID_RV20:
2433 if (CONFIG_H263_ENCODER)
2434 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2436 case AV_CODEC_ID_MJPEG:
2437 case AV_CODEC_ID_AMV:
2438 if (CONFIG_MJPEG_ENCODER)
2439 ff_mjpeg_encode_mb(s, s->block);
/* Thin dispatcher: calls encode_mb_internal() with per-chroma-format
 * parameters. The trailing three arguments select geometry and the
 * number of 8x8 blocks per macroblock (420 -> 6, 422 -> 8, else 12);
 * exact meaning of the first two — presumably MV/chroma block heights —
 * should be confirmed against encode_mb_internal(). */
2446 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2448 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2449 else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2450 else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/* Snapshot the encoder state that encoding a macroblock can modify,
 * from context s into context d, so a candidate encode can later be
 * rolled back (used by the RD macroblock-decision loop in encode_mb_hq()).
 * 'type' selects the candidate MB type; its use is in lines elided from
 * this view. Mirrors copy_context_after_encode() below. */
2453 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2456 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
     /* MPEG-1/H.263 run-length and DC-prediction state */
2459 d->mb_skip_run= s->mb_skip_run;
2461 d->last_dc[i] = s->last_dc[i];
     /* bit-usage statistics (needed so a rejected candidate does not
      * pollute the rate-control accounting) */
2464 d->mv_bits= s->mv_bits;
2465 d->i_tex_bits= s->i_tex_bits;
2466 d->p_tex_bits= s->p_tex_bits;
2467 d->i_count= s->i_count;
2468 d->f_count= s->f_count;
2469 d->b_count= s->b_count;
2470 d->skip_count= s->skip_count;
2471 d->misc_bits= s->misc_bits;
     /* quantizer state */
2475 d->qscale= s->qscale;
2476 d->dquant= s->dquant;
     /* MSMPEG4 escape-code state */
2478 d->esc3_level_length= s->esc3_level_length;
/* Counterpart of copy_context_before_encode(): copy the state produced
 * by encoding a candidate macroblock from s into d. Compared to the
 * "before" copy it additionally records the encode results (mb_intra,
 * mb_skipped, mv/mv_type/mv_dir, block_last_index, interlaced_dct and,
 * with data partitioning, the partition bitstream writers). */
2481 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2484 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2485 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
     /* run-length / DC prediction state */
2488 d->mb_skip_run= s->mb_skip_run;
2490 d->last_dc[i] = s->last_dc[i];
     /* bit-usage statistics */
2493 d->mv_bits= s->mv_bits;
2494 d->i_tex_bits= s->i_tex_bits;
2495 d->p_tex_bits= s->p_tex_bits;
2496 d->i_count= s->i_count;
2497 d->f_count= s->f_count;
2498 d->b_count= s->b_count;
2499 d->skip_count= s->skip_count;
2500 d->misc_bits= s->misc_bits;
     /* outcome of the candidate encode */
2502 d->mb_intra= s->mb_intra;
2503 d->mb_skipped= s->mb_skipped;
2504 d->mv_type= s->mv_type;
2505 d->mv_dir= s->mv_dir;
2507 if(s->data_partitioning){
2509 d->tex_pb= s->tex_pb;
2513 d->block_last_index[i]= s->block_last_index[i];
2514 d->interlaced_dct= s->interlaced_dct;
2515 d->qscale= s->qscale;
     /* MSMPEG4 escape-code state */
2517 d->esc3_level_length= s->esc3_level_length;
/* Encode one macroblock as candidate 'type' for the rate-distortion
 * MB decision: restore the pre-encode state from 'backup', encode into
 * the scratch bitstream pb[*next_block] (and, with data partitioning,
 * pb2/tex_pb), score the result, and — in code elided from this view —
 * keep it in 'best' if the score beats *dmin.
 *
 * In FF_MB_DECISION_RD mode the MB is additionally decoded into a
 * scratchpad (s->dest redirected to sc.rd_scratchpad) so the score is
 * bits*lambda2 + SSE<<FF_LAMBDA_SHIFT; otherwise only the bit count is
 * used. */
2520 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2521 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2522 int *dmin, int *next_block, int motion_x, int motion_y)
2525 uint8_t *dest_backup[3];
2527 copy_context_before_encode(s, backup, type);
     /* double-buffered scratch areas: *next_block selects which of the
      * two candidate buffers this attempt writes into */
2529 s->block= s->blocks[*next_block];
2530 s->pb= pb[*next_block];
2531 if(s->data_partitioning){
2532 s->pb2 = pb2 [*next_block];
2533 s->tex_pb= tex_pb[*next_block];
     /* redirect reconstruction output to the RD scratchpad so the real
      * picture is untouched until a winner is chosen */
2537 memcpy(dest_backup, s->dest, sizeof(s->dest));
2538 s->dest[0] = s->sc.rd_scratchpad;
2539 s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2540 s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
     /* scratchpad layout above assumes at least 32 bytes per line */
2541 av_assert0(s->linesize >= 32); //FIXME
2544 encode_mb(s, motion_x, motion_y);
2546 score= put_bits_count(&s->pb);
2547 if(s->data_partitioning){
2548 score+= put_bits_count(&s->pb2);
2549 score+= put_bits_count(&s->tex_pb);
2552 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2553 ff_mpv_decode_mb(s, s->block);
     /* RD cost: rate * lambda2 + distortion (SSE) in lambda units */
2555 score *= s->lambda2;
2556 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2560 memcpy(s->dest, dest_backup, sizeof(s->dest));
2567 copy_context_after_encode(best, s, type);
/* Sum of squared errors between two w x h pixel planes with the given
 * stride. Uses the optimized mecc.sse[] functions for the common 16x16
 * and 8x8 cases; otherwise falls back to a scalar loop driven by the
 * ff_square_tab lookup table (biased by +256 so negative differences
 * index correctly). The fallback's loop header/return are elided here. */
2571 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2572 uint32_t *sq = ff_square_tab + 256;
2577 return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2578 else if(w==8 && h==8)
2579 return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2583 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/* Distortion of the current macroblock: compares the source frame
 * (new_picture) against the reconstruction in s->dest for luma and both
 * chroma planes. Full (16x16) macroblocks use the fast mecc.sse/nsse
 * functions — NSSE when the user selected FF_CMP_NSSE as mb_cmp —
 * while edge macroblocks (w/h clipped at the picture border) fall back
 * to the generic sse() helper. Note the fast path assumes 4:2:0 chroma
 * (8x8 chroma blocks). */
2592 static int sse_mb(MpegEncContext *s){
     /* clip the MB to the picture dimensions at the right/bottom edges */
2596 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2597 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2600 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2601 return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2602 s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2603 s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2605 return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2606 s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2607 s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
     /* border MBs: generic SSE on the clipped region (chroma halved) */
2610 return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2611 +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2612 +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/* Slice-thread worker (avctx->execute callback): run the cheap
 * pre-pass motion estimation for every macroblock of this slice,
 * iterating bottom-up / right-to-left. Uses the pre_dia_size search
 * diamond. 'arg' is a pointer to this slice's MpegEncContext pointer. */
2615 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2616 MpegEncContext *s= *(void**)arg;
2620 s->me.dia_size= s->avctx->pre_dia_size;
2621 s->first_slice_line=1;
     /* reverse scan order (bottom-up) — intentional for the pre-pass */
2622 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2623 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2624 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2626 s->first_slice_line=0;
/* Slice-thread worker: full motion estimation for this slice's
 * macroblocks, in normal raster order. Chooses B- or P-frame estimation
 * per picture type; results (MVs and mb_type) are stored in the context
 * tables for the later encode pass. */
2634 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2635 MpegEncContext *s= *(void**)arg;
2637 ff_check_alignment();
2639 s->me.dia_size= s->avctx->dia_size;
2640 s->first_slice_line=1;
2641 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2642 s->mb_x=0; //for block init below
2643 ff_init_block_index(s);
2644 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
     /* advance the four luma block indices by one MB (2 blocks wide) */
2645 s->block_index[0]+=2;
2646 s->block_index[1]+=2;
2647 s->block_index[2]+=2;
2648 s->block_index[3]+=2;
2650 /* compute motion vector & mb_type and store in context */
2651 if(s->pict_type==AV_PICTURE_TYPE_B)
2652 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2654 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2656 s->first_slice_line=0;
/* Slice-thread worker: compute the spatial variance and mean of each
 * 16x16 luma macroblock of the source frame, storing them in
 * current_picture.mb_var / mb_mean and accumulating the per-slice
 * variance sum (used by rate control / scene-change detection). */
2661 static int mb_var_thread(AVCodecContext *c, void *arg){
2662 MpegEncContext *s= *(void**)arg;
2665 ff_check_alignment();
2667 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2668 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2671 uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2673 int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
     /* variance = E[x^2] - E[x]^2 over the 256 pixels, with rounding;
      * sum*sum>>8 then >>8 divides by 256 twice */
2675 varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2676 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2678 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2679 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2680 s->me.mb_var_sum_temp += varc;
/* Finalize the current slice's bitstream: merge MPEG-4 data partitions
 * and write stuffing (MPEG-4) or MJPEG stuffing, then byte-align and
 * flush the PutBitContext. With 2-pass rate control (PASS1 here names
 * the stats-gathering pass) the alignment bits are booked as misc_bits. */
2686 static void write_slice_end(MpegEncContext *s){
2687 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2688 if(s->partitioned_frame){
2689 ff_mpeg4_merge_partitions(s);
2692 ff_mpeg4_stuffing(&s->pb);
2693 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2694 ff_mjpeg_encode_stuffing(s);
2697 avpriv_align_put_bits(&s->pb);
2698 flush_put_bits(&s->pb);
2700 if ((s->avctx->flags & CODEC_FLAG_PASS1) && !s->partitioned_frame)
2701 s->misc_bits+= get_bits_diff(s);
/* Append one 12-byte macroblock-info record to the side-data buffer
 * (mb_info_ptr): bit offset of the MB in the packet, qscale, GOB number,
 * MB address within the GOB, and the predicted H.263 motion vector
 * (hmv1/vmv1). The 4MV predictors (hmv2/vmv2) are written as 0.
 * Presumably consumed for RFC 2190-style RTP packetization — confirm
 * against the AV_PKT_DATA_H263_MB_INFO consumers. */
2704 static void write_mb_info(MpegEncContext *s)
     /* -12: fill the record slot reserved by update_mb_info() */
2706 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2707 int offset = put_bits_count(&s->pb);
2708 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2709 int gobn = s->mb_y / s->gob_index;
2711 if (CONFIG_H263_ENCODER)
2712 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2713 bytestream_put_le32(&ptr, offset);
2714 bytestream_put_byte(&ptr, s->qscale);
2715 bytestream_put_byte(&ptr, gobn);
2716 bytestream_put_le16(&ptr, mba);
2717 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2718 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2719 /* 4MV not implemented */
2720 bytestream_put_byte(&ptr, 0); /* hmv2 */
2721 bytestream_put_byte(&ptr, 0); /* vmv2 */
/* Bookkeeping for the mb_info side data: once at least s->mb_info bytes
 * of bitstream have been produced since the last record, reserve a new
 * 12-byte slot (filled later by write_mb_info()). When called with
 * startcode set, remember the position just after the start code so the
 * record points at the resynchronizable location. */
2724 static void update_mb_info(MpegEncContext *s, int startcode)
2728 if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2729 s->mb_info_size += 12;
2730 s->prev_mb_info = s->last_mb_info;
2733 s->prev_mb_info = put_bits_count(&s->pb)/8;
2734 /* This might have incremented mb_info_size above, and we return without
2735 * actually writing any info into that slot yet. But in that case,
2736 * this will be called again at the start of the after writing the
2737 * start code, actually writing the mb info. */
2741 s->last_mb_info = put_bits_count(&s->pb)/8;
2742 if (!s->mb_info_size)
2743 s->mb_info_size += 12;
/* Grow the shared output bitstream buffer when fewer than 'threshold'
 * bytes remain. Only possible with a single slice context and when the
 * PutBitContext writes into avctx's internal byte_buffer. On success
 * the PutBitContext is rebased onto the new buffer and the saved raw
 * pointers into it (ptr_lastgob, vbv_delay_ptr) are re-derived from
 * their old offsets. Returns 0 on success (elided here), AVERROR(ENOMEM)
 * on allocation failure, AVERROR(EINVAL) if still too small afterwards. */
2747 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2749 if ( s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2750 && s->slice_context_count == 1
2751 && s->pb.buf == s->avctx->internal->byte_buffer) {
     /* save offsets of raw pointers before the buffer moves */
2752 int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2753 int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2755 uint8_t *new_buffer = NULL;
2756 int new_buffer_size = 0;
2758 av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2759 s->avctx->internal->byte_buffer_size + size_increase);
2761 return AVERROR(ENOMEM);
2763 memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2764 av_free(s->avctx->internal->byte_buffer);
2765 s->avctx->internal->byte_buffer = new_buffer;
2766 s->avctx->internal->byte_buffer_size = new_buffer_size;
2767 rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
     /* restore saved pointers relative to the new buffer */
2768 s->ptr_lastgob = s->pb.buf + lastgob_pos;
2769 s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2771 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2772 return AVERROR(EINVAL);
/* Main slice-encoding worker (avctx->execute callback): encodes every
 * macroblock of this slice context.
 *
 * Two paths per MB:
 *  - RD/trial path: when more than one candidate MB type is possible (or
 *    FF_MPV_FLAG_QP_RD), each candidate is encoded into double-buffered
 *    scratch bitstreams via encode_mb_hq() and the cheapest (dmin) wins;
 *    the winning bits are then copied into the real bitstream.
 *  - Direct path: a single MB type is set up from the ME tables and
 *    encoded straight into s->pb via encode_mb().
 * The loop also handles GOB/slice/video-packet headers, buffer growth,
 * deterministic error insertion (error_rate), RTP callbacks, mb_info
 * side data, loop filtering, and PSNR error accumulation.
 * Note: several original lines are elided from this view. */
2776 static int encode_thread(AVCodecContext *c, void *arg){
2777 MpegEncContext *s= *(void**)arg;
2778 int mb_x, mb_y, pdif = 0;
2779 int chr_h= 16>>s->chroma_y_shift;
2781 MpegEncContext best_s = { 0 }, backup_s;
     /* double-buffered scratch bitstreams for the candidate encodes:
      * main, partition 2 and texture partition (MPEG-4 partitioning) */
2782 uint8_t bit_buf[2][MAX_MB_BYTES];
2783 uint8_t bit_buf2[2][MAX_MB_BYTES];
2784 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2785 PutBitContext pb[2], pb2[2], tex_pb[2];
2787 ff_check_alignment();
2790 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2791 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2792 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2795 s->last_bits= put_bits_count(&s->pb);
2806 /* init last dc values */
2807 /* note: quant matrix value (8) is implied here */
2808 s->last_dc[i] = 128 << s->intra_dc_precision;
2810 s->current_picture.error[i] = 0;
2812 if(s->codec_id==AV_CODEC_ID_AMV){
     /* AMV uses different DC predictor resets per plane */
2813 s->last_dc[0] = 128*8/13;
2814 s->last_dc[1] = 128*8/14;
2815 s->last_dc[2] = 128*8/14;
2818 memset(s->last_mv, 0, sizeof(s->last_mv));
2822 switch(s->codec_id){
2823 case AV_CODEC_ID_H263:
2824 case AV_CODEC_ID_H263P:
2825 case AV_CODEC_ID_FLV1:
2826 if (CONFIG_H263_ENCODER)
2827 s->gob_index = H263_GOB_HEIGHT(s->height);
2829 case AV_CODEC_ID_MPEG4:
2830 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2831 ff_mpeg4_init_partitions(s);
2837 s->first_slice_line = 1;
2838 s->ptr_lastgob = s->pb.buf;
2839 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2843 ff_set_qscale(s, s->qscale);
2844 ff_init_block_index(s);
2846 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2847 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2848 int mb_type= s->mb_type[xy];
     /* grow the output buffer if a worst-case MB might not fit */
2852 int size_increase = s->avctx->internal->byte_buffer_size/4
2853 + s->mb_width*MAX_MB_BYTES;
2855 ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
2856 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2857 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2860 if(s->data_partitioning){
2861 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
2862 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2863 av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2869 s->mb_y = mb_y; // moved into loop, can get changed by H.261
2870 ff_update_block_index(s);
2872 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
     /* H.261 has its own MB scan order; recompute xy/mb_type */
2873 ff_h261_reorder_mb_index(s);
2874 xy= s->mb_y*s->mb_stride + s->mb_x;
2875 mb_type= s->mb_type[xy];
2878 /* write gob / video packet header */
2880 int current_packet_size, is_gob_start;
2882 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2884 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2886 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2888 switch(s->codec_id){
2889 case AV_CODEC_ID_H263:
2890 case AV_CODEC_ID_H263P:
2891 if(!s->h263_slice_structured)
2892 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2894 case AV_CODEC_ID_MPEG2VIDEO:
2895 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2896 case AV_CODEC_ID_MPEG1VIDEO:
2897 if(s->mb_skip_run) is_gob_start=0;
2899 case AV_CODEC_ID_MJPEG:
2900 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2905 if(s->start_mb_y != mb_y || mb_x!=0){
2908 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2909 ff_mpeg4_init_partitions(s);
2913 av_assert2((put_bits_count(&s->pb)&7) == 0);
2914 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
     /* deterministic error insertion for robustness testing: every
      * d-th packet is dropped by rewinding to the last GOB start */
2916 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
2917 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2918 int d = 100 / s->error_rate;
2920 current_packet_size=0;
2921 s->pb.buf_ptr= s->ptr_lastgob;
2922 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2926 if (s->avctx->rtp_callback){
2927 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2928 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2930 update_mb_info(s, 1);
2932 switch(s->codec_id){
2933 case AV_CODEC_ID_MPEG4:
2934 if (CONFIG_MPEG4_ENCODER) {
2935 ff_mpeg4_encode_video_packet_header(s);
2936 ff_mpeg4_clean_buffers(s);
2939 case AV_CODEC_ID_MPEG1VIDEO:
2940 case AV_CODEC_ID_MPEG2VIDEO:
2941 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2942 ff_mpeg1_encode_slice_header(s);
2943 ff_mpeg1_clean_buffers(s);
2946 case AV_CODEC_ID_H263:
2947 case AV_CODEC_ID_H263P:
2948 if (CONFIG_H263_ENCODER)
2949 ff_h263_encode_gob_header(s, mb_y);
2953 if (s->avctx->flags & CODEC_FLAG_PASS1) {
2954 int bits= put_bits_count(&s->pb);
2955 s->misc_bits+= bits - s->last_bits;
2959 s->ptr_lastgob += current_packet_size;
2960 s->first_slice_line=1;
2961 s->resync_mb_x=mb_x;
2962 s->resync_mb_y=mb_y;
2966 if( (s->resync_mb_x == s->mb_x)
2967 && s->resync_mb_y+1 == s->mb_y){
2968 s->first_slice_line=0;
2972 s->dquant=0; //only for QP_RD
2974 update_mb_info(s, 0);
2976 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2978 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2980 copy_context_before_encode(&backup_s, s, -1);
2982 best_s.data_partitioning= s->data_partitioning;
2983 best_s.partitioned_frame= s->partitioned_frame;
2984 if(s->data_partitioning){
2985 backup_s.pb2= s->pb2;
2986 backup_s.tex_pb= s->tex_pb;
     /* try each candidate MB type flagged by motion estimation; every
      * trial restores state from backup_s and updates best_s/dmin */
2989 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2990 s->mv_dir = MV_DIR_FORWARD;
2991 s->mv_type = MV_TYPE_16X16;
2993 s->mv[0][0][0] = s->p_mv_table[xy][0];
2994 s->mv[0][0][1] = s->p_mv_table[xy][1];
2995 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2996 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2998 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2999 s->mv_dir = MV_DIR_FORWARD;
3000 s->mv_type = MV_TYPE_FIELD;
3003 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3004 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3005 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3007 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3008 &dmin, &next_block, 0, 0);
3010 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3011 s->mv_dir = MV_DIR_FORWARD;
3012 s->mv_type = MV_TYPE_16X16;
3016 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3017 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3019 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3020 s->mv_dir = MV_DIR_FORWARD;
3021 s->mv_type = MV_TYPE_8X8;
3024 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3025 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3027 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3028 &dmin, &next_block, 0, 0);
3030 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3031 s->mv_dir = MV_DIR_FORWARD;
3032 s->mv_type = MV_TYPE_16X16;
3034 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3035 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3036 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3037 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3039 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3040 s->mv_dir = MV_DIR_BACKWARD;
3041 s->mv_type = MV_TYPE_16X16;
3043 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3044 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3045 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3046 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3048 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3049 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3050 s->mv_type = MV_TYPE_16X16;
3052 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3053 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3054 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3055 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3056 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3057 &dmin, &next_block, 0, 0);
3059 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3060 s->mv_dir = MV_DIR_FORWARD;
3061 s->mv_type = MV_TYPE_FIELD;
3064 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3065 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3066 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3068 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3069 &dmin, &next_block, 0, 0);
3071 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3072 s->mv_dir = MV_DIR_BACKWARD;
3073 s->mv_type = MV_TYPE_FIELD;
3076 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3077 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3078 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3080 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3081 &dmin, &next_block, 0, 0);
3083 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3084 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3085 s->mv_type = MV_TYPE_FIELD;
3087 for(dir=0; dir<2; dir++){
3089 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3090 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3091 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3094 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3095 &dmin, &next_block, 0, 0);
3097 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3099 s->mv_type = MV_TYPE_16X16;
3103 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3104 &dmin, &next_block, 0, 0);
3105 if(s->h263_pred || s->h263_aic){
3107 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3109 ff_clean_intra_table_entries(s); //old mode?
     /* QP_RD: re-encode the best candidate with neighbouring qscales,
      * saving/restoring DC and AC prediction state around each trial */
3113 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3114 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3115 const int last_qp= backup_s.qscale;
3118 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3119 static const int dquant_tab[4]={-1,1,-2,2};
3120 int storecoefs = s->mb_intra && s->dc_val[0];
3122 av_assert2(backup_s.dquant == 0);
3125 s->mv_dir= best_s.mv_dir;
3126 s->mv_type = MV_TYPE_16X16;
3127 s->mb_intra= best_s.mb_intra;
3128 s->mv[0][0][0] = best_s.mv[0][0][0];
3129 s->mv[0][0][1] = best_s.mv[0][0][1];
3130 s->mv[1][0][0] = best_s.mv[1][0][0];
3131 s->mv[1][0][1] = best_s.mv[1][0][1];
3133 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3134 for(; qpi<4; qpi++){
3135 int dquant= dquant_tab[qpi];
3136 qp= last_qp + dquant;
3137 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3139 backup_s.dquant= dquant;
3142 dc[i]= s->dc_val[0][ s->block_index[i] ];
3143 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3147 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3148 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3149 if(best_s.qscale != qp){
3152 s->dc_val[0][ s->block_index[i] ]= dc[i];
3153 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3160 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3161 int mx= s->b_direct_mv_table[xy][0];
3162 int my= s->b_direct_mv_table[xy][1];
3164 backup_s.dquant = 0;
3165 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3167 ff_mpeg4_set_direct_mv(s, mx, my);
3168 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3169 &dmin, &next_block, mx, my);
3171 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3172 backup_s.dquant = 0;
3173 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3175 ff_mpeg4_set_direct_mv(s, 0, 0);
3176 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3177 &dmin, &next_block, 0, 0);
     /* SKIP_RD: additionally try encoding the best non-intra mode as
      * coded-block-pattern-skipped, reusing the winning MVs */
3179 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3182 coded |= s->block_last_index[i];
3185 memcpy(s->mv, best_s.mv, sizeof(s->mv));
3186 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3187 mx=my=0; //FIXME find the one we actually used
3188 ff_mpeg4_set_direct_mv(s, mx, my);
3189 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3197 s->mv_dir= best_s.mv_dir;
3198 s->mv_type = best_s.mv_type;
3200 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3201 s->mv[0][0][1] = best_s.mv[0][0][1];
3202 s->mv[1][0][0] = best_s.mv[1][0][0];
3203 s->mv[1][0][1] = best_s.mv[1][0][1];*/
3206 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3207 &dmin, &next_block, mx, my);
3212 s->current_picture.qscale_table[xy] = best_s.qscale;
3214 copy_context_after_encode(s, &best_s, -1);
     /* commit the winning candidate: copy its scratch bits into the
      * real bitstream(s). next_block^1 indexes the winner's buffer. */
3216 pb_bits_count= put_bits_count(&s->pb);
3217 flush_put_bits(&s->pb);
3218 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3221 if(s->data_partitioning){
3222 pb2_bits_count= put_bits_count(&s->pb2);
3223 flush_put_bits(&s->pb2);
3224 avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3225 s->pb2= backup_s.pb2;
3227 tex_pb_bits_count= put_bits_count(&s->tex_pb);
3228 flush_put_bits(&s->tex_pb);
3229 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3230 s->tex_pb= backup_s.tex_pb;
3232 s->last_bits= put_bits_count(&s->pb);
3234 if (CONFIG_H263_ENCODER &&
3235 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3236 ff_h263_update_motion_val(s);
     /* winner's reconstruction still lives in the scratchpad for
      * next_block==0; copy it back into the real destination planes */
3238 if(next_block==0){ //FIXME 16 vs linesize16
3239 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3240 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3241 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3244 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3245 ff_mpv_decode_mb(s, s->block);
     /* direct (non-RD) path: exactly one candidate MB type */
3247 int motion_x = 0, motion_y = 0;
3248 s->mv_type=MV_TYPE_16X16;
3249 // only one MB-Type possible
3252 case CANDIDATE_MB_TYPE_INTRA:
3255 motion_x= s->mv[0][0][0] = 0;
3256 motion_y= s->mv[0][0][1] = 0;
3258 case CANDIDATE_MB_TYPE_INTER:
3259 s->mv_dir = MV_DIR_FORWARD;
3261 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3262 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3264 case CANDIDATE_MB_TYPE_INTER_I:
3265 s->mv_dir = MV_DIR_FORWARD;
3266 s->mv_type = MV_TYPE_FIELD;
3269 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3270 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3271 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3274 case CANDIDATE_MB_TYPE_INTER4V:
3275 s->mv_dir = MV_DIR_FORWARD;
3276 s->mv_type = MV_TYPE_8X8;
3279 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3280 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3283 case CANDIDATE_MB_TYPE_DIRECT:
3284 if (CONFIG_MPEG4_ENCODER) {
3285 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3287 motion_x=s->b_direct_mv_table[xy][0];
3288 motion_y=s->b_direct_mv_table[xy][1];
3289 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3292 case CANDIDATE_MB_TYPE_DIRECT0:
3293 if (CONFIG_MPEG4_ENCODER) {
3294 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3296 ff_mpeg4_set_direct_mv(s, 0, 0);
3299 case CANDIDATE_MB_TYPE_BIDIR:
3300 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3302 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3303 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3304 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3305 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3307 case CANDIDATE_MB_TYPE_BACKWARD:
3308 s->mv_dir = MV_DIR_BACKWARD;
3310 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3311 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3313 case CANDIDATE_MB_TYPE_FORWARD:
3314 s->mv_dir = MV_DIR_FORWARD;
3316 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3317 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3319 case CANDIDATE_MB_TYPE_FORWARD_I:
3320 s->mv_dir = MV_DIR_FORWARD;
3321 s->mv_type = MV_TYPE_FIELD;
3324 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3325 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3326 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3329 case CANDIDATE_MB_TYPE_BACKWARD_I:
3330 s->mv_dir = MV_DIR_BACKWARD;
3331 s->mv_type = MV_TYPE_FIELD;
3334 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3335 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3336 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3339 case CANDIDATE_MB_TYPE_BIDIR_I:
3340 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3341 s->mv_type = MV_TYPE_FIELD;
3343 for(dir=0; dir<2; dir++){
3345 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3346 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3347 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3352 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3355 encode_mb(s, motion_x, motion_y);
3357 // RAL: Update last macroblock type
3358 s->last_mv_dir = s->mv_dir;
3360 if (CONFIG_H263_ENCODER &&
3361 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3362 ff_h263_update_motion_val(s);
3364 ff_mpv_decode_mb(s, s->block);
3367 /* clean the MV table in IPS frames for direct mode in B frames */
3368 if(s->mb_intra /* && I,P,S_TYPE */){
3369 s->p_mv_table[xy][0]=0;
3370 s->p_mv_table[xy][1]=0;
     /* accumulate per-plane SSE for PSNR reporting (clipped at borders) */
3373 if (s->avctx->flags & CODEC_FLAG_PSNR) {
3377 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3378 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3380 s->current_picture.error[0] += sse(
3381 s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3382 s->dest[0], w, h, s->linesize);
3383 s->current_picture.error[1] += sse(
3384 s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3385 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3386 s->current_picture.error[2] += sse(
3387 s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3388 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3391 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3392 ff_h263_loop_filter(s);
3394 ff_dlog(s->avctx, "MB %d %d bits\n",
3395 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3399 //not beautiful here but we must write it before flushing so it has to be here
3400 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3401 ff_msmpeg4_encode_ext_header(s);
3405 /* Send the last GOB if RTP */
3406 if (s->avctx->rtp_callback) {
3407 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3408 pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3409 /* Call the RTP callback to send the last GOB */
3411 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
/* MERGE: add a slice context's accumulator into the main context and
 * zero the source so a later re-merge cannot double-count. */
3417 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold per-slice motion-estimation statistics into the main context
 * after the ME worker threads finish. */
3418 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3419 MERGE(me.scene_change_score);
3420 MERGE(me.mc_mb_var_sum_temp);
3421 MERGE(me.mb_var_sum_temp);
/* Fold per-slice encoding statistics into the main context after the
 * encode_thread workers finish, then append the slice's bitstream to
 * the main PutBitContext (slices are byte-aligned — see the asserts). */
3424 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3427 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3428 MERGE(dct_count[1]);
3437 MERGE(er.error_count);
3438 MERGE(padding_bug_score);
3439 MERGE(current_picture.error[0]);
3440 MERGE(current_picture.error[1]);
3441 MERGE(current_picture.error[2]);
     /* noise-reduction DCT error accumulators, per coefficient */
3443 if(dst->avctx->noise_reduction){
3444 for(i=0; i<64; i++){
3445 MERGE(dct_error_sum[0][i]);
3446 MERGE(dct_error_sum[1][i]);
3450 assert(put_bits_count(&src->pb) % 8 ==0);
3451 assert(put_bits_count(&dst->pb) % 8 ==0);
3452 avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3453 flush_put_bits(&dst->pb);
/* Pick the frame quality (lambda/qscale) before encoding: an explicitly
 * queued next_lambda wins, otherwise ask the rate controller unless a
 * fixed qscale was requested. With adaptive quantization, first sanitize
 * the per-MB qscale tables (codec-specific limits on qscale deltas) and
 * take lambda from the per-MB table; otherwise lambda follows the frame
 * quality. 'dry_run' estimates without consuming state (next_lambda is
 * only cleared on a real run). Error-return lines are elided here. */
3456 static int estimate_qp(MpegEncContext *s, int dry_run){
3457 if (s->next_lambda){
3458 s->current_picture_ptr->f->quality =
3459 s->current_picture.f->quality = s->next_lambda;
3460 if(!dry_run) s->next_lambda= 0;
3461 } else if (!s->fixed_qscale) {
3462 s->current_picture_ptr->f->quality =
3463 s->current_picture.f->quality = ff_rate_estimate_qscale(s, dry_run);
3464 if (s->current_picture.f->quality < 0)
3468 if(s->adaptive_quant){
3469 switch(s->codec_id){
3470 case AV_CODEC_ID_MPEG4:
3471 if (CONFIG_MPEG4_ENCODER)
3472 ff_clean_mpeg4_qscales(s);
3474 case AV_CODEC_ID_H263:
3475 case AV_CODEC_ID_H263P:
3476 case AV_CODEC_ID_FLV1:
3477 if (CONFIG_H263_ENCODER)
3478 ff_clean_h263_qscales(s);
3481 ff_init_qscale_tab(s);
3484 s->lambda= s->lambda_table[0];
3487 s->lambda = s->current_picture.f->quality;
3492 /* must be called before writing the header */
/* Update the temporal distances used for B-frame MV scaling:
 * pp_time = distance between the two surrounding non-B frames,
 * pb_time = distance from the previous non-B frame to this B frame.
 * Times are in time_base.num units derived from the frame pts. */
3493 static void set_frame_distances(MpegEncContext * s){
3494 av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3495 s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3497 if(s->pict_type==AV_PICTURE_TYPE_B){
3498 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3499 assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3501 s->pp_time= s->time - s->last_non_b_time;
3502 s->last_non_b_time= s->time;
3503 assert(s->picture_number==0 || s->pp_time > 0);
/**
 * Encode one picture: set up timing and rounding, run motion estimation
 * across slice threads, pick f_code/b_code, select the quantizer and
 * (for MJPEG/AMV) quant matrices, write the picture header, then run the
 * per-slice encode threads and merge their results.
 * @return presumably 0 on success / negative on error — the error paths
 *         are elided in this chunk.
 */
3507 static int encode_picture(MpegEncContext *s, int picture_number)
3511 int context_count = s->slice_context_count;
3513 s->picture_number = picture_number;
3515 /* Reset the average MB variance */
3516 s->me.mb_var_sum_temp =
3517 s->me.mc_mb_var_sum_temp = 0;
3519 /* we need to initialize some time vars before we can encode b-frames */
3520 // RAL: Condition added for MPEG1VIDEO
3521 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3522 set_frame_distances(s);
3523 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3524 ff_set_mpeg4_time(s);
3526 s->me.scene_change_score=0;
3528 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* Rounding control: I-frames reset it; non-B frames toggle it for codecs
 * using flip-flop rounding (H.263+/MPEG-4 style). */
3530 if(s->pict_type==AV_PICTURE_TYPE_I){
3531 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3532 else s->no_rounding=0;
3533 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3534 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3535 s->no_rounding ^= 1;
/* Two-pass: take qp/fcode from the stats file; otherwise reuse the last
 * lambda for this picture type as the ME starting point. */
3538 if (s->avctx->flags & CODEC_FLAG_PASS2) {
3539 if (estimate_qp(s,1) < 0)
3541 ff_get_2pass_fcode(s);
3542 } else if (!(s->avctx->flags & CODEC_FLAG_QSCALE)) {
3543 if(s->pict_type==AV_PICTURE_TYPE_B)
3544 s->lambda= s->last_lambda_for[s->pict_type];
3546 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* Outside MJPEG/AMV the chroma intra matrices alias the luma ones; free
 * any separate chroma copies before re-aliasing. */
3550 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3551 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3552 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3553 s->q_chroma_intra_matrix = s->q_intra_matrix;
3554 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3557 s->mb_intra=0; //for the rate distortion & bit compare functions
3558 for(i=1; i<context_count; i++){
3559 ret = ff_update_duplicate_context(s->thread_context[i], s);
3567 /* Estimate motion for every MB */
3568 if(s->pict_type != AV_PICTURE_TYPE_I){
3569 s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3570 s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3571 if (s->pict_type != AV_PICTURE_TYPE_B) {
3572 if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3573 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3577 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3578 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
/* I-frame: mark every MB intra. */
3580 for(i=0; i<s->mb_stride*s->mb_height; i++)
3581 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3583 if(!s->fixed_qscale){
3584 /* finding spatial complexity for I-frame rate control */
3585 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3588 for(i=1; i<context_count; i++){
3589 merge_context_after_me(s, s->thread_context[i]);
3591 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3592 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* Scene-cut detection: promote a P picture to I when the ME pass flagged
 * a scene change above the configured threshold. */
3595 if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3596 s->pict_type= AV_PICTURE_TYPE_I;
3597 for(i=0; i<s->mb_stride*s->mb_height; i++)
3598 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3599 if(s->msmpeg4_version >= 3)
3601 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3602 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* P/S pictures: choose the forward f_code from the MV statistics and clip
 * out-of-range vectors accordingly. */
3606 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3607 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3609 if (s->avctx->flags & CODEC_FLAG_INTERLACED_ME) {
3611 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3612 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3613 s->f_code= FFMAX3(s->f_code, a, b);
3616 ff_fix_long_p_mvs(s);
3617 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3618 if (s->avctx->flags & CODEC_FLAG_INTERLACED_ME) {
3622 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3623 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
/* B pictures: choose f_code (forward) and b_code (backward) independently
 * and clip all four MV table variants. */
3628 if(s->pict_type==AV_PICTURE_TYPE_B){
3631 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3632 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3633 s->f_code = FFMAX(a, b);
3635 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3636 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3637 s->b_code = FFMAX(a, b);
3639 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3640 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3641 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3642 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3643 if (s->avctx->flags & CODEC_FLAG_INTERLACED_ME) {
3645 for(dir=0; dir<2; dir++){
3648 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3649 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3650 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3651 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3659 if (estimate_qp(s, 0) < 0)
3662 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3663 s->pict_type == AV_PICTURE_TYPE_I &&
3664 !(s->avctx->flags & CODEC_FLAG_QSCALE))
3665 s->qscale= 3; //reduce clipping problems
/* MJPEG bakes qscale directly into the quant matrices. */
3667 if (s->out_format == FMT_MJPEG) {
3668 const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3669 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3671 if (s->avctx->intra_matrix) {
3673 luma_matrix = s->avctx->intra_matrix;
3675 if (s->avctx->chroma_intra_matrix)
3676 chroma_matrix = s->avctx->chroma_intra_matrix;
3678 /* for mjpeg, we do include qscale in the matrix */
3680 int j = s->idsp.idct_permutation[i];
3682 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3683 s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3685 s->y_dc_scale_table=
3686 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3687 s->chroma_intra_matrix[0] =
3688 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3689 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3690 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3691 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3692 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV uses fixed SP5X quant tables and constant DC scale tables. */
3695 if(s->codec_id == AV_CODEC_ID_AMV){
3696 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3697 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3699 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3701 s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3702 s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3704 s->y_dc_scale_table= y;
3705 s->c_dc_scale_table= c;
3706 s->intra_matrix[0] = 13;
3707 s->chroma_intra_matrix[0] = 14;
3708 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3709 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3710 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3711 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3715 //FIXME var duplication
3716 s->current_picture_ptr->f->key_frame =
3717 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3718 s->current_picture_ptr->f->pict_type =
3719 s->current_picture.f->pict_type = s->pict_type;
3721 if (s->current_picture.f->key_frame)
3722 s->picture_in_gop_number=0;
/* Write the per-format picture header and record its bit cost. */
3724 s->mb_x = s->mb_y = 0;
3725 s->last_bits= put_bits_count(&s->pb);
3726 switch(s->out_format) {
3728 if (CONFIG_MJPEG_ENCODER)
3729 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3730 s->intra_matrix, s->chroma_intra_matrix);
3733 if (CONFIG_H261_ENCODER)
3734 ff_h261_encode_picture_header(s, picture_number);
3737 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3738 ff_wmv2_encode_picture_header(s, picture_number);
3739 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3740 ff_msmpeg4_encode_picture_header(s, picture_number);
3741 else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
3742 ff_mpeg4_encode_picture_header(s, picture_number);
3743 else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3744 ret = ff_rv10_encode_picture_header(s, picture_number);
3748 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3749 ff_rv20_encode_picture_header(s, picture_number);
3750 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3751 ff_flv_encode_picture_header(s, picture_number);
3752 else if (CONFIG_H263_ENCODER)
3753 ff_h263_encode_picture_header(s, picture_number);
3756 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3757 ff_mpeg1_encode_picture_header(s, picture_number);
3762 bits= put_bits_count(&s->pb);
3763 s->header_bits= bits - s->last_bits;
/* Run the slice encode threads, then merge each slice's state and
 * bitstream back into the main context. */
3765 for(i=1; i<context_count; i++){
3766 update_duplicate_context_after_me(s->thread_context[i], s);
3768 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3769 for(i=1; i<context_count; i++){
3770 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3771 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
3772 merge_context_after_encode(s, s->thread_context[i]);
/**
 * DCT-domain noise reduction (C reference implementation).
 * Accumulates per-coefficient error statistics in dct_error_sum and shrinks
 * each coefficient toward zero by the learned dct_offset, clamping at zero
 * so the offset never flips a coefficient's sign.
 */
3778 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3779 const int intra= s->mb_intra;
3782 s->dct_count[intra]++;
3784 for(i=0; i<64; i++){
3785 int level= block[i];
/* Positive branch: record the level, subtract the offset, clamp at 0. */
3789 s->dct_error_sum[intra][i] += level;
3790 level -= s->dct_offset[intra][i];
3791 if(level<0) level=0;
/* Negative branch (mirror of the above). */
3793 s->dct_error_sum[intra][i] -= level;
3794 level += s->dct_offset[intra][i];
3795 if(level>0) level=0;
/**
 * Trellis (rate-distortion optimal) quantization of one 8x8 block.
 * Runs the forward DCT, quantizes each coefficient to at most two candidate
 * levels, then dynamic-programs over (run, level) survivors minimizing
 * distortion + lambda * bits, and writes the winning levels back into
 * block[] in permuted order.
 * @param n         block index (n < 4 is luma; n > 3 selects chroma tables)
 * @param overflow  set to nonzero when a level exceeded s->max_qcoeff
 * @return index of the last non-zero coefficient, or -1 if none
 */
3802 static int dct_quantize_trellis_c(MpegEncContext *s,
3803 int16_t *block, int n,
3804 int qscale, int *overflow){
3806 const uint16_t *matrix;
3807 const uint8_t *scantable= s->intra_scantable.scantable;
3808 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3810 unsigned int threshold1, threshold2;
3822 int coeff_count[64];
3823 int qmul, qadd, start_i, last_non_zero, i, dc;
3824 const int esc_length= s->ac_esc_length;
3826 uint8_t * last_length;
3827 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3829 s->fdsp.fdct(block);
3831 if(s->dct_error_sum)
3832 s->denoise_dct(s, block);
3834 qadd= ((qscale-1)|1)*8;
3845 /* For AIC we skip quant/dequant of INTRADC */
3850 /* note: block[0] is assumed to be positive */
3851 block[0] = (block[0] + (q >> 1)) / q;
/* Intra path: pick luma vs chroma quant matrices and VLC length tables. */
3854 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
3855 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
3856 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
3857 bias= 1<<(QMAT_SHIFT-1);
3859 if (n > 3 && s->intra_chroma_ac_vlc_length) {
3860 length = s->intra_chroma_ac_vlc_length;
3861 last_length= s->intra_chroma_ac_vlc_last_length;
3863 length = s->intra_ac_vlc_length;
3864 last_length= s->intra_ac_vlc_last_length;
/* Inter path. */
3869 qmat = s->q_inter_matrix[qscale];
3870 matrix = s->inter_matrix;
3871 length = s->inter_ac_vlc_length;
3872 last_length= s->inter_ac_vlc_last_length;
3876 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3877 threshold2= (threshold1<<1);
/* Scan backwards to find the last coefficient that survives quantization. */
3879 for(i=63; i>=start_i; i--) {
3880 const int j = scantable[i];
3881 int level = block[j] * qmat[j];
3883 if(((unsigned)(level+threshold1))>threshold2){
/* Forward pass: produce up to two candidate levels per coefficient
 * (coeff[0] = rounded level, coeff[1] = one closer to zero). */
3889 for(i=start_i; i<=last_non_zero; i++) {
3890 const int j = scantable[i];
3891 int level = block[j] * qmat[j];
3893 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3894 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
3895 if(((unsigned)(level+threshold1))>threshold2){
3897 level= (bias + level)>>QMAT_SHIFT;
3899 coeff[1][i]= level-1;
3900 // coeff[2][k]= level-2;
3902 level= (bias - level)>>QMAT_SHIFT;
3903 coeff[0][i]= -level;
3904 coeff[1][i]= -level+1;
3905 // coeff[2][k]= -level+2;
3907 coeff_count[i]= FFMIN(level, 2);
3908 av_assert2(coeff_count[i]);
3911 coeff[0][i]= (level>>31)|1;
3916 *overflow= s->max_qcoeff < max; //overflow might have happened
3918 if(last_non_zero < start_i){
3919 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3920 return last_non_zero;
/* Dynamic programming over scan positions; survivor[] holds candidate
 * predecessor positions still worth extending. */
3923 score_tab[start_i]= 0;
3924 survivor[0]= start_i;
3927 for(i=start_i; i<=last_non_zero; i++){
3928 int level_index, j, zero_distortion;
3929 int dct_coeff= FFABS(block[ scantable[i] ]);
3930 int best_score=256*256*256*120;
/* ifast FDCT output is AAN-scaled; undo the scale before measuring
 * distortion. */
3932 if (s->fdsp.fdct == ff_fdct_ifast)
3933 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
3934 zero_distortion= dct_coeff*dct_coeff;
3936 for(level_index=0; level_index < coeff_count[i]; level_index++){
3938 int level= coeff[level_index][i];
3939 const int alevel= FFABS(level);
/* Reconstruct the dequantized value per output format to compute the
 * true distortion of this candidate level. */
3944 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
3945 unquant_coeff= alevel*qmul + qadd;
3946 } else if(s->out_format == FMT_MJPEG) {
3947 j = s->idsp.idct_permutation[scantable[i]];
3948 unquant_coeff = alevel * matrix[j] * 8;
3950 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
3952 unquant_coeff = (int)( alevel * qscale * matrix[j]) >> 3;
3953 unquant_coeff = (unquant_coeff - 1) | 1;
3955 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) matrix[j])) >> 4;
3956 unquant_coeff = (unquant_coeff - 1) | 1;
3961 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* Small levels can use the regular VLC table; otherwise cost the
 * escape code. Each case also tracks the best "last coefficient". */
3963 if((level&(~127)) == 0){
3964 for(j=survivor_count-1; j>=0; j--){
3965 int run= i - survivor[j];
3966 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3967 score += score_tab[i-run];
3969 if(score < best_score){
3972 level_tab[i+1]= level-64;
3976 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
3977 for(j=survivor_count-1; j>=0; j--){
3978 int run= i - survivor[j];
3979 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3980 score += score_tab[i-run];
3981 if(score < last_score){
3984 last_level= level-64;
3990 distortion += esc_length*lambda;
3991 for(j=survivor_count-1; j>=0; j--){
3992 int run= i - survivor[j];
3993 int score= distortion + score_tab[i-run];
3995 if(score < best_score){
3998 level_tab[i+1]= level-64;
4002 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4003 for(j=survivor_count-1; j>=0; j--){
4004 int run= i - survivor[j];
4005 int score= distortion + score_tab[i-run];
4006 if(score < last_score){
4009 last_level= level-64;
4017 score_tab[i+1]= best_score;
4019 //Note: there is a vlc code in mpeg4 which is 1 bit shorter than another one with a shorter run and the same level
4020 if(last_non_zero <= 27){
4021 for(; survivor_count; survivor_count--){
4022 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4026 for(; survivor_count; survivor_count--){
4027 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4032 survivor[ survivor_count++ ]= i+1;
/* Non-H.26x formats: select the best stopping position now. */
4035 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4036 last_score= 256*256*256*120;
4037 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4038 int score= score_tab[i];
4039 if(i) score += lambda*2; //FIXME exacter?
4041 if(score < last_score){
4044 last_level= level_tab[i];
4045 last_run= run_tab[i];
4050 s->coded_score[n] = last_score;
4052 dc= FFABS(block[0]);
4053 last_non_zero= last_i - 1;
4054 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4056 if(last_non_zero < start_i)
4057 return last_non_zero;
/* Special case: only the DC-position coefficient survives — choose
 * between the candidate levels (or dropping it entirely) by RD score. */
4059 if(last_non_zero == 0 && start_i == 0){
4061 int best_score= dc * dc;
4063 for(i=0; i<coeff_count[0]; i++){
4064 int level= coeff[i][0];
4065 int alevel= FFABS(level);
4066 int unquant_coeff, score, distortion;
4068 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4069 unquant_coeff= (alevel*qmul + qadd)>>3;
4071 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) matrix[0])) >> 4;
4072 unquant_coeff = (unquant_coeff - 1) | 1;
4074 unquant_coeff = (unquant_coeff + 4) >> 3;
4075 unquant_coeff<<= 3 + 3;
4077 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4079 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4080 else score= distortion + esc_length*lambda;
4082 if(score < best_score){
4084 best_level= level - 64;
4087 block[0]= best_level;
4088 s->coded_score[n] = best_score - dc*dc;
4089 if(best_level == 0) return -1;
4090 else return last_non_zero;
/* Backtrack through run_tab/level_tab, writing winners into block[]. */
4094 av_assert2(last_level);
4096 block[ perm_scantable[last_non_zero] ]= last_level;
4099 for(; i>start_i; i -= run_tab[i] + 1){
4100 block[ perm_scantable[i-1] ]= level_tab[i];
4103 return last_non_zero;
4106 //#define REFINE_STATS 1
/* Fixed-point 8x8 IDCT basis functions, indexed by (permuted) coefficient
 * then by spatial position; filled lazily by build_basis(). */
4107 static int16_t basis[64][64];
/**
 * Build the scaled DCT basis table, applying the IDCT coefficient
 * permutation so basis[] can be indexed with permuted coefficient numbers.
 */
4109 static void build_basis(uint8_t *perm){
4116 double s= 0.25*(1<<BASIS_SHIFT);
4118 int perm_index= perm[index];
/* DC rows/columns carry the standard 1/sqrt(2) DCT normalization. */
4119 if(i==0) s*= sqrt(0.5);
4120 if(j==0) s*= sqrt(0.5);
4121 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/**
 * Iteratively refine an already-quantized block (noise shaping):
 * starting from the current levels, repeatedly try +/-1 changes on each
 * coefficient, scoring reconstruction error (via try_8x8basis on the
 * residual rem[]) plus the VLC bit-cost delta, and apply the best change
 * until no improvement remains.
 * @param weight per-coefficient perceptual weights
 * @param orig   original (spatial-domain) reference values
 * @return index of the last non-zero coefficient after refinement
 */
4128 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4129 int16_t *block, int16_t *weight, int16_t *orig,
4132 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4133 const uint8_t *scantable= s->intra_scantable.scantable;
4134 const uint8_t *perm_scantable= s->intra_scantable.permutated;
4135 // unsigned int threshold1, threshold2;
4140 int qmul, qadd, start_i, last_non_zero, i, dc;
4142 uint8_t * last_length;
4144 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* Debug counters (only active under REFINE_STATS). */
4147 static int after_last=0;
4148 static int to_zero=0;
4149 static int from_zero=0;
4152 static int messed_sign=0;
4155 if(basis[0][0] == 0)
4156 build_basis(s->idsp.idct_permutation);
4167 /* For AIC we skip quant/dequant of INTRADC */
4171 q <<= RECON_SHIFT-3;
4172 /* note: block[0] is assumed to be positive */
4174 // block[0] = (block[0] + (q >> 1)) / q;
4176 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4177 // bias= 1<<(QMAT_SHIFT-1);
4178 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4179 length = s->intra_chroma_ac_vlc_length;
4180 last_length= s->intra_chroma_ac_vlc_last_length;
4182 length = s->intra_ac_vlc_length;
4183 last_length= s->intra_ac_vlc_last_length;
4188 length = s->inter_ac_vlc_length;
4189 last_length= s->inter_ac_vlc_last_length;
4191 last_non_zero = s->block_last_index[n];
/* rem[] starts as the negated original signal (plus DC and rounding);
 * dequantized coefficients are added back so rem approximates the
 * reconstruction error. */
4196 dc += (1<<(RECON_SHIFT-1));
4197 for(i=0; i<64; i++){
4198 rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig dirrectly instead of copying to rem[]
4201 STOP_TIMER("memset rem[]")}
/* Derive integer weights (16..63) from weight[] and the qns strength. */
4204 for(i=0; i<64; i++){
4209 w= FFABS(weight[i]) + qns*one;
4210 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4213 // w=weight[i] = (63*qns + (w/2)) / w;
4216 av_assert2(w<(1<<6));
4219 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* Build the initial RLE and accumulate dequantized levels into rem[]. */
4225 for(i=start_i; i<=last_non_zero; i++){
4226 int j= perm_scantable[i];
4227 const int level= block[j];
4231 if(level<0) coeff= qmul*level - qadd;
4232 else coeff= qmul*level + qadd;
4233 run_tab[rle_index++]=run;
4236 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
4242 if(last_non_zero>0){
4243 STOP_TIMER("init rem[]")
/* Main refinement loop: evaluate candidate +/-1 changes, keep the best. */
4250 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4253 int run2, best_unquant_change=0, analyze_gradient;
4257 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
4259 if(analyze_gradient){
/* Gradient pre-pass: weighted residual in DCT domain, used to skip
 * changes that would move against the residual's sign. */
4263 for(i=0; i<64; i++){
4266 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
4269 STOP_TIMER("rem*w*w")}
/* DC coefficient: try raising/lowering by one (intra only). */
4279 const int level= block[0];
4280 int change, old_coeff;
4282 av_assert2(s->mb_intra);
4286 for(change=-1; change<=1; change+=2){
4287 int new_level= level + change;
4288 int score, new_coeff;
4290 new_coeff= q*new_level;
4291 if(new_coeff >= 2048 || new_coeff < 0)
4294 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4295 new_coeff - old_coeff);
4296 if(score<best_score){
4299 best_change= change;
4300 best_unquant_change= new_coeff - old_coeff;
4307 run2= run_tab[rle_index++];
/* AC coefficients: score each +/-1 change including the VLC bit-cost
 * delta for the affected (run, level) pairs. */
4311 for(i=start_i; i<64; i++){
4312 int j= perm_scantable[i];
4313 const int level= block[j];
4314 int change, old_coeff;
4316 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4320 if(level<0) old_coeff= qmul*level - qadd;
4321 else old_coeff= qmul*level + qadd;
4322 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4326 av_assert2(run2>=0 || i >= last_non_zero );
4329 for(change=-1; change<=1; change+=2){
4330 int new_level= level + change;
4331 int score, new_coeff, unquant_change;
4334 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4338 if(new_level<0) new_coeff= qmul*new_level - qadd;
4339 else new_coeff= qmul*new_level + qadd;
4340 if(new_coeff >= 2048 || new_coeff <= -2048)
4342 //FIXME check for overflow
/* Level change only (coefficient stays non-zero): simple VLC delta. */
4345 if(level < 63 && level > -63){
4346 if(i < last_non_zero)
4347 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4348 - length[UNI_AC_ENC_INDEX(run, level+64)];
4350 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4351 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* Coefficient becomes non-zero: splits a run, so the following
 * coefficient's VLC code changes too. */
4354 av_assert2(FFABS(new_level)==1);
4356 if(analyze_gradient){
4357 int g= d1[ scantable[i] ];
4358 if(g && (g^new_level) >= 0)
4362 if(i < last_non_zero){
4363 int next_i= i + run2 + 1;
4364 int next_level= block[ perm_scantable[next_i] ] + 64;
4366 if(next_level&(~127))
4369 if(next_i < last_non_zero)
4370 score += length[UNI_AC_ENC_INDEX(run, 65)]
4371 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4372 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4374 score += length[UNI_AC_ENC_INDEX(run, 65)]
4375 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4376 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4378 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4380 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4381 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* Coefficient becomes zero: merges two runs; inverse of the above. */
4387 av_assert2(FFABS(level)==1);
4389 if(i < last_non_zero){
4390 int next_i= i + run2 + 1;
4391 int next_level= block[ perm_scantable[next_i] ] + 64;
4393 if(next_level&(~127))
4396 if(next_i < last_non_zero)
4397 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4398 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4399 - length[UNI_AC_ENC_INDEX(run, 65)];
4401 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4402 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4403 - length[UNI_AC_ENC_INDEX(run, 65)];
4405 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4407 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4408 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4415 unquant_change= new_coeff - old_coeff;
4416 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4418 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4420 if(score<best_score){
4423 best_change= change;
4424 best_unquant_change= unquant_change;
4428 prev_level= level + 64;
4429 if(prev_level&(~127))
4438 STOP_TIMER("iterative step")}
/* Apply the winning change and update last_non_zero / rem[] / RLE. */
4442 int j= perm_scantable[ best_coeff ];
4444 block[j] += best_change;
4446 if(best_coeff > last_non_zero){
4447 last_non_zero= best_coeff;
4448 av_assert2(block[j]);
4455 if(block[j] - best_change){
4456 if(FFABS(block[j]) > FFABS(block[j] - best_change)){
4468 for(; last_non_zero>=start_i; last_non_zero--){
4469 if(block[perm_scantable[last_non_zero]])
4475 if(256*256*256*64 % count == 0){
4476 av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
/* Rebuild the RLE after the change. */
4481 for(i=start_i; i<=last_non_zero; i++){
4482 int j= perm_scantable[i];
4483 const int level= block[j];
4486 run_tab[rle_index++]=run;
4493 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4499 if(last_non_zero>0){
4500 STOP_TIMER("iterative search")
4505 return last_non_zero;
4509 * Permute an 8x8 block according to permutation.
4510 * @param block the block which will be permuted according to
4511 * the given permutation vector
4512 * @param permutation the permutation vector
4513 * @param last the last non zero coefficient in scantable order, used to
4514 * speed the permutation up
4515 * @param scantable the used scantable, this is only used to speed the
4516 * permutation up, the block is not (inverse) permutated
4517 * to scantable order!
4519 static void block_permute(int16_t *block, uint8_t *permutation,
4520 const uint8_t *scantable, int last)
4527 //FIXME it is ok but not clean and might fail for some permutations
4528 // if (permutation[1] == 1)
/* Copy the non-zero coefficients out, then write them back through the
 * permutation; two passes so overlapping source/destination are safe. */
4531 for (i = 0; i <= last; i++) {
4532 const int j = scantable[i];
4537 for (i = 0; i <= last; i++) {
4538 const int j = scantable[i];
4539 const int perm_j = permutation[j];
4540 block[perm_j] = temp[j];
/**
 * Plain (non-trellis) quantization of one 8x8 block: forward DCT,
 * optional DCT-domain denoising, then per-coefficient scaling by qmat with
 * bias, finally permuting the surviving coefficients for the IDCT.
 * @param n         block index (n < 4 luma, otherwise chroma)
 * @param overflow  set to nonzero when a level exceeded s->max_qcoeff
 * @return index of the last non-zero coefficient
 */
4544 int ff_dct_quantize_c(MpegEncContext *s,
4545 int16_t *block, int n,
4546 int qscale, int *overflow)
4548 int i, j, level, last_non_zero, q, start_i;
4550 const uint8_t *scantable= s->intra_scantable.scantable;
4553 unsigned int threshold1, threshold2;
4555 s->fdsp.fdct(block);
4557 if(s->dct_error_sum)
4558 s->denoise_dct(s, block);
4568 /* For AIC we skip quant/dequant of INTRADC */
4571 /* note: block[0] is assumed to be positive */
4572 block[0] = (block[0] + (q >> 1)) / q;
/* Select quant matrix and rounding bias (intra vs inter). */
4575 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4576 bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4580 qmat = s->q_inter_matrix[qscale];
4581 bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4583 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4584 threshold2= (threshold1<<1);
/* Backwards scan for the last coefficient surviving quantization. */
4585 for(i=63;i>=start_i;i--) {
4587 level = block[j] * qmat[j];
4589 if(((unsigned)(level+threshold1))>threshold2){
/* Forward pass: quantize each surviving coefficient in place. */
4596 for(i=start_i; i<=last_non_zero; i++) {
4598 level = block[j] * qmat[j];
4600 // if( bias+level >= (1<<QMAT_SHIFT)
4601 // || bias-level >= (1<<QMAT_SHIFT)){
4602 if(((unsigned)(level+threshold1))>threshold2){
4604 level= (bias + level)>>QMAT_SHIFT;
4607 level= (bias - level)>>QMAT_SHIFT;
4615 *overflow= s->max_qcoeff < max; //overflow might have happened
4617 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4618 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4619 block_permute(block, s->idsp.idct_permutation,
4620 scantable, last_non_zero);
4622 return last_non_zero;
4625 #define OFFSET(x) offsetof(MpegEncContext, x)
4626 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private encoder options exposed for the plain H.263 encoder. */
4627 static const AVOption h263_options[] = {
4628 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4629 { "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4630 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4635 static const AVClass h263_class = {
4636 .class_name = "H.263 encoder",
4637 .item_name = av_default_item_name,
4638 .option = h263_options,
4639 .version = LIBAVUTIL_VERSION_INT,
/* Codec registration for the H.263 encoder. */
4642 AVCodec ff_h263_encoder = {
4644 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4645 .type = AVMEDIA_TYPE_VIDEO,
4646 .id = AV_CODEC_ID_H263,
4647 .priv_data_size = sizeof(MpegEncContext),
4648 .init = ff_mpv_encode_init,
4649 .encode2 = ff_mpv_encode_picture,
4650 .close = ff_mpv_encode_end,
4651 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4652 .priv_class = &h263_class,
/* Private options for the H.263+ (H.263v2) encoder. */
4655 static const AVOption h263p_options[] = {
4656 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4657 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4658 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4659 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4663 static const AVClass h263p_class = {
4664 .class_name = "H.263p encoder",
4665 .item_name = av_default_item_name,
4666 .option = h263p_options,
4667 .version = LIBAVUTIL_VERSION_INT,
/* Codec registration for the H.263+ encoder (supports slice threading). */
4670 AVCodec ff_h263p_encoder = {
4672 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4673 .type = AVMEDIA_TYPE_VIDEO,
4674 .id = AV_CODEC_ID_H263P,
4675 .priv_data_size = sizeof(MpegEncContext),
4676 .init = ff_mpv_encode_init,
4677 .encode2 = ff_mpv_encode_picture,
4678 .close = ff_mpv_encode_end,
4679 .capabilities = CODEC_CAP_SLICE_THREADS,
4680 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4681 .priv_class = &h263p_class,
/* MS-MPEG4 v2 encoder registration (generic mpegvideo options only). */
4684 static const AVClass msmpeg4v2_class = {
4685 .class_name = "msmpeg4v2 encoder",
4686 .item_name = av_default_item_name,
4687 .option = ff_mpv_generic_options,
4688 .version = LIBAVUTIL_VERSION_INT,
4691 AVCodec ff_msmpeg4v2_encoder = {
4692 .name = "msmpeg4v2",
4693 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4694 .type = AVMEDIA_TYPE_VIDEO,
4695 .id = AV_CODEC_ID_MSMPEG4V2,
4696 .priv_data_size = sizeof(MpegEncContext),
4697 .init = ff_mpv_encode_init,
4698 .encode2 = ff_mpv_encode_picture,
4699 .close = ff_mpv_encode_end,
4700 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4701 .priv_class = &msmpeg4v2_class,
/* MS-MPEG4 v3 encoder registration. */
4704 static const AVClass msmpeg4v3_class = {
4705 .class_name = "msmpeg4v3 encoder",
4706 .item_name = av_default_item_name,
4707 .option = ff_mpv_generic_options,
4708 .version = LIBAVUTIL_VERSION_INT,
4711 AVCodec ff_msmpeg4v3_encoder = {
4713 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4714 .type = AVMEDIA_TYPE_VIDEO,
4715 .id = AV_CODEC_ID_MSMPEG4V3,
4716 .priv_data_size = sizeof(MpegEncContext),
4717 .init = ff_mpv_encode_init,
4718 .encode2 = ff_mpv_encode_picture,
4719 .close = ff_mpv_encode_end,
4720 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4721 .priv_class = &msmpeg4v3_class,
/* WMV1 encoder class (registration struct continues past this chunk). */
4724 static const AVClass wmv1_class = {
4725 .class_name = "wmv1 encoder",
4726 .item_name = av_default_item_name,
4727 .option = ff_mpv_generic_options,
4728 .version = LIBAVUTIL_VERSION_INT,
4731 AVCodec ff_wmv1_encoder = {
4733 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4734 .type = AVMEDIA_TYPE_VIDEO,
4735 .id = AV_CODEC_ID_WMV1,
4736 .priv_data_size = sizeof(MpegEncContext),
4737 .init = ff_mpv_encode_init,
4738 .encode2 = ff_mpv_encode_picture,
4739 .close = ff_mpv_encode_end,
4740 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4741 .priv_class = &wmv1_class,