2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
32 #include "libavutil/internal.h"
33 #include "libavutil/intmath.h"
34 #include "libavutil/mathematics.h"
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/timer.h"
42 #include "mpegvideo.h"
45 #include "mjpegenc_common.h"
47 #include "mpegutils.h"
50 #include "pixblockdsp.h"
54 #include "aandcttab.h"
56 #include "mpeg4video.h"
58 #include "bytestream.h"
63 #define QUANT_BIAS_SHIFT 8
65 #define QMAT_SHIFT_MMX 16
68 static int encode_picture(MpegEncContext *s, int picture_number);
69 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
70 static int sse_mb(MpegEncContext *s);
71 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
72 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
74 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
75 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
77 const AVOption ff_mpv_generic_options[] = {
/**
 * Convert a quantization matrix into per-qscale multiplier tables.
 *
 * For every qscale in [qmin, qmax] this fills qmat[qscale][] with
 * high-precision reciprocal multipliers and qmat16[qscale][0]/[1][] with a
 * 16-bit multiplier / rounding-bias pair.  The scale factor depends on the
 * forward DCT in use: ff_fdct_ifast leaves the AAN post-scale
 * (ff_aanscales) in its output, so that factor is divided out here too.
 *
 * @param s            encoder context; s->idsp.idct_permutation defines the
 *                     coefficient order of the tables
 * @param qmat         out: multipliers in QMAT_SHIFT fixed point
 * @param qmat16       out: [0] multipliers in QMAT_SHIFT_MMX fixed point,
 *                     [1] matching rounding biases
 * @param quant_matrix input quantization matrix
 * @param bias         quantizer rounding bias (QUANT_BIAS_SHIFT fixed point)
 * @param qmin         first qscale to generate
 * @param qmax         last qscale to generate
 * @param intra        nonzero: coefficient 0 (DC) is skipped in the
 *                     overflow check loop below
 */
void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
                       uint16_t (*qmat16)[2][64],
                       const uint16_t *quant_matrix,
                       int bias, int qmin, int qmax, int intra)
    FDCTDSPContext *fdsp = &s->fdsp;

    for (qscale = qmin; qscale <= qmax; qscale++) {
        /* accurate integer DCTs: output carries no extra scale factor */
        if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
            fdsp->fdct == ff_faandct ||
#endif /* CONFIG_FAANDCT */
            fdsp->fdct == ff_jpeg_fdct_islow_10) {
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale * quant_matrix[j];
                /* 16 <= qscale * quant_matrix[i] <= 7905
                 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
                 * 19952 <= x <= 249205026
                 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
                 * 3444240 >= (1 << 36) / (x) >= 275 */
                qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / den);
        } else if (fdsp->fdct == ff_fdct_ifast) {
            /* ifast DCT: additionally divide out the AAN post-scale,
             * hence the ff_aanscales factor and the extra 14 shift bits */
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                int64_t den = ff_aanscales[i] * (int64_t) qscale * quant_matrix[j];
                /* 16 <= qscale * quant_matrix[i] <= 7905
                 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
                 * 19952 <= x <= 249205026
                 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
                 * 3444240 >= (1 << 36) / (x) >= 275 */
                qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) / den);
            /* remaining path: fill both the 32-bit and the 16-bit tables */
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale * quant_matrix[j];
                /* We can safely suppose that 16 <= quant_matrix[i] <= 255
                 * Assume x = qscale * quant_matrix[i]
                 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
                 * so 32768 >= (1 << 19) / (x) >= 67 */
                qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / den);
                //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
                //                   (qscale * quant_matrix[i]);
                qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) / den;

                /* keep the 16-bit multiplier strictly inside (0, 32768) */
                if (qmat16[qscale][0][i] == 0 ||
                    qmat16[qscale][0][i] == 128 * 256)
                    qmat16[qscale][0][i] = 128 * 256 - 1;
                qmat16[qscale][1][i] =
                    ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
                                qmat16[qscale][0][i]);

        /* check that max * qmat cannot overflow int; DC is skipped for
         * intra matrices (i starts at 'intra') */
        for (i = intra; i < 64; i++) {
            if (fdsp->fdct == ff_fdct_ifast) {
                max = (8191LL * ff_aanscales[i]) >> 14;
            while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
        av_log(NULL, AV_LOG_INFO,
               "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/**
 * Recompute s->qscale (and refresh s->lambda2) from the current s->lambda.
 * The resulting qscale is clipped to the user-configured [qmin, qmax].
 */
static inline void update_qscale(MpegEncContext *s)
    /* map the Lagrange multiplier to a quantizer scale, with rounding */
    s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
                (FF_LAMBDA_SHIFT + 7);
    s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);

    /* keep lambda2 consistent with lambda (rounded lambda^2) */
    s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/**
 * Write a 64-entry quantization matrix to the bitstream,
 * 8 bits per coefficient, in zigzag scan order.
 */
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
    for (i = 0; i < 64; i++) {
        put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
/**
 * init s->current_picture.qscale_table from s->lambda_table:
 * each macroblock's lambda is mapped to a qscale value (same formula
 * as update_qscale()) and clipped to the configured qmin/qmax range.
 */
void ff_init_qscale_tab(MpegEncContext *s)
    int8_t * const qscale_table = s->current_picture.qscale_table;

    for (i = 0; i < s->mb_num; i++) {
        unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
        int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
        qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/**
 * Copy the fields that motion estimation may have changed from src into
 * dst — presumably used to resynchronize duplicate per-thread contexts
 * after the ME pass (TODO confirm against the callers, not visible here).
 */
static void update_duplicate_context_after_me(MpegEncContext *dst,
#define COPY(a) dst->a= src->a
    COPY(current_picture);
    COPY(picture_in_gop_number);
    COPY(gop_picture_number);
    COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
    COPY(progressive_frame);    // FIXME don't set in encode_header
    COPY(partitioned_frame);    // FIXME don't set in encode_header
/**
 * Set the given MpegEncContext to defaults for encoding.
 * The changed fields will not depend upon the prior state of the
 * MpegEncContext.
 */
static void mpv_encode_defaults(MpegEncContext *s)
    ff_mpv_common_defaults(s);

    /* default fcode table: fcode 1 for motion vectors in [-16, 16) */
    for (i = -16; i < 16; i++) {
        default_fcode_tab[i + MAX_MV] = 1;
    s->me.mv_penalty = default_mv_penalty;
    s->fcode_tab     = default_fcode_tab;

    /* reset the picture counters for a fresh encoding session */
    s->input_picture_number  = 0;
    s->picture_in_gop_number = 0;
/**
 * Set up the DCT quantization function pointers for encoding.
 * fast_dct_quantize keeps the plain quantizer; when trellis quantization
 * is requested, dct_quantize is redirected to the trellis implementation.
 */
av_cold int ff_dct_encode_init(MpegEncContext *s) {
    ff_dct_encode_init_x86(s);

    if (CONFIG_H263_ENCODER)
        ff_h263dsp_init(&s->h263dsp);
    /* keep an arch-specific quantizer if one was already installed */
    if (!s->dct_quantize)
        s->dct_quantize = ff_dct_quantize_c;
    s->denoise_dct       = denoise_dct_c;
    s->fast_dct_quantize = s->dct_quantize;
    if (s->avctx->trellis)
        s->dct_quantize = dct_quantize_trellis_c;
/* init video encoder */
/**
 * Initialize the mpegvideo encoder: validate the user configuration
 * against the selected codec, derive the internal encoder state
 * (chroma format, quantizer biases, per-codec output format, ...),
 * allocate the quantization and picture-reordering tables, and
 * initialize the DSP / rate-control sub-modules.
 *
 * @return 0 on success, a negative AVERROR code on invalid configuration
 *         or allocation failure
 */
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
    MpegEncContext *s = avctx->priv_data;
    int i, ret, format_supported;

    mpv_encode_defaults(s);

    /* --- pixel-format validation, per codec --- */
    switch (avctx->codec_id) {
    case AV_CODEC_ID_MPEG2VIDEO:
        if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
            avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
            av_log(avctx, AV_LOG_ERROR,
                   "only YUV420 and YUV422 are supported\n");
    case AV_CODEC_ID_MJPEG:
    case AV_CODEC_ID_AMV:
        format_supported = 0;
        /* JPEG color space */
        if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
            avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
            avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
            (avctx->color_range == AVCOL_RANGE_JPEG &&
             (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
              avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
              avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
            format_supported = 1;
        /* MPEG color space */
        else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
                 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
                  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
                  avctx->pix_fmt == AV_PIX_FMT_YUV444P))
            format_supported = 1;

        if (!format_supported) {
            av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
        if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
            av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");

    /* derive the chroma subsampling mode from the pixel format */
    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUVJ444P:
    case AV_PIX_FMT_YUV444P:
        s->chroma_format = CHROMA_444;
    case AV_PIX_FMT_YUVJ422P:
    case AV_PIX_FMT_YUV422P:
        s->chroma_format = CHROMA_422;
    case AV_PIX_FMT_YUVJ420P:
    case AV_PIX_FMT_YUV420P:
        s->chroma_format = CHROMA_420;

    /* --- copy user settings into the encoder context --- */
    s->bit_rate = avctx->bit_rate;
    s->width    = avctx->width;
    s->height   = avctx->height;
    if (avctx->gop_size > 600 &&
        avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
        av_log(avctx, AV_LOG_WARNING,
               "keyframe interval too large!, reducing it from %d to %d\n",
               avctx->gop_size, 600);
        avctx->gop_size = 600;
    s->gop_size = avctx->gop_size;

    if (avctx->max_b_frames > MAX_B_FRAMES) {
        av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
               "is %d.\n", MAX_B_FRAMES);
        avctx->max_b_frames = MAX_B_FRAMES;
    s->max_b_frames = avctx->max_b_frames;
    s->codec_id     = avctx->codec->id;
    s->strict_std_compliance = avctx->strict_std_compliance;
    s->quarter_sample     = (avctx->flags & CODEC_FLAG_QPEL) != 0;
    s->mpeg_quant         = avctx->mpeg_quant;
    s->rtp_mode           = !!avctx->rtp_payload_size;
    s->intra_dc_precision = avctx->intra_dc_precision;

    // workaround some differences between how applications specify dc precision
    if (s->intra_dc_precision < 0) {
        s->intra_dc_precision += 8;
    } else if (s->intra_dc_precision >= 8)
        s->intra_dc_precision -= 8;

    if (s->intra_dc_precision < 0) {
        av_log(avctx, AV_LOG_ERROR,
                "intra dc precision must be positive, note some applications use"
                " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
        return AVERROR(EINVAL);

    /* only MPEG-2 allows a raised DC precision (values up to 3) */
    if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
        av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
        return AVERROR(EINVAL);
    s->user_specified_pts = AV_NOPTS_VALUE;

    if (s->gop_size <= 1) {
    s->me_method = avctx->me_method;

    s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);

FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->border_masking != 0.0)
        s->border_masking = avctx->border_masking;
FF_ENABLE_DEPRECATION_WARNINGS

    /* adaptive quantization is required as soon as any masking option
     * or QP-RD is enabled */
    s->adaptive_quant = (s->avctx->lumi_masking ||
                         s->avctx->dark_masking ||
                         s->avctx->temporal_cplx_masking ||
                         s->avctx->spatial_cplx_masking ||
                         s->avctx->p_masking ||
                         (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&

    s->loop_filter = !!(s->avctx->flags & CODEC_FLAG_LOOP_FILTER);

    /* --- rate-control / VBV-buffer sanity checks --- */
    if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
        switch(avctx->codec_id) {
        case AV_CODEC_ID_MPEG1VIDEO:
        case AV_CODEC_ID_MPEG2VIDEO:
            avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
        case AV_CODEC_ID_MPEG4:
        case AV_CODEC_ID_MSMPEG4V1:
        case AV_CODEC_ID_MSMPEG4V2:
        case AV_CODEC_ID_MSMPEG4V3:
            /* piecewise-linear VBV size interpolation over the max rate */
            if (avctx->rc_max_rate >= 15000000) {
                avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
            } else if(avctx->rc_max_rate >= 2000000) {
                avctx->rc_buffer_size =  80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
            } else if(avctx->rc_max_rate >= 384000) {
                avctx->rc_buffer_size =  40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
                avctx->rc_buffer_size = 40;
            avctx->rc_buffer_size *= 16384;
        if (avctx->rc_buffer_size) {
            av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);

    if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
        av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");

    if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
        av_log(avctx, AV_LOG_INFO,
               "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");

    if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
        av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");

    if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
        av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");

    if (avctx->rc_max_rate &&
        avctx->rc_max_rate == avctx->bit_rate &&
        avctx->rc_max_rate != avctx->rc_min_rate) {
        av_log(avctx, AV_LOG_INFO,
               "impossible bitrate constraints, this will fail\n");

    if (avctx->rc_buffer_size &&
        avctx->bit_rate * (int64_t)avctx->time_base.num >
            avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
        av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");

    if (!s->fixed_qscale &&
        avctx->bit_rate * av_q2d(avctx->time_base) >
            avctx->bit_rate_tolerance) {
        av_log(avctx, AV_LOG_WARNING,
               "bitrate tolerance %d too small for bitrate %d, overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
        avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);

    /* CBR + large VBV cannot be represented in the 16-bit vbv_delay field */
    if (s->avctx->rc_max_rate &&
        s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
        (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
         s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
        90000LL * (avctx->rc_buffer_size - 1) >
            s->avctx->rc_max_rate * 0xFFFFLL) {
        av_log(avctx, AV_LOG_INFO,
               "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
               "specified vbv buffer is too large for the given bitrate!\n");

    /* --- feature vs codec compatibility checks --- */
    if ((s->avctx->flags & CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
        s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
        s->codec_id != AV_CODEC_ID_FLV1) {
        av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");

    if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
        av_log(avctx, AV_LOG_ERROR,
               "OBMC is only supported with simple mb decision\n");

    if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
        av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");

    if (s->max_b_frames &&
        s->codec_id != AV_CODEC_ID_MPEG4 &&
        s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
        s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
        av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
    if (s->max_b_frames < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "max b frames must be 0 or positive for mpegvideo based encoders\n");

    /* these codecs store the SAR in 8 bits per component; reduce if needed */
    if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
         s->codec_id == AV_CODEC_ID_H263 ||
         s->codec_id == AV_CODEC_ID_H263P) &&
        (avctx->sample_aspect_ratio.num > 255 ||
         avctx->sample_aspect_ratio.den > 255)) {
        av_log(avctx, AV_LOG_WARNING,
               "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
               avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
        av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
                   avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);

    /* per-codec resolution limits */
    if ((s->codec_id == AV_CODEC_ID_H263 ||
         s->codec_id == AV_CODEC_ID_H263P) &&
        (avctx->width > 2048 ||
         avctx->height > 1152 )) {
        av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
    if ((s->codec_id == AV_CODEC_ID_H263 ||
         s->codec_id == AV_CODEC_ID_H263P) &&
        ((avctx->width &3) ||
         (avctx->height&3) )) {
        av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");

    if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
        (avctx->width > 4095 ||
         avctx->height > 4095 )) {
        av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");

    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
        (avctx->width > 16383 ||
         avctx->height > 16383 )) {
        av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");

    if (s->codec_id == AV_CODEC_ID_RV10 &&
         avctx->height&15 )) {
        av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
        return AVERROR(EINVAL);

    if (s->codec_id == AV_CODEC_ID_RV20 &&
        av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
        return AVERROR(EINVAL);

    if ((s->codec_id == AV_CODEC_ID_WMV1 ||
         s->codec_id == AV_CODEC_ID_WMV2) &&
        av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");

    if ((s->avctx->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
        s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
        av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");

    // FIXME mpeg2 uses that too
    if (s->mpeg_quant && (   s->codec_id != AV_CODEC_ID_MPEG4
                          && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
        av_log(avctx, AV_LOG_ERROR,
               "mpeg2 style quantization not supported by codec\n");

    if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
        av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");

    if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
        s->avctx->mb_decision != FF_MB_DECISION_RD) {
        av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");

    if (s->avctx->scenechange_threshold < 1000000000 &&
        (s->avctx->flags & CODEC_FLAG_CLOSED_GOP)) {
        av_log(avctx, AV_LOG_ERROR,
               "closed gop with scene change detection are not supported yet, "
               "set threshold to 1000000000\n");

    if (s->avctx->flags & CODEC_FLAG_LOW_DELAY) {
        if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
            av_log(avctx, AV_LOG_ERROR,
                  "low delay forcing is only available for mpeg2\n");
        if (s->max_b_frames != 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "b frames cannot be used with low delay\n");

    if (s->q_scale_type == 1) {
        if (avctx->qmax > 12) {
            av_log(avctx, AV_LOG_ERROR,
                   "non linear quant only supports qmax <= 12 currently\n");

    /* --- threading constraints --- */
    if (s->avctx->thread_count > 1 &&
        s->codec_id != AV_CODEC_ID_MPEG4 &&
        s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
        s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
        s->codec_id != AV_CODEC_ID_MJPEG &&
        (s->codec_id != AV_CODEC_ID_H263P)) {
        av_log(avctx, AV_LOG_ERROR,
               "multi threaded encoding not supported by codec\n");

    if (s->avctx->thread_count < 1) {
        av_log(avctx, AV_LOG_ERROR,
               "automatic thread number detection not supported by codec, "

    if (s->avctx->slices > 1 || s->avctx->thread_count > 1)

    if (s->avctx->thread_count > 1 && s->codec_id == AV_CODEC_ID_H263P)
        s->h263_slice_structured = 1;

    if (!avctx->time_base.den || !avctx->time_base.num) {
        av_log(avctx, AV_LOG_ERROR, "framerate not set\n");

    if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
        av_log(avctx, AV_LOG_INFO,
               "notice: b_frame_strategy only affects the first pass\n");
        avctx->b_frame_strategy = 0;

    /* reduce the timebase by its gcd so codec limits are easier to meet */
    i = av_gcd(avctx->time_base.den, avctx->time_base.num);
        av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
        avctx->time_base.den /= i;
        avctx->time_base.num /= i;

    /* default quantizer rounding biases per codec family */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) {
        // (a + x * 3 / 8) / x
        s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
        s->inter_quant_bias = 0;
        s->intra_quant_bias = 0;
        s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));

    if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
        av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
        return AVERROR(EINVAL);

    /* explicit user biases override the defaults chosen above */
    if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
        s->intra_quant_bias = avctx->intra_quant_bias;
    if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
        s->inter_quant_bias = avctx->inter_quant_bias;

    av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);

    /* MPEG-4 stores the timebase denominator in 16 bits */
    if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
        s->avctx->time_base.den > (1 << 16) - 1) {
        av_log(avctx, AV_LOG_ERROR,
               "timebase %d/%d not supported by MPEG 4 standard, "
               "the maximum admitted value for the timebase denominator "
               "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
    s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;

    /* --- per-codec output format and feature selection --- */
    switch (avctx->codec->id) {
    case AV_CODEC_ID_MPEG1VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay  = !!(s->avctx->flags & CODEC_FLAG_LOW_DELAY);
        avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
    case AV_CODEC_ID_MPEG2VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay  = !!(s->avctx->flags & CODEC_FLAG_LOW_DELAY);
        avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
    case AV_CODEC_ID_MJPEG:
    case AV_CODEC_ID_AMV:
        s->out_format = FMT_MJPEG;
        s->intra_only = 1; /* force intra only for jpeg */
        if (!CONFIG_MJPEG_ENCODER ||
            ff_mjpeg_encode_init(s) < 0)
    case AV_CODEC_ID_H261:
        if (!CONFIG_H261_ENCODER)
        if (ff_h261_get_picture_format(s->width, s->height) < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "The specified picture size of %dx%d is not valid for the "
                   "H.261 codec.\nValid sizes are 176x144, 352x288\n",
                    s->width, s->height);
        s->out_format = FMT_H261;
        s->rtp_mode   = 0; /* Sliced encoding not supported */
    case AV_CODEC_ID_H263:
        if (!CONFIG_H263_ENCODER)
        if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
                             s->width, s->height) == 8) {
            av_log(avctx, AV_LOG_ERROR,
                   "The specified picture size of %dx%d is not valid for "
                   "the H.263 codec.\nValid sizes are 128x96, 176x144, "
                   "352x288, 704x576, and 1408x1152. "
                   "Try H.263+.\n", s->width, s->height);
        s->out_format = FMT_H263;
    case AV_CODEC_ID_H263P:
        s->out_format = FMT_H263;
        s->h263_aic        = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
        s->modified_quant  = s->h263_aic;
        s->loop_filter     = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
        s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;

        /* These are just to be sure */
    case AV_CODEC_ID_FLV1:
        s->out_format      = FMT_H263;
        s->h263_flv        = 2; /* format = 1; 11-bit codes */
        s->unrestricted_mv = 1;
        s->rtp_mode        = 0; /* don't allow GOB */
    case AV_CODEC_ID_RV10:
        s->out_format = FMT_H263;
    case AV_CODEC_ID_RV20:
        s->out_format      = FMT_H263;
        s->modified_quant  = 1;
        s->unrestricted_mv = 0;
    case AV_CODEC_ID_MPEG4:
        s->out_format      = FMT_H263;
        s->unrestricted_mv = 1;
        s->low_delay       = s->max_b_frames ? 0 : 1;
        avctx->delay       = s->low_delay ? 0 : (s->max_b_frames + 1);
    case AV_CODEC_ID_MSMPEG4V2:
        s->out_format      = FMT_H263;
        s->unrestricted_mv = 1;
        s->msmpeg4_version = 2;
    case AV_CODEC_ID_MSMPEG4V3:
        s->out_format        = FMT_H263;
        s->unrestricted_mv   = 1;
        s->msmpeg4_version   = 3;
        s->flipflop_rounding = 1;
    case AV_CODEC_ID_WMV1:
        s->out_format        = FMT_H263;
        s->unrestricted_mv   = 1;
        s->msmpeg4_version   = 4;
        s->flipflop_rounding = 1;
    case AV_CODEC_ID_WMV2:
        s->out_format        = FMT_H263;
        s->unrestricted_mv   = 1;
        s->msmpeg4_version   = 5;
        s->flipflop_rounding = 1;

    avctx->has_b_frames = !s->low_delay;

    s->progressive_frame    =
    s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
                                                CODEC_FLAG_INTERLACED_ME) ||

    /* --- allocations and sub-module initialization --- */
    if (ff_mpv_common_init(s) < 0)

    ff_fdctdsp_init(&s->fdsp, avctx);
    ff_me_cmp_init(&s->mecc, avctx);
    ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
    ff_pixblockdsp_init(&s->pdsp, avctx);
    ff_qpeldsp_init(&s->qdsp);

    s->avctx->coded_frame = s->current_picture.f;

    if (s->msmpeg4_version) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
                          2 * 2 * (MAX_LEVEL + 1) *
                          (MAX_RUN + 1) * 2 * sizeof(int), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);

    /* quantization tables: 32 qscales x 64 coefficients */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,          64 * 32 * sizeof(int), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix,   64 * 32 * sizeof(int), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,          64 * 32 * sizeof(int), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,        64 * 32 * 2 * sizeof(uint16_t), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,        64 * 32 * 2 * sizeof(uint16_t), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
                      MAX_PICTURE_COUNT * sizeof(Picture *), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
                      MAX_PICTURE_COUNT * sizeof(Picture *), fail);

    if (s->avctx->noise_reduction) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
                          2 * 64 * sizeof(uint16_t), fail);

    ff_dct_encode_init(s);

    if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
        s->chroma_qscale_table = ff_h263_chroma_qscale_table;

    s->quant_precision = 5;

    ff_set_cmp(&s->mecc, s->mecc.ildct_cmp,      s->avctx->ildct_cmp);
    ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->avctx->frame_skip_cmp);

    if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
        ff_h261_encode_init(s);
    if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
        ff_h263_encode_init(s);
    if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
        if ((ret = ff_msmpeg4_encode_init(s)) < 0)
    if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
        && s->out_format == FMT_MPEG1)
        ff_mpeg1_encode_init(s);

    /* load default / user quant matrices, in idct permutation order */
    for (i = 0; i < 64; i++) {
        int j = s->idsp.idct_permutation[i];
        if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
            s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
        } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
            s->chroma_intra_matrix[j] =
            s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
        if (s->avctx->intra_matrix)
            s->intra_matrix[j] = s->avctx->intra_matrix[i];
        if (s->avctx->inter_matrix)
            s->inter_matrix[j] = s->avctx->inter_matrix[i];

    /* precompute matrix */
    /* for mjpeg, we do include qscale in the matrix */
    if (s->out_format != FMT_MJPEG) {
        ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
                          s->intra_matrix, s->intra_quant_bias, avctx->qmin,
        ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
                          s->inter_matrix, s->inter_quant_bias, avctx->qmin,

    if (ff_rate_control_init(s) < 0)

    /* --- migrate deprecated AVCodecContext options into mpv_flags etc. --- */
#if FF_API_ERROR_RATE
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->error_rate)
        s->error_rate = avctx->error_rate;
    FF_ENABLE_DEPRECATION_WARNINGS;

#if FF_API_NORMALIZE_AQP
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP)
        s->mpv_flags |= FF_MPV_FLAG_NAQ;
    FF_ENABLE_DEPRECATION_WARNINGS;

    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->flags & CODEC_FLAG_MV0)
        s->mpv_flags |= FF_MPV_FLAG_MV0;
    FF_ENABLE_DEPRECATION_WARNINGS

    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->rc_qsquish != 0.0)
        s->rc_qsquish = avctx->rc_qsquish;
    if (avctx->rc_qmod_amp != 0.0)
        s->rc_qmod_amp = avctx->rc_qmod_amp;
    if (avctx->rc_qmod_freq)
        s->rc_qmod_freq = avctx->rc_qmod_freq;
    if (avctx->rc_buffer_aggressivity != 1.0)
        s->rc_buffer_aggressivity = avctx->rc_buffer_aggressivity;
    if (avctx->rc_initial_cplx != 0.0)
        s->rc_initial_cplx = avctx->rc_initial_cplx;
    s->lmin = avctx->lmin;
    s->lmax = avctx->lmax;
        s->rc_eq = av_strdup(avctx->rc_eq);
            return AVERROR(ENOMEM);
    FF_ENABLE_DEPRECATION_WARNINGS

    /* pre-allocate downscaled frames for the b_frame_strategy==2 lookahead */
    if (avctx->b_frame_strategy == 2) {
        for (i = 0; i < s->max_b_frames + 2; i++) {
            s->tmp_frames[i] = av_frame_alloc();
            if (!s->tmp_frames[i])
                return AVERROR(ENOMEM);

            s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
            s->tmp_frames[i]->width  = s->width  >> avctx->brd_scale;
            s->tmp_frames[i]->height = s->height >> avctx->brd_scale;

            ret = av_frame_get_buffer(s->tmp_frames[i], 32);
    ff_mpv_encode_end(avctx);
    return AVERROR_UNKNOWN;
/**
 * Free all encoder state allocated by ff_mpv_encode_init().
 */
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
    MpegEncContext *s = avctx->priv_data;

    ff_rate_control_uninit(s);

    ff_mpv_common_end(s);
    if (CONFIG_MJPEG_ENCODER &&
        s->out_format == FMT_MJPEG)
        ff_mjpeg_encode_close(s);

    av_freep(&avctx->extradata);

    for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
        av_frame_free(&s->tmp_frames[i]);

    ff_free_picture_tables(&s->new_picture);
    ff_mpeg_unref_picture(s->avctx, &s->new_picture);

    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);

    /* the chroma tables may alias the luma ones; free only when distinct,
     * then clear the pointers so no dangling alias remains */
    if(s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
    if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
    s->q_chroma_intra_matrix=   NULL;
    s->q_chroma_intra_matrix16= NULL;
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);
/**
 * Sum of absolute errors of a 16x16 pixel block against a constant
 * reference value (used as a flatness measure around the block mean).
 *
 * @param src    top-left pixel of the block
 * @param ref    reference value every pixel is compared against
 * @param stride distance in bytes between the starts of two rows
 * @return sum over the 256 pixels of |src[pixel] - ref|
 */
static int get_sae(uint8_t *src, int ref, int stride)
{
    int sum = 0;

    for (int row = 0; row < 16; row++) {
        for (int col = 0; col < 16; col++) {
            int diff = src[col + row * stride] - ref;
            sum += diff < 0 ? -diff : diff;
        }
    }

    return sum;
}
/**
 * Count 16x16 blocks that look cheaper to code as intra: a block is
 * counted when its flatness (SAE against its own mean, plus a margin
 * of 500) is still below the inter SAD against the reference frame.
 * Only the height rounded down to a multiple of 16 is scanned
 * (the matching width rounding is done where w is set, elided here).
 */
static int get_intra_count(MpegEncContext *s, uint8_t *src,
                           uint8_t *ref, int stride)
    h = s->height & ~15;    /* height rounded down to a multiple of 16 */

    for (y = 0; y < h; y += 16) {
        for (x = 0; x < w; x += 16) {
            int offset = x + y * stride;
            /* inter cost: SAD against the co-located reference block */
            int sad  = s->mecc.sad[0](NULL, src + offset, ref + offset,
            /* intra cost proxy: SAE against the block mean (pix_sum/256) */
            int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
            int sae  = get_sae(src + offset, mean, stride);

            acc += sae + 500 < sad;
/**
 * Queue one user-supplied frame into s->input_picture[].
 *
 * Validates (or guesses) the frame pts, then either references the
 * caller's buffers directly — when linesizes and alignment match the
 * encoder's layout — or copies the planes into an internal Picture,
 * drawing padding edges where the dimensions are not block aligned.
 *
 * @return 0 on success, a negative AVERROR code otherwise
 */
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
    Picture *pic = NULL;
    int i, display_picture_number = 0, ret;
    /* with B frames the first output is delayed by max_b_frames inputs */
    const int encoding_delay = s->max_b_frames ? s->max_b_frames :
                                                 (s->low_delay ? 0 : 1);

    display_picture_number = s->input_picture_number++;

    if (pts != AV_NOPTS_VALUE) {
        /* pts must be strictly increasing */
        if (s->user_specified_pts != AV_NOPTS_VALUE) {
            int64_t last = s->user_specified_pts;

                av_log(s->avctx, AV_LOG_ERROR,
                       "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
                return AVERROR(EINVAL);

            if (!s->low_delay && display_picture_number == 1)
                s->dts_delta = pts - last;
        s->user_specified_pts = pts;
        /* no pts given: extrapolate from the previous one, or fall back
         * to the display index */
        if (s->user_specified_pts != AV_NOPTS_VALUE) {
            s->user_specified_pts =
            pts = s->user_specified_pts + 1;
            av_log(s->avctx, AV_LOG_INFO,
                   "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
            pts = display_picture_number;

        /* direct (zero-copy) use of the caller's buffers requires matching
         * linesizes and sufficiently aligned pointers */
        if (!pic_arg->buf[0] ||
            pic_arg->linesize[0] != s->linesize ||
            pic_arg->linesize[1] != s->uvlinesize ||
            pic_arg->linesize[2] != s->uvlinesize)
        if ((s->width & 15) || (s->height & 15))
        if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
        if (s->linesize & (STRIDE_ALIGN-1))

        ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
                pic_arg->linesize[1], s->linesize, s->uvlinesize);

        i = ff_find_unused_picture(s->avctx, s->picture, direct);

        pic = &s->picture[i];

            if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
            ret = ff_alloc_picture(s, pic, direct);

            if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
                pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
                pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
                int h_chroma_shift, v_chroma_shift;
                av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,

                /* copy the frame into the internal buffer, plane by plane */
                for (i = 0; i < 3; i++) {
                    int src_stride = pic_arg->linesize[i];
                    int dst_stride = i ? s->uvlinesize : s->linesize;
                    int h_shift = i ? h_chroma_shift : 0;
                    int v_shift = i ? v_chroma_shift : 0;
                    int w = s->width  >> h_shift;
                    int h = s->height >> v_shift;
                    uint8_t *src = pic_arg->data[i];
                    uint8_t *dst = pic->f->data[i];

                    if (   s->codec_id == AV_CODEC_ID_MPEG2VIDEO
                        && !s->progressive_sequence
                        && FFALIGN(s->height, 32) - s->height > 16)

                    if (!s->avctx->rc_buffer_size)
                        dst += INPLACE_OFFSET;

                    if (src_stride == dst_stride)
                        /* identical layout: one bulk copy for the plane */
                        memcpy(dst, src, src_stride * h);
                        uint8_t *dst2 = dst;
                            memcpy(dst2, src, w);
                    /* pad non-block-aligned dimensions with edge pixels */
                    if ((s->width & 15) || (s->height & (vpad-1))) {
                        s->mpvencdsp.draw_edges(dst, dst_stride,

        ret = av_frame_copy_props(pic->f, pic_arg);

        pic->f->display_picture_number = display_picture_number;
        pic->f->pts = pts; // we set this here to avoid modifiying pic_arg

    /* shift buffer entries */
    for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
        s->input_picture[i - 1] = s->input_picture[i];

    s->input_picture[encoding_delay] = (Picture*) pic;
1218 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1222 int64_t score64 = 0;
1224 for (plane = 0; plane < 3; plane++) {
1225 const int stride = p->f->linesize[plane];
1226 const int bw = plane ? 1 : 2;
1227 for (y = 0; y < s->mb_height * bw; y++) {
1228 for (x = 0; x < s->mb_width * bw; x++) {
1229 int off = p->shared ? 0 : 16;
1230 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1231 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1232 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1234 switch (FFABS(s->avctx->frame_skip_exp)) {
1235 case 0: score = FFMAX(score, v); break;
1236 case 1: score += FFABS(v); break;
1237 case 2: score64 += v * (int64_t)v; break;
1238 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1239 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1248 if (s->avctx->frame_skip_exp < 0)
1249 score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1250 -1.0/s->avctx->frame_skip_exp);
1252 if (score64 < s->avctx->frame_skip_threshold)
1254 if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
1259 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1261 AVPacket pkt = { 0 };
1262 int ret, got_output;
1264 av_init_packet(&pkt);
1265 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1270 av_free_packet(&pkt);
1274 static int estimate_best_b_count(MpegEncContext *s)
1276 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1277 AVCodecContext *c = avcodec_alloc_context3(NULL);
1278 const int scale = s->avctx->brd_scale;
1279 int i, j, out_size, p_lambda, b_lambda, lambda2;
1280 int64_t best_rd = INT64_MAX;
1281 int best_b_count = -1;
1284 return AVERROR(ENOMEM);
1285 av_assert0(scale >= 0 && scale <= 3);
1288 //s->next_picture_ptr->quality;
1289 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1290 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1291 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1292 if (!b_lambda) // FIXME we should do this somewhere else
1293 b_lambda = p_lambda;
1294 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1297 c->width = s->width >> scale;
1298 c->height = s->height >> scale;
1299 c->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR;
1300 c->flags |= s->avctx->flags & CODEC_FLAG_QPEL;
1301 c->mb_decision = s->avctx->mb_decision;
1302 c->me_cmp = s->avctx->me_cmp;
1303 c->mb_cmp = s->avctx->mb_cmp;
1304 c->me_sub_cmp = s->avctx->me_sub_cmp;
1305 c->pix_fmt = AV_PIX_FMT_YUV420P;
1306 c->time_base = s->avctx->time_base;
1307 c->max_b_frames = s->max_b_frames;
1309 if (avcodec_open2(c, codec, NULL) < 0)
1312 for (i = 0; i < s->max_b_frames + 2; i++) {
1313 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1314 s->next_picture_ptr;
1317 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1318 pre_input = *pre_input_ptr;
1319 memcpy(data, pre_input_ptr->f->data, sizeof(data));
1321 if (!pre_input.shared && i) {
1322 data[0] += INPLACE_OFFSET;
1323 data[1] += INPLACE_OFFSET;
1324 data[2] += INPLACE_OFFSET;
1327 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1328 s->tmp_frames[i]->linesize[0],
1330 pre_input.f->linesize[0],
1331 c->width, c->height);
1332 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1333 s->tmp_frames[i]->linesize[1],
1335 pre_input.f->linesize[1],
1336 c->width >> 1, c->height >> 1);
1337 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1338 s->tmp_frames[i]->linesize[2],
1340 pre_input.f->linesize[2],
1341 c->width >> 1, c->height >> 1);
1345 for (j = 0; j < s->max_b_frames + 1; j++) {
1348 if (!s->input_picture[j])
1351 c->error[0] = c->error[1] = c->error[2] = 0;
1353 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1354 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1356 out_size = encode_frame(c, s->tmp_frames[0]);
1358 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1360 for (i = 0; i < s->max_b_frames + 1; i++) {
1361 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1363 s->tmp_frames[i + 1]->pict_type = is_p ?
1364 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1365 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1367 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1369 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1372 /* get the delayed frames */
1374 out_size = encode_frame(c, NULL);
1375 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1378 rd += c->error[0] + c->error[1] + c->error[2];
1389 return best_b_count;
1392 static int select_input_picture(MpegEncContext *s)
1396 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1397 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1398 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1400 /* set next picture type & ordering */
1401 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1402 if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1403 if (s->picture_in_gop_number < s->gop_size &&
1404 s->next_picture_ptr &&
1405 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1406 // FIXME check that te gop check above is +-1 correct
1407 av_frame_unref(s->input_picture[0]->f);
1409 ff_vbv_update(s, 0);
1415 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1416 !s->next_picture_ptr || s->intra_only) {
1417 s->reordered_input_picture[0] = s->input_picture[0];
1418 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1419 s->reordered_input_picture[0]->f->coded_picture_number =
1420 s->coded_picture_number++;
1424 if (s->avctx->flags & CODEC_FLAG_PASS2) {
1425 for (i = 0; i < s->max_b_frames + 1; i++) {
1426 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1428 if (pict_num >= s->rc_context.num_entries)
1430 if (!s->input_picture[i]) {
1431 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1435 s->input_picture[i]->f->pict_type =
1436 s->rc_context.entry[pict_num].new_pict_type;
1440 if (s->avctx->b_frame_strategy == 0) {
1441 b_frames = s->max_b_frames;
1442 while (b_frames && !s->input_picture[b_frames])
1444 } else if (s->avctx->b_frame_strategy == 1) {
1445 for (i = 1; i < s->max_b_frames + 1; i++) {
1446 if (s->input_picture[i] &&
1447 s->input_picture[i]->b_frame_score == 0) {
1448 s->input_picture[i]->b_frame_score =
1450 s->input_picture[i ]->f->data[0],
1451 s->input_picture[i - 1]->f->data[0],
1455 for (i = 0; i < s->max_b_frames + 1; i++) {
1456 if (!s->input_picture[i] ||
1457 s->input_picture[i]->b_frame_score - 1 >
1458 s->mb_num / s->avctx->b_sensitivity)
1462 b_frames = FFMAX(0, i - 1);
1465 for (i = 0; i < b_frames + 1; i++) {
1466 s->input_picture[i]->b_frame_score = 0;
1468 } else if (s->avctx->b_frame_strategy == 2) {
1469 b_frames = estimate_best_b_count(s);
1471 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
1477 for (i = b_frames - 1; i >= 0; i--) {
1478 int type = s->input_picture[i]->f->pict_type;
1479 if (type && type != AV_PICTURE_TYPE_B)
1482 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1483 b_frames == s->max_b_frames) {
1484 av_log(s->avctx, AV_LOG_ERROR,
1485 "warning, too many b frames in a row\n");
1488 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1489 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1490 s->gop_size > s->picture_in_gop_number) {
1491 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1493 if (s->avctx->flags & CODEC_FLAG_CLOSED_GOP)
1495 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1499 if ((s->avctx->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
1500 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1503 s->reordered_input_picture[0] = s->input_picture[b_frames];
1504 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1505 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1506 s->reordered_input_picture[0]->f->coded_picture_number =
1507 s->coded_picture_number++;
1508 for (i = 0; i < b_frames; i++) {
1509 s->reordered_input_picture[i + 1] = s->input_picture[i];
1510 s->reordered_input_picture[i + 1]->f->pict_type =
1512 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1513 s->coded_picture_number++;
1518 if (s->reordered_input_picture[0]) {
1519 s->reordered_input_picture[0]->reference =
1520 s->reordered_input_picture[0]->f->pict_type !=
1521 AV_PICTURE_TYPE_B ? 3 : 0;
1523 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1524 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1527 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1528 // input is a shared pix, so we can't modifiy it -> alloc a new
1529 // one & ensure that the shared one is reuseable
1532 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1535 pic = &s->picture[i];
1537 pic->reference = s->reordered_input_picture[0]->reference;
1538 if (ff_alloc_picture(s, pic, 0) < 0) {
1542 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1546 /* mark us unused / free shared pic */
1547 av_frame_unref(s->reordered_input_picture[0]->f);
1548 s->reordered_input_picture[0]->shared = 0;
1550 s->current_picture_ptr = pic;
1552 // input is not a shared pix -> reuse buffer for current_pix
1553 s->current_picture_ptr = s->reordered_input_picture[0];
1554 for (i = 0; i < 4; i++) {
1555 s->new_picture.f->data[i] += INPLACE_OFFSET;
1558 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1559 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1560 s->current_picture_ptr)) < 0)
1563 s->picture_number = s->new_picture.f->display_picture_number;
1565 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1570 static void frame_end(MpegEncContext *s)
1572 if (s->unrestricted_mv &&
1573 s->current_picture.reference &&
1575 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1576 int hshift = desc->log2_chroma_w;
1577 int vshift = desc->log2_chroma_h;
1578 s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1579 s->current_picture.f->linesize[0],
1580 s->h_edge_pos, s->v_edge_pos,
1581 EDGE_WIDTH, EDGE_WIDTH,
1582 EDGE_TOP | EDGE_BOTTOM);
1583 s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1584 s->current_picture.f->linesize[1],
1585 s->h_edge_pos >> hshift,
1586 s->v_edge_pos >> vshift,
1587 EDGE_WIDTH >> hshift,
1588 EDGE_WIDTH >> vshift,
1589 EDGE_TOP | EDGE_BOTTOM);
1590 s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1591 s->current_picture.f->linesize[2],
1592 s->h_edge_pos >> hshift,
1593 s->v_edge_pos >> vshift,
1594 EDGE_WIDTH >> hshift,
1595 EDGE_WIDTH >> vshift,
1596 EDGE_TOP | EDGE_BOTTOM);
1601 s->last_pict_type = s->pict_type;
1602 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1603 if (s->pict_type!= AV_PICTURE_TYPE_B)
1604 s->last_non_b_pict_type = s->pict_type;
1606 s->avctx->coded_frame = s->current_picture_ptr->f;
1610 static void update_noise_reduction(MpegEncContext *s)
1614 for (intra = 0; intra < 2; intra++) {
1615 if (s->dct_count[intra] > (1 << 16)) {
1616 for (i = 0; i < 64; i++) {
1617 s->dct_error_sum[intra][i] >>= 1;
1619 s->dct_count[intra] >>= 1;
1622 for (i = 0; i < 64; i++) {
1623 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1624 s->dct_count[intra] +
1625 s->dct_error_sum[intra][i] / 2) /
1626 (s->dct_error_sum[intra][i] + 1);
1631 static int frame_start(MpegEncContext *s)
1635 /* mark & release old frames */
1636 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1637 s->last_picture_ptr != s->next_picture_ptr &&
1638 s->last_picture_ptr->f->buf[0]) {
1639 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1642 s->current_picture_ptr->f->pict_type = s->pict_type;
1643 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1645 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1646 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1647 s->current_picture_ptr)) < 0)
1650 if (s->pict_type != AV_PICTURE_TYPE_B) {
1651 s->last_picture_ptr = s->next_picture_ptr;
1653 s->next_picture_ptr = s->current_picture_ptr;
1656 if (s->last_picture_ptr) {
1657 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1658 if (s->last_picture_ptr->f->buf[0] &&
1659 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1660 s->last_picture_ptr)) < 0)
1663 if (s->next_picture_ptr) {
1664 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1665 if (s->next_picture_ptr->f->buf[0] &&
1666 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1667 s->next_picture_ptr)) < 0)
1671 if (s->picture_structure!= PICT_FRAME) {
1673 for (i = 0; i < 4; i++) {
1674 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1675 s->current_picture.f->data[i] +=
1676 s->current_picture.f->linesize[i];
1678 s->current_picture.f->linesize[i] *= 2;
1679 s->last_picture.f->linesize[i] *= 2;
1680 s->next_picture.f->linesize[i] *= 2;
1684 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1685 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1686 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1687 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1688 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1689 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1691 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1692 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1695 if (s->dct_error_sum) {
1696 av_assert2(s->avctx->noise_reduction && s->encoding);
1697 update_noise_reduction(s);
1703 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1704 const AVFrame *pic_arg, int *got_packet)
1706 MpegEncContext *s = avctx->priv_data;
1707 int i, stuffing_count, ret;
1708 int context_count = s->slice_context_count;
1710 s->picture_in_gop_number++;
1712 if (load_input_picture(s, pic_arg) < 0)
1715 if (select_input_picture(s) < 0) {
1720 if (s->new_picture.f->data[0]) {
1721 int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1722 int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - FF_INPUT_BUFFER_PADDING_SIZE
1724 s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1725 if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size)) < 0)
1728 s->mb_info_ptr = av_packet_new_side_data(pkt,
1729 AV_PKT_DATA_H263_MB_INFO,
1730 s->mb_width*s->mb_height*12);
1731 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1734 for (i = 0; i < context_count; i++) {
1735 int start_y = s->thread_context[i]->start_mb_y;
1736 int end_y = s->thread_context[i]-> end_mb_y;
1737 int h = s->mb_height;
1738 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1739 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1741 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1744 s->pict_type = s->new_picture.f->pict_type;
1746 ret = frame_start(s);
1750 ret = encode_picture(s, s->picture_number);
1751 if (growing_buffer) {
1752 av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1753 pkt->data = s->pb.buf;
1754 pkt->size = avctx->internal->byte_buffer_size;
1759 avctx->header_bits = s->header_bits;
1760 avctx->mv_bits = s->mv_bits;
1761 avctx->misc_bits = s->misc_bits;
1762 avctx->i_tex_bits = s->i_tex_bits;
1763 avctx->p_tex_bits = s->p_tex_bits;
1764 avctx->i_count = s->i_count;
1765 // FIXME f/b_count in avctx
1766 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1767 avctx->skip_count = s->skip_count;
1771 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1772 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
1774 if (avctx->rc_buffer_size) {
1775 RateControlContext *rcc = &s->rc_context;
1776 int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1778 if (put_bits_count(&s->pb) > max_size &&
1779 s->lambda < s->lmax) {
1780 s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1781 (s->qscale + 1) / s->qscale);
1782 if (s->adaptive_quant) {
1784 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1785 s->lambda_table[i] =
1786 FFMAX(s->lambda_table[i] + 1,
1787 s->lambda_table[i] * (s->qscale + 1) /
1790 s->mb_skipped = 0; // done in frame_start()
1791 // done in encode_picture() so we must undo it
1792 if (s->pict_type == AV_PICTURE_TYPE_P) {
1793 if (s->flipflop_rounding ||
1794 s->codec_id == AV_CODEC_ID_H263P ||
1795 s->codec_id == AV_CODEC_ID_MPEG4)
1796 s->no_rounding ^= 1;
1798 if (s->pict_type != AV_PICTURE_TYPE_B) {
1799 s->time_base = s->last_time_base;
1800 s->last_non_b_time = s->time - s->pp_time;
1802 for (i = 0; i < context_count; i++) {
1803 PutBitContext *pb = &s->thread_context[i]->pb;
1804 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1806 av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1810 av_assert0(s->avctx->rc_max_rate);
1813 if (s->avctx->flags & CODEC_FLAG_PASS1)
1814 ff_write_pass1_stats(s);
1816 for (i = 0; i < 4; i++) {
1817 s->current_picture_ptr->f->error[i] =
1818 s->current_picture.f->error[i] =
1819 s->current_picture.error[i];
1820 avctx->error[i] += s->current_picture_ptr->f->error[i];
1823 if (s->avctx->flags & CODEC_FLAG_PASS1)
1824 assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1825 avctx->i_tex_bits + avctx->p_tex_bits ==
1826 put_bits_count(&s->pb));
1827 flush_put_bits(&s->pb);
1828 s->frame_bits = put_bits_count(&s->pb);
1830 stuffing_count = ff_vbv_update(s, s->frame_bits);
1831 s->stuffing_bits = 8*stuffing_count;
1832 if (stuffing_count) {
1833 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1834 stuffing_count + 50) {
1835 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1839 switch (s->codec_id) {
1840 case AV_CODEC_ID_MPEG1VIDEO:
1841 case AV_CODEC_ID_MPEG2VIDEO:
1842 while (stuffing_count--) {
1843 put_bits(&s->pb, 8, 0);
1846 case AV_CODEC_ID_MPEG4:
1847 put_bits(&s->pb, 16, 0);
1848 put_bits(&s->pb, 16, 0x1C3);
1849 stuffing_count -= 4;
1850 while (stuffing_count--) {
1851 put_bits(&s->pb, 8, 0xFF);
1855 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1857 flush_put_bits(&s->pb);
1858 s->frame_bits = put_bits_count(&s->pb);
1861 /* update mpeg1/2 vbv_delay for CBR */
1862 if (s->avctx->rc_max_rate &&
1863 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1864 s->out_format == FMT_MPEG1 &&
1865 90000LL * (avctx->rc_buffer_size - 1) <=
1866 s->avctx->rc_max_rate * 0xFFFFLL) {
1867 int vbv_delay, min_delay;
1868 double inbits = s->avctx->rc_max_rate *
1869 av_q2d(s->avctx->time_base);
1870 int minbits = s->frame_bits - 8 *
1871 (s->vbv_delay_ptr - s->pb.buf - 1);
1872 double bits = s->rc_context.buffer_index + minbits - inbits;
1875 av_log(s->avctx, AV_LOG_ERROR,
1876 "Internal error, negative bits\n");
1878 assert(s->repeat_first_field == 0);
1880 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1881 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1882 s->avctx->rc_max_rate;
1884 vbv_delay = FFMAX(vbv_delay, min_delay);
1886 av_assert0(vbv_delay < 0xFFFF);
1888 s->vbv_delay_ptr[0] &= 0xF8;
1889 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1890 s->vbv_delay_ptr[1] = vbv_delay >> 5;
1891 s->vbv_delay_ptr[2] &= 0x07;
1892 s->vbv_delay_ptr[2] |= vbv_delay << 3;
1893 avctx->vbv_delay = vbv_delay * 300;
1895 s->total_bits += s->frame_bits;
1896 avctx->frame_bits = s->frame_bits;
1898 pkt->pts = s->current_picture.f->pts;
1899 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
1900 if (!s->current_picture.f->coded_picture_number)
1901 pkt->dts = pkt->pts - s->dts_delta;
1903 pkt->dts = s->reordered_pts;
1904 s->reordered_pts = pkt->pts;
1906 pkt->dts = pkt->pts;
1907 if (s->current_picture.f->key_frame)
1908 pkt->flags |= AV_PKT_FLAG_KEY;
1910 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
1915 /* release non-reference frames */
1916 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1917 if (!s->picture[i].reference)
1918 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1921 av_assert1((s->frame_bits & 7) == 0);
1923 pkt->size = s->frame_bits / 8;
1924 *got_packet = !!pkt->size;
1928 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1929 int n, int threshold)
1931 static const char tab[64] = {
1932 3, 2, 2, 1, 1, 1, 1, 1,
1933 1, 1, 1, 1, 1, 1, 1, 1,
1934 1, 1, 1, 1, 1, 1, 1, 1,
1935 0, 0, 0, 0, 0, 0, 0, 0,
1936 0, 0, 0, 0, 0, 0, 0, 0,
1937 0, 0, 0, 0, 0, 0, 0, 0,
1938 0, 0, 0, 0, 0, 0, 0, 0,
1939 0, 0, 0, 0, 0, 0, 0, 0
1944 int16_t *block = s->block[n];
1945 const int last_index = s->block_last_index[n];
1948 if (threshold < 0) {
1950 threshold = -threshold;
1954 /* Are all we could set to zero already zero? */
1955 if (last_index <= skip_dc - 1)
1958 for (i = 0; i <= last_index; i++) {
1959 const int j = s->intra_scantable.permutated[i];
1960 const int level = FFABS(block[j]);
1962 if (skip_dc && i == 0)
1966 } else if (level > 1) {
1972 if (score >= threshold)
1974 for (i = skip_dc; i <= last_index; i++) {
1975 const int j = s->intra_scantable.permutated[i];
1979 s->block_last_index[n] = 0;
1981 s->block_last_index[n] = -1;
1984 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
1988 const int maxlevel = s->max_qcoeff;
1989 const int minlevel = s->min_qcoeff;
1993 i = 1; // skip clipping of intra dc
1997 for (; i <= last_index; i++) {
1998 const int j = s->intra_scantable.permutated[i];
1999 int level = block[j];
2001 if (level > maxlevel) {
2004 } else if (level < minlevel) {
2012 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2013 av_log(s->avctx, AV_LOG_INFO,
2014 "warning, clipping %d dct coefficients to %d..%d\n",
2015 overflow, minlevel, maxlevel);
2018 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2022 for (y = 0; y < 8; y++) {
2023 for (x = 0; x < 8; x++) {
2029 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2030 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2031 int v = ptr[x2 + y2 * stride];
2037 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2042 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2043 int motion_x, int motion_y,
2044 int mb_block_height,
2048 int16_t weight[12][64];
2049 int16_t orig[12][64];
2050 const int mb_x = s->mb_x;
2051 const int mb_y = s->mb_y;
2054 int dct_offset = s->linesize * 8; // default for progressive frames
2055 int uv_dct_offset = s->uvlinesize * 8;
2056 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2057 ptrdiff_t wrap_y, wrap_c;
2059 for (i = 0; i < mb_block_count; i++)
2060 skip_dct[i] = s->skipdct;
2062 if (s->adaptive_quant) {
2063 const int last_qp = s->qscale;
2064 const int mb_xy = mb_x + mb_y * s->mb_stride;
2066 s->lambda = s->lambda_table[mb_xy];
2069 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2070 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2071 s->dquant = s->qscale - last_qp;
2073 if (s->out_format == FMT_H263) {
2074 s->dquant = av_clip(s->dquant, -2, 2);
2076 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2078 if (s->pict_type == AV_PICTURE_TYPE_B) {
2079 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2082 if (s->mv_type == MV_TYPE_8X8)
2088 ff_set_qscale(s, last_qp + s->dquant);
2089 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2090 ff_set_qscale(s, s->qscale + s->dquant);
2092 wrap_y = s->linesize;
2093 wrap_c = s->uvlinesize;
2094 ptr_y = s->new_picture.f->data[0] +
2095 (mb_y * 16 * wrap_y) + mb_x * 16;
2096 ptr_cb = s->new_picture.f->data[1] +
2097 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2098 ptr_cr = s->new_picture.f->data[2] +
2099 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2101 if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2102 uint8_t *ebuf = s->edge_emu_buffer + 36 * wrap_y;
2103 int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2104 int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2105 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2107 16, 16, mb_x * 16, mb_y * 16,
2108 s->width, s->height);
2110 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2112 mb_block_width, mb_block_height,
2113 mb_x * mb_block_width, mb_y * mb_block_height,
2115 ptr_cb = ebuf + 16 * wrap_y;
2116 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2118 mb_block_width, mb_block_height,
2119 mb_x * mb_block_width, mb_y * mb_block_height,
2121 ptr_cr = ebuf + 16 * wrap_y + 16;
2125 if (s->avctx->flags & CODEC_FLAG_INTERLACED_DCT) {
2126 int progressive_score, interlaced_score;
2128 s->interlaced_dct = 0;
2129 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2130 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2131 NULL, wrap_y, 8) - 400;
2133 if (progressive_score > 0) {
2134 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2135 NULL, wrap_y * 2, 8) +
2136 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2137 NULL, wrap_y * 2, 8);
2138 if (progressive_score > interlaced_score) {
2139 s->interlaced_dct = 1;
2141 dct_offset = wrap_y;
2142 uv_dct_offset = wrap_c;
2144 if (s->chroma_format == CHROMA_422 ||
2145 s->chroma_format == CHROMA_444)
2151 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2152 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2153 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2154 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2156 if (s->avctx->flags & CODEC_FLAG_GRAY) {
2160 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2161 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2162 if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2163 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2164 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2165 } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2166 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2167 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2168 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2169 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2170 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2171 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2175 op_pixels_func (*op_pix)[4];
2176 qpel_mc_func (*op_qpix)[16];
2177 uint8_t *dest_y, *dest_cb, *dest_cr;
2179 dest_y = s->dest[0];
2180 dest_cb = s->dest[1];
2181 dest_cr = s->dest[2];
2183 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2184 op_pix = s->hdsp.put_pixels_tab;
2185 op_qpix = s->qdsp.put_qpel_pixels_tab;
2187 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2188 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2191 if (s->mv_dir & MV_DIR_FORWARD) {
2192 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2193 s->last_picture.f->data,
2195 op_pix = s->hdsp.avg_pixels_tab;
2196 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2198 if (s->mv_dir & MV_DIR_BACKWARD) {
2199 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2200 s->next_picture.f->data,
2204 if (s->avctx->flags & CODEC_FLAG_INTERLACED_DCT) {
2205 int progressive_score, interlaced_score;
2207 s->interlaced_dct = 0;
2208 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2209 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2213 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2214 progressive_score -= 400;
2216 if (progressive_score > 0) {
2217 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2219 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2223 if (progressive_score > interlaced_score) {
2224 s->interlaced_dct = 1;
2226 dct_offset = wrap_y;
2227 uv_dct_offset = wrap_c;
2229 if (s->chroma_format == CHROMA_422)
2235 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2236 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2237 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2238 dest_y + dct_offset, wrap_y);
2239 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2240 dest_y + dct_offset + 8, wrap_y);
2242 if (s->avctx->flags & CODEC_FLAG_GRAY) {
2246 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2247 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2248 if (!s->chroma_y_shift) { /* 422 */
2249 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2250 dest_cb + uv_dct_offset, wrap_c);
2251 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2252 dest_cr + uv_dct_offset, wrap_c);
2255 /* pre quantization */
2256 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2257 2 * s->qscale * s->qscale) {
2259 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2261 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2263 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2264 wrap_y, 8) < 20 * s->qscale)
2266 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2267 wrap_y, 8) < 20 * s->qscale)
2269 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2271 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2273 if (!s->chroma_y_shift) { /* 422 */
2274 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2275 dest_cb + uv_dct_offset,
2276 wrap_c, 8) < 20 * s->qscale)
2278 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2279 dest_cr + uv_dct_offset,
2280 wrap_c, 8) < 20 * s->qscale)
2286 if (s->quantizer_noise_shaping) {
2288 get_visual_weight(weight[0], ptr_y , wrap_y);
2290 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2292 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2294 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2296 get_visual_weight(weight[4], ptr_cb , wrap_c);
2298 get_visual_weight(weight[5], ptr_cr , wrap_c);
2299 if (!s->chroma_y_shift) { /* 422 */
2301 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2304 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2307 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2310 /* DCT & quantize */
2311 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2313 for (i = 0; i < mb_block_count; i++) {
2316 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2317 // FIXME we could decide to change to quantizer instead of
2319 // JS: I don't think that would be a good idea it could lower
2320 // quality instead of improve it. Just INTRADC clipping
2321 // deserves changes in quantizer
2323 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2325 s->block_last_index[i] = -1;
2327 if (s->quantizer_noise_shaping) {
2328 for (i = 0; i < mb_block_count; i++) {
2330 s->block_last_index[i] =
2331 dct_quantize_refine(s, s->block[i], weight[i],
2332 orig[i], i, s->qscale);
2337 if (s->luma_elim_threshold && !s->mb_intra)
2338 for (i = 0; i < 4; i++)
2339 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2340 if (s->chroma_elim_threshold && !s->mb_intra)
2341 for (i = 4; i < mb_block_count; i++)
2342 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2344 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2345 for (i = 0; i < mb_block_count; i++) {
2346 if (s->block_last_index[i] == -1)
2347 s->coded_score[i] = INT_MAX / 256;
2352 if ((s->avctx->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
2353 s->block_last_index[4] =
2354 s->block_last_index[5] = 0;
2356 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2357 if (!s->chroma_y_shift) { /* 422 / 444 */
2358 for (i=6; i<12; i++) {
2359 s->block_last_index[i] = 0;
2360 s->block[i][0] = s->block[4][0];
2365 // non c quantize code returns incorrect block_last_index FIXME
2366 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2367 for (i = 0; i < mb_block_count; i++) {
2369 if (s->block_last_index[i] > 0) {
2370 for (j = 63; j > 0; j--) {
2371 if (s->block[i][s->intra_scantable.permutated[j]])
2374 s->block_last_index[i] = j;
2379 /* huffman encode */
2380 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2381 case AV_CODEC_ID_MPEG1VIDEO:
2382 case AV_CODEC_ID_MPEG2VIDEO:
2383 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2384 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2386 case AV_CODEC_ID_MPEG4:
2387 if (CONFIG_MPEG4_ENCODER)
2388 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2390 case AV_CODEC_ID_MSMPEG4V2:
2391 case AV_CODEC_ID_MSMPEG4V3:
2392 case AV_CODEC_ID_WMV1:
2393 if (CONFIG_MSMPEG4_ENCODER)
2394 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2396 case AV_CODEC_ID_WMV2:
2397 if (CONFIG_WMV2_ENCODER)
2398 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2400 case AV_CODEC_ID_H261:
2401 if (CONFIG_H261_ENCODER)
2402 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2404 case AV_CODEC_ID_H263:
2405 case AV_CODEC_ID_H263P:
2406 case AV_CODEC_ID_FLV1:
2407 case AV_CODEC_ID_RV10:
2408 case AV_CODEC_ID_RV20:
2409 if (CONFIG_H263_ENCODER)
2410 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2412 case AV_CODEC_ID_MJPEG:
2413 case AV_CODEC_ID_AMV:
2414 if (CONFIG_MJPEG_ENCODER)
2415 ff_mjpeg_encode_mb(s, s->block);
/* Dispatch macroblock encoding according to the chroma subsampling:
 * 4:2:0 -> 8x8 chroma, 6 blocks/MB; 4:2:2 -> 16x8 chroma, 8 blocks/MB;
 * otherwise (4:4:4) -> 16x16 chroma, 12 blocks/MB. */
2422 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2424     if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2425     else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2426     else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
/* Snapshot the mutable encoder state of 's' into 'd' before a candidate
 * macroblock encoding attempt, so the attempt can be rolled back/compared.
 * Copies predictors (last MVs, last DC), bit-accounting counters, quantizer
 * state and the MPEG-4 escape-length state.  'type' selects what to copy
 * (negative means everything; see callers). */
2429 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2432     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2435     d->mb_skip_run= s->mb_skip_run;
2437         d->last_dc[i] = s->last_dc[i];
2440     d->mv_bits= s->mv_bits;
2441     d->i_tex_bits= s->i_tex_bits;
2442     d->p_tex_bits= s->p_tex_bits;
2443     d->i_count= s->i_count;
2444     d->f_count= s->f_count;
2445     d->b_count= s->b_count;
2446     d->skip_count= s->skip_count;
2447     d->misc_bits= s->misc_bits;
2451     d->qscale= s->qscale;
2452     d->dquant= s->dquant;
2454     d->esc3_level_length= s->esc3_level_length;
/* Counterpart of copy_context_before_encode(): after a candidate encoding,
 * copy the resulting state from 's' into 'd' (used both to record the best
 * candidate and to restore it at the end).  In addition to predictors and
 * bit counters it copies the chosen MB decision (mb_intra/mb_skipped/
 * mv_type/mv_dir), the partitioned-bitstream contexts, per-block
 * block_last_index, and interlaced-DCT/qscale state. */
2457 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2460     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2461     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2464     d->mb_skip_run= s->mb_skip_run;
2466         d->last_dc[i] = s->last_dc[i];
2469     d->mv_bits= s->mv_bits;
2470     d->i_tex_bits= s->i_tex_bits;
2471     d->p_tex_bits= s->p_tex_bits;
2472     d->i_count= s->i_count;
2473     d->f_count= s->f_count;
2474     d->b_count= s->b_count;
2475     d->skip_count= s->skip_count;
2476     d->misc_bits= s->misc_bits;
2478     d->mb_intra= s->mb_intra;
2479     d->mb_skipped= s->mb_skipped;
2480     d->mv_type= s->mv_type;
2481     d->mv_dir= s->mv_dir;
2483     if(s->data_partitioning){
2485         d->tex_pb= s->tex_pb;
2489         d->block_last_index[i]= s->block_last_index[i];
2490     d->interlaced_dct= s->interlaced_dct;
2491     d->qscale= s->qscale;
2493     d->esc3_level_length= s->esc3_level_length;
/* Encode one macroblock with candidate type 'type' into the ping-pong
 * PutBitContext selected by *next_block, compute its cost (bit count, or
 * lambda-weighted rate + SSE distortion when mb_decision==FF_MB_DECISION_RD),
 * and if it beats *dmin record the result into 'best'.  's' is restored
 * from 'backup' before the attempt.  In RD mode the MB is reconstructed
 * into rd_scratchpad instead of the real destination so the frame buffer
 * is not disturbed by rejected candidates. */
2496 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2497 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2498 int *dmin, int *next_block, int motion_x, int motion_y)
2501     uint8_t *dest_backup[3];
2503     copy_context_before_encode(s, backup, type);
2505     s->block= s->blocks[*next_block];
2506     s->pb= pb[*next_block];
2507     if(s->data_partitioning){
2508         s->pb2   = pb2   [*next_block];
2509         s->tex_pb= tex_pb[*next_block];
     /* Redirect reconstruction into the scratchpad; layout: luma 16x16 at
      * offset 0, Cb at 16*linesize, Cr at 16*linesize + 8. */
2513         memcpy(dest_backup, s->dest, sizeof(s->dest));
2514         s->dest[0] = s->rd_scratchpad;
2515         s->dest[1] = s->rd_scratchpad + 16*s->linesize;
2516         s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
2517         av_assert0(s->linesize >= 32); //FIXME
2520     encode_mb(s, motion_x, motion_y);
2522     score= put_bits_count(&s->pb);
2523     if(s->data_partitioning){
2524         score+= put_bits_count(&s->pb2);
2525         score+= put_bits_count(&s->tex_pb);
2528     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2529         ff_mpv_decode_mb(s, s->block);
     /* RD cost: rate * lambda2 + distortion << FF_LAMBDA_SHIFT */
2531         score *= s->lambda2;
2532         score += sse_mb(s) << FF_LAMBDA_SHIFT;
2536         memcpy(s->dest, dest_backup, sizeof(s->dest));
2543         copy_context_after_encode(best, s, type);
/* Sum of squared differences between two w x h pixel regions with the given
 * stride.  Uses the optimized mecc.sse functions for the common 16x16 and
 * 8x8 cases, otherwise a scalar loop via the ff_square_tab lookup table
 * (offset by 256 so negative differences index correctly). */
2547 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2548     uint32_t *sq = ff_square_tab + 256;
2553         return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2554     else if(w==8 && h==8)
2555         return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2559             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/* Distortion of the current macroblock: compares the source MB in
 * new_picture against the reconstruction in s->dest for all three planes.
 * Full 16x16 MBs use the fast mecc.sse (or mecc.nsse when mb_cmp is
 * FF_CMP_NSSE); partial edge MBs (w/h clipped to the frame size) fall back
 * to the generic sse() helper.
 * NOTE(review): this path uses 8x8 chroma, i.e. it assumes 4:2:0 here. */
2568 static int sse_mb(MpegEncContext *s){
2572     if(s->mb_x*16 + 16 > s->width ) w= s->width  - s->mb_x*16;
2573     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2576         if(s->avctx->mb_cmp == FF_CMP_NSSE){
2577             return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize   * 16, s->dest[0], s->linesize,   16) +
2578                    s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[1], s->uvlinesize,  8) +
2579                    s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[2], s->uvlinesize,  8);
2581             return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize   * 16, s->dest[0], s->linesize,   16) +
2582                    s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[1], s->uvlinesize,  8) +
2583                    s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[2], s->uvlinesize,  8);
2586         return  sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2587                +sse(s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2588                +sse(s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/* Slice-thread worker for the pre-motion-estimation pass.  Scans the
 * slice's macroblocks bottom-to-top / right-to-left (reverse raster order)
 * with the pre_dia_size diamond and stores coarse P-frame motion
 * information via ff_pre_estimate_p_frame_motion(). */
2591 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2592     MpegEncContext *s= *(void**)arg;
2596     s->me.dia_size= s->avctx->pre_dia_size;
2597     s->first_slice_line=1;
2598     for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2599         for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2600             ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2602         s->first_slice_line=0;
/* Slice-thread worker for the main motion-estimation pass.  Walks the
 * slice in raster order, keeps block_index[] in sync (each MB advances the
 * four luma block indices by 2), and runs B- or P-frame motion estimation
 * per MB; results are stored in the context's MV tables for the later
 * encode pass. */
2610 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2611     MpegEncContext *s= *(void**)arg;
2613     ff_check_alignment();
2615     s->me.dia_size= s->avctx->dia_size;
2616     s->first_slice_line=1;
2617     for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2618         s->mb_x=0; //for block init below
2619         ff_init_block_index(s);
2620         for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2621             s->block_index[0]+=2;
2622             s->block_index[1]+=2;
2623             s->block_index[2]+=2;
2624             s->block_index[3]+=2;
2626             /* compute motion vector & mb_type and store in context */
2627             if(s->pict_type==AV_PICTURE_TYPE_B)
2628                 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2630                 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2632         s->first_slice_line=0;
/* Slice-thread worker computing the per-macroblock spatial variance and
 * mean of the source luma (used by adaptive quantization / rate control).
 * varc = (sum(x^2) - (sum(x))^2/256 + 500 + 128) >> 8 for the 16x16 MB;
 * results go into current_picture.mb_var / mb_mean and the slice-local
 * accumulator me.mb_var_sum_temp (merged later by merge_context_after_me). */
2637 static int mb_var_thread(AVCodecContext *c, void *arg){
2638     MpegEncContext *s= *(void**)arg;
2641     ff_check_alignment();
2643     for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2644         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2647             uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2649             int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2651             varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2652                     (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2654             s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2655             s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2656             s->me.mb_var_sum_temp    += varc;
/* Terminate the current slice bitstream: merge MPEG-4 data partitions and
 * emit stuffing (MPEG-4) or MJPEG stuffing, then byte-align and flush the
 * PutBitContext.  In two-pass mode (PASS1) the alignment/stuffing bits are
 * accounted as misc_bits. */
2662 static void write_slice_end(MpegEncContext *s){
2663     if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2664         if(s->partitioned_frame){
2665             ff_mpeg4_merge_partitions(s);
2668         ff_mpeg4_stuffing(&s->pb);
2669     }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2670         ff_mjpeg_encode_stuffing(s);
2673     avpriv_align_put_bits(&s->pb);
2674     flush_put_bits(&s->pb);
2676     if ((s->avctx->flags & CODEC_FLAG_PASS1) && !s->partitioned_frame)
2677         s->misc_bits+= get_bits_diff(s);
/* Append one 12-byte macroblock-info record to the mb_info side-data
 * buffer (H.263 RFC-2190 style resync info): bit offset of the MB in the
 * packet, quantizer, GOB number, MB address within the GOB, and the
 * horizontal/vertical MV predictors.  The 4MV predictor pair is not
 * implemented and written as zero. */
2680 static void write_mb_info(MpegEncContext *s)
2682     uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2683     int offset = put_bits_count(&s->pb);
2684     int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2685     int gobn = s->mb_y / s->gob_index;
2687     if (CONFIG_H263_ENCODER)
2688         ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2689     bytestream_put_le32(&ptr, offset);
2690     bytestream_put_byte(&ptr, s->qscale);
2691     bytestream_put_byte(&ptr, gobn);
2692     bytestream_put_le16(&ptr, mba);
2693     bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2694     bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2695     /* 4MV not implemented */
2696     bytestream_put_byte(&ptr, 0); /* hmv2 */
2697     bytestream_put_byte(&ptr, 0); /* vmv2 */
/* Bookkeeping for AV_PKT_DATA_H263_MB_INFO side data: grow the info table
 * by one 12-byte slot whenever the bitstream has advanced past the
 * configured mb_info interval, and remember byte positions so the record
 * can be filled in by write_mb_info().  'startcode' marks calls made right
 * before/after a resync marker is written. */
2700 static void update_mb_info(MpegEncContext *s, int startcode)
2704     if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2705         s->mb_info_size += 12;
2706         s->prev_mb_info = s->last_mb_info;
2709         s->prev_mb_info = put_bits_count(&s->pb)/8;
2710         /* This might have incremented mb_info_size above, and we return without
2711          * actually writing any info into that slot yet. But in that case,
2712          * this will be called again at the start of the after writing the
2713          * start code, actually writing the mb info. */
2717     s->last_mb_info = put_bits_count(&s->pb)/8;
2718     if (!s->mb_info_size)
2719         s->mb_info_size += 12;
/* Grow the shared output bit buffer when fewer than 'threshold' bytes
 * remain, by at least 'size_increase' bytes.  Only possible with a single
 * slice context writing directly into avctx->internal->byte_buffer.  On
 * success the PutBitContext is rebased onto the new buffer and the
 * ptr_lastgob / vbv_delay_ptr pointers are re-anchored.  Returns 0 on
 * success, AVERROR(ENOMEM) on allocation failure, or AVERROR(EINVAL) if the
 * buffer still cannot satisfy 'threshold' (e.g. multiple slice contexts). */
2723 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2725     if (   s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2726         && s->slice_context_count == 1
2727         && s->pb.buf == s->avctx->internal->byte_buffer) {
2728         int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2729         int vbv_pos     = s->vbv_delay_ptr - s->pb.buf;
2731         uint8_t *new_buffer = NULL;
2732         int new_buffer_size = 0;
2734         av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2735                               s->avctx->internal->byte_buffer_size + size_increase);
2737             return AVERROR(ENOMEM);
2739         memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2740         av_free(s->avctx->internal->byte_buffer);
2741         s->avctx->internal->byte_buffer      = new_buffer;
2742         s->avctx->internal->byte_buffer_size = new_buffer_size;
2743         rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2744         s->ptr_lastgob   = s->pb.buf + lastgob_pos;
2745         s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2747     if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2748         return AVERROR(EINVAL);
/* Slice-thread worker that performs the actual macroblock encoding pass
 * for this slice: writes GOB / slice / video-packet headers, chooses the
 * macroblock mode (exhaustive rate(-distortion) trials when several
 * candidate types or QP_RD are enabled, direct single-type encode
 * otherwise), encodes each MB, and maintains resync/PSNR/RTP bookkeeping. */
2752 static int encode_thread(AVCodecContext *c, void *arg){
2753     MpegEncContext *s= *(void**)arg;
2754     int mb_x, mb_y, pdif = 0;
2755     int chr_h= 16>>s->chroma_y_shift;
2757     MpegEncContext best_s = { 0 }, backup_s;
     /* Two sets of scratch bitstreams: candidate MB encodings are written
      * alternately into pb[0]/pb[1] (plus pb2/tex_pb when data partitioning
      * is active) and the winner is copied back into the real stream. */
2758     uint8_t bit_buf[2][MAX_MB_BYTES];
2759     uint8_t bit_buf2[2][MAX_MB_BYTES];
2760     uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2761     PutBitContext pb[2], pb2[2], tex_pb[2];
2763     ff_check_alignment();
2766         init_put_bits(&pb    [i], bit_buf    [i], MAX_MB_BYTES);
2767         init_put_bits(&pb2   [i], bit_buf2   [i], MAX_MB_BYTES);
2768         init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2771     s->last_bits= put_bits_count(&s->pb);
2782     /* init last dc values */
2783     /* note: quant matrix value (8) is implied here */
2784     s->last_dc[i] = 128 << s->intra_dc_precision;
2786     s->current_picture.error[i] = 0;
2788     if(s->codec_id==AV_CODEC_ID_AMV){
     /* AMV uses non-standard DC predictor resets */
2789         s->last_dc[0] = 128*8/13;
2790         s->last_dc[1] = 128*8/14;
2791         s->last_dc[2] = 128*8/14;
2794     memset(s->last_mv, 0, sizeof(s->last_mv));
2798     switch(s->codec_id){
2799     case AV_CODEC_ID_H263:
2800     case AV_CODEC_ID_H263P:
2801     case AV_CODEC_ID_FLV1:
2802         if (CONFIG_H263_ENCODER)
2803             s->gob_index = H263_GOB_HEIGHT(s->height);
2805     case AV_CODEC_ID_MPEG4:
2806         if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2807             ff_mpeg4_init_partitions(s);
2813     s->first_slice_line = 1;
2814     s->ptr_lastgob = s->pb.buf;
2815     for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2819         ff_set_qscale(s, s->qscale);
2820         ff_init_block_index(s);
2822         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2823             int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2824             int mb_type= s->mb_type[xy];
     /* make sure there is always room for a worst-case MB plus growth */
2828             int size_increase =  s->avctx->internal->byte_buffer_size/4
2829                                + s->mb_width*MAX_MB_BYTES;
2831             ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
2832             if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2833                 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2836             if(s->data_partitioning){
2837                 if(   s->pb2   .buf_end - s->pb2   .buf - (put_bits_count(&s->    pb2)>>3) < MAX_MB_BYTES
2838                    || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2839                     av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2845             s->mb_y = mb_y;  // moved into loop, can get changed by H.261
2846             ff_update_block_index(s);
2848             if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2849                 ff_h261_reorder_mb_index(s);
2850                 xy= s->mb_y*s->mb_stride + s->mb_x;
2851                 mb_type= s->mb_type[xy];
2854             /* write gob / video packet header  */
2856                 int current_packet_size, is_gob_start;
2858                 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2860                 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2862                 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
     /* per-codec restrictions on where a GOB/slice may start */
2864                 switch(s->codec_id){
2865                 case AV_CODEC_ID_H263:
2866                 case AV_CODEC_ID_H263P:
2867                     if(!s->h263_slice_structured)
2868                         if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2870                 case AV_CODEC_ID_MPEG2VIDEO:
2871                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2872                 case AV_CODEC_ID_MPEG1VIDEO:
2873                     if(s->mb_skip_run) is_gob_start=0;
2875                 case AV_CODEC_ID_MJPEG:
2876                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2881                     if(s->start_mb_y != mb_y || mb_x!=0){
2884                         if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2885                             ff_mpeg4_init_partitions(s);
2889                     av_assert2((put_bits_count(&s->pb)&7) == 0);
2890                     current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
     /* error_rate: pseudo-randomly drop packets for error-resilience testing */
2892                     if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
2893                         int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2894                         int d = 100 / s->error_rate;
2896                             current_packet_size=0;
2897                             s->pb.buf_ptr= s->ptr_lastgob;
2898                             assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2902                     if (s->avctx->rtp_callback){
2903                         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2904                         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2906                     update_mb_info(s, 1);
2908                     switch(s->codec_id){
2909                     case AV_CODEC_ID_MPEG4:
2910                         if (CONFIG_MPEG4_ENCODER) {
2911                             ff_mpeg4_encode_video_packet_header(s);
2912                             ff_mpeg4_clean_buffers(s);
2915                     case AV_CODEC_ID_MPEG1VIDEO:
2916                     case AV_CODEC_ID_MPEG2VIDEO:
2917                         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2918                             ff_mpeg1_encode_slice_header(s);
2919                             ff_mpeg1_clean_buffers(s);
2922                     case AV_CODEC_ID_H263:
2923                     case AV_CODEC_ID_H263P:
2924                         if (CONFIG_H263_ENCODER)
2925                             ff_h263_encode_gob_header(s, mb_y);
2929                     if (s->avctx->flags & CODEC_FLAG_PASS1) {
2930                         int bits= put_bits_count(&s->pb);
2931                         s->misc_bits+= bits - s->last_bits;
2935                     s->ptr_lastgob += current_packet_size;
2936                     s->first_slice_line=1;
2937                     s->resync_mb_x=mb_x;
2938                     s->resync_mb_y=mb_y;
2942             if(  (s->resync_mb_x   == s->mb_x)
2943                && s->resync_mb_y+1 == s->mb_y){
2944                 s->first_slice_line=0;
2948             s->dquant=0; //only for QP_RD
2950             update_mb_info(s, 0);
2952             if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
     /* --- mode decision: try every candidate MB type via encode_mb_hq()
      *     and keep the cheapest in best_s --- */
2954                 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2956                 copy_context_before_encode(&backup_s, s, -1);
2958                 best_s.data_partitioning= s->data_partitioning;
2959                 best_s.partitioned_frame= s->partitioned_frame;
2960                 if(s->data_partitioning){
2961                     backup_s.pb2= s->pb2;
2962                     backup_s.tex_pb= s->tex_pb;
2965                 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2966                     s->mv_dir = MV_DIR_FORWARD;
2967                     s->mv_type = MV_TYPE_16X16;
2969                     s->mv[0][0][0] = s->p_mv_table[xy][0];
2970                     s->mv[0][0][1] = s->p_mv_table[xy][1];
2971                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2972                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2974                 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2975                     s->mv_dir = MV_DIR_FORWARD;
2976                     s->mv_type = MV_TYPE_FIELD;
2979                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2980                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2981                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2983                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2984                                  &dmin, &next_block, 0, 0);
2986                 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2987                     s->mv_dir = MV_DIR_FORWARD;
2988                     s->mv_type = MV_TYPE_16X16;
2992                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2993                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2995                 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2996                     s->mv_dir = MV_DIR_FORWARD;
2997                     s->mv_type = MV_TYPE_8X8;
3000                         s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3001                         s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3003                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3004                                  &dmin, &next_block, 0, 0);
3006                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3007                     s->mv_dir = MV_DIR_FORWARD;
3008                     s->mv_type = MV_TYPE_16X16;
3010                     s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3011                     s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3012                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3013                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3015                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3016                     s->mv_dir = MV_DIR_BACKWARD;
3017                     s->mv_type = MV_TYPE_16X16;
3019                     s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3020                     s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3021                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3022                                  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3024                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3025                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3026                     s->mv_type = MV_TYPE_16X16;
3028                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3029                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3030                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3031                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3032                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3033                                  &dmin, &next_block, 0, 0);
3035                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3036                     s->mv_dir = MV_DIR_FORWARD;
3037                     s->mv_type = MV_TYPE_FIELD;
3040                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3041                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3042                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3044                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3045                                  &dmin, &next_block, 0, 0);
3047                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3048                     s->mv_dir = MV_DIR_BACKWARD;
3049                     s->mv_type = MV_TYPE_FIELD;
3052                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3053                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3054                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3056                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3057                                  &dmin, &next_block, 0, 0);
3059                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3060                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3061                     s->mv_type = MV_TYPE_FIELD;
3063                     for(dir=0; dir<2; dir++){
3065                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3066                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3067                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3070                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3071                                  &dmin, &next_block, 0, 0);
3073                 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3075                     s->mv_type = MV_TYPE_16X16;
3079                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3080                                  &dmin, &next_block, 0, 0);
3081                     if(s->h263_pred || s->h263_aic){
3083                             s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3085                             ff_clean_intra_table_entries(s); //old mode?
     /* --- QP_RD: re-try the best 16x16 mode at neighbouring quantizers --- */
3089                 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3090                     if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3091                         const int last_qp= backup_s.qscale;
3094                         const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3095                         static const int dquant_tab[4]={-1,1,-2,2};
3096                         int storecoefs = s->mb_intra && s->dc_val[0];
3098                         av_assert2(backup_s.dquant == 0);
3101                         s->mv_dir= best_s.mv_dir;
3102                         s->mv_type = MV_TYPE_16X16;
3103                         s->mb_intra= best_s.mb_intra;
3104                         s->mv[0][0][0] = best_s.mv[0][0][0];
3105                         s->mv[0][0][1] = best_s.mv[0][0][1];
3106                         s->mv[1][0][0] = best_s.mv[1][0][0];
3107                         s->mv[1][0][1] = best_s.mv[1][0][1];
     /* B-frames only allow +/-2 dquant steps; others try all four */
3109                         qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3110                         for(; qpi<4; qpi++){
3111                             int dquant= dquant_tab[qpi];
3112                             qp= last_qp + dquant;
3113                             if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3115                             backup_s.dquant= dquant;
     /* save DC/AC predictors: the trial encode mutates them */
3118                                     dc[i]= s->dc_val[0][ s->block_index[i] ];
3119                                     memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3123                             encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3124                                          &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3125                             if(best_s.qscale != qp){
3128                                         s->dc_val[0][ s->block_index[i] ]= dc[i];
3129                                         memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3136                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3137                     int mx= s->b_direct_mv_table[xy][0];
3138                     int my= s->b_direct_mv_table[xy][1];
3140                     backup_s.dquant = 0;
3141                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3143                     ff_mpeg4_set_direct_mv(s, mx, my);
3144                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3145                                  &dmin, &next_block, mx, my);
3147                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3148                     backup_s.dquant = 0;
3149                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3151                     ff_mpeg4_set_direct_mv(s, 0, 0);
3152                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3153                                  &dmin, &next_block, 0, 0);
     /* SKIP_RD: if the best inter MB coded nothing, also try coding it as skipped */
3155                 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3158                         coded |= s->block_last_index[i];
3161                         memcpy(s->mv, best_s.mv, sizeof(s->mv));
3162                         if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3163                             mx=my=0; //FIXME find the one we actually used
3164                             ff_mpeg4_set_direct_mv(s, mx, my);
3165                         }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3173                         s->mv_dir= best_s.mv_dir;
3174                         s->mv_type = best_s.mv_type;
3176 //                        s->mv[0][0][0] = best_s.mv[0][0][0];
3177 //                        s->mv[0][0][1] = best_s.mv[0][0][1];
3178 //                        s->mv[1][0][0] = best_s.mv[1][0][0];
3179 //                        s->mv[1][0][1] = best_s.mv[1][0][1];
3182                         encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3183                                      &dmin, &next_block, mx, my);
3188                 s->current_picture.qscale_table[xy] = best_s.qscale;
     /* adopt the winning candidate and copy its bits into the real stream */
3190                 copy_context_after_encode(s, &best_s, -1);
3192                 pb_bits_count= put_bits_count(&s->pb);
3193                 flush_put_bits(&s->pb);
3194                 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3197                 if(s->data_partitioning){
3198                     pb2_bits_count= put_bits_count(&s->pb2);
3199                     flush_put_bits(&s->pb2);
3200                     avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3201                     s->pb2= backup_s.pb2;
3203                     tex_pb_bits_count= put_bits_count(&s->tex_pb);
3204                     flush_put_bits(&s->tex_pb);
3205                     avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3206                     s->tex_pb= backup_s.tex_pb;
3208                 s->last_bits= put_bits_count(&s->pb);
3210                 if (CONFIG_H263_ENCODER &&
3211                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3212                     ff_h263_update_motion_val(s);
3214                 if(next_block==0){ //FIXME 16 vs linesize16
     /* winner was reconstructed into the scratchpad; copy it to the frame */
3215                     s->hdsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad                     , s->linesize  ,16);
3216                     s->hdsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize    , s->uvlinesize, 8);
3217                     s->hdsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3220                 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3221                     ff_mpv_decode_mb(s, s->block);
     /* --- single-candidate fast path: encode the one possible MB type --- */
3223                 int motion_x = 0, motion_y = 0;
3224                 s->mv_type=MV_TYPE_16X16;
3225                 // only one MB-Type possible
3228                 case CANDIDATE_MB_TYPE_INTRA:
3231                     motion_x= s->mv[0][0][0] = 0;
3232                     motion_y= s->mv[0][0][1] = 0;
3234                 case CANDIDATE_MB_TYPE_INTER:
3235                     s->mv_dir = MV_DIR_FORWARD;
3237                     motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3238                     motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3240                 case CANDIDATE_MB_TYPE_INTER_I:
3241                     s->mv_dir = MV_DIR_FORWARD;
3242                     s->mv_type = MV_TYPE_FIELD;
3245                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3246                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3247                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3250                 case CANDIDATE_MB_TYPE_INTER4V:
3251                     s->mv_dir = MV_DIR_FORWARD;
3252                     s->mv_type = MV_TYPE_8X8;
3255                         s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3256                         s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3259                 case CANDIDATE_MB_TYPE_DIRECT:
3260                     if (CONFIG_MPEG4_ENCODER) {
3261                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3263                         motion_x=s->b_direct_mv_table[xy][0];
3264                         motion_y=s->b_direct_mv_table[xy][1];
3265                         ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3268                 case CANDIDATE_MB_TYPE_DIRECT0:
3269                     if (CONFIG_MPEG4_ENCODER) {
3270                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3272                         ff_mpeg4_set_direct_mv(s, 0, 0);
3275                 case CANDIDATE_MB_TYPE_BIDIR:
3276                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3278                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3279                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3280                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3281                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3283                 case CANDIDATE_MB_TYPE_BACKWARD:
3284                     s->mv_dir = MV_DIR_BACKWARD;
3286                     motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3287                     motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3289                 case CANDIDATE_MB_TYPE_FORWARD:
3290                     s->mv_dir = MV_DIR_FORWARD;
3292                     motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3293                     motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3295                 case CANDIDATE_MB_TYPE_FORWARD_I:
3296                     s->mv_dir = MV_DIR_FORWARD;
3297                     s->mv_type = MV_TYPE_FIELD;
3300                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3301                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3302                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3305                 case CANDIDATE_MB_TYPE_BACKWARD_I:
3306                     s->mv_dir = MV_DIR_BACKWARD;
3307                     s->mv_type = MV_TYPE_FIELD;
3310                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3311                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3312                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3315                 case CANDIDATE_MB_TYPE_BIDIR_I:
3316                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3317                     s->mv_type = MV_TYPE_FIELD;
3319                     for(dir=0; dir<2; dir++){
3321                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3322                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3323                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3328                     av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3331                 encode_mb(s, motion_x, motion_y);
3333                 // RAL: Update last macroblock type
3334                 s->last_mv_dir = s->mv_dir;
3336                 if (CONFIG_H263_ENCODER &&
3337                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3338                     ff_h263_update_motion_val(s);
3340                 ff_mpv_decode_mb(s, s->block);
3343             /* clean the MV table in IPS frames for direct mode in B frames */
3344             if(s->mb_intra /* && I,P,S_TYPE */){
3345                 s->p_mv_table[xy][0]=0;
3346                 s->p_mv_table[xy][1]=0;
3349             if (s->avctx->flags & CODEC_FLAG_PSNR) {
     /* accumulate per-plane SSE for PSNR reporting (w/h clipped at edges) */
3353                 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3354                 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3356                 s->current_picture.error[0] += sse(
3357                     s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3358                     s->dest[0], w, h, s->linesize);
3359                 s->current_picture.error[1] += sse(
3360                     s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
3361                     s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3362                 s->current_picture.error[2] += sse(
3363                     s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
3364                     s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3367             if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3368                 ff_h263_loop_filter(s);
3370             ff_dlog(s->avctx, "MB %d %d bits\n",
3371                     s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3375     //not beautiful here but we must write it before flushing so it has to be here
3376     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3377         ff_msmpeg4_encode_ext_header(s);
3381     /* Send the last GOB if RTP */
3382     if (s->avctx->rtp_callback) {
3383         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3384         pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3385         /* Call the RTP callback to send the last GOB */
3387         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
/* MERGE: add src's counter into dst and zero src, so repeated merges do
 * not double-count. */
3393 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold per-slice-thread motion-estimation statistics into the main context. */
3394 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3395     MERGE(me.scene_change_score);
3396     MERGE(me.mc_mb_var_sum_temp);
3397     MERGE(me.mb_var_sum_temp);
/* Fold per-slice-thread encode statistics into the main context
 * (bit counters, DCT/noise-reduction sums, error counts, PSNR error) and
 * append the slice's byte-aligned bitstream to the main PutBitContext. */
3400 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3403     MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3404     MERGE(dct_count[1]);
3413     MERGE(er.error_count);
3414     MERGE(padding_bug_score);
3415     MERGE(current_picture.error[0]);
3416     MERGE(current_picture.error[1]);
3417     MERGE(current_picture.error[2]);
3419     if(dst->avctx->noise_reduction){
3420         for(i=0; i<64; i++){
3421             MERGE(dct_error_sum[0][i]);
3422             MERGE(dct_error_sum[1][i]);
     /* slices were flushed byte-aligned by write_slice_end() */
3426     assert(put_bits_count(&src->pb) % 8 ==0);
3427     assert(put_bits_count(&dst->pb) % 8 ==0);
3428     avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3429     flush_put_bits(&dst->pb);
/* Choose the frame quality/lambda: either a pre-set next_lambda, or the
 * rate-control estimate (unless fixed_qscale).  With adaptive quantization
 * the per-MB qscale table is sanitized per codec and lambda comes from the
 * table; otherwise lambda is the frame quality.  'dry_run' avoids
 * committing state (next_lambda is kept).  Returns negative on a failed
 * rate-control estimate. */
3432 static int estimate_qp(MpegEncContext *s, int dry_run){
3433     if (s->next_lambda){
3434         s->current_picture_ptr->f->quality =
3435         s->current_picture.f->quality = s->next_lambda;
3436         if(!dry_run) s->next_lambda= 0;
3437     } else if (!s->fixed_qscale) {
3438         s->current_picture_ptr->f->quality =
3439         s->current_picture.f->quality = ff_rate_estimate_qscale(s, dry_run);
3440         if (s->current_picture.f->quality < 0)
3444     if(s->adaptive_quant){
3445         switch(s->codec_id){
3446         case AV_CODEC_ID_MPEG4:
3447             if (CONFIG_MPEG4_ENCODER)
3448                 ff_clean_mpeg4_qscales(s);
3450         case AV_CODEC_ID_H263:
3451         case AV_CODEC_ID_H263P:
3452         case AV_CODEC_ID_FLV1:
3453             if (CONFIG_H263_ENCODER)
3454                 ff_clean_h263_qscales(s);
3457         ff_init_qscale_tab(s);
3460         s->lambda= s->lambda_table[0];
3463         s->lambda = s->current_picture.f->quality;
3468 /* must be called before writing the header */
/* Derive temporal distances from the current frame's pts:
 * pp_time = distance between the two surrounding non-B frames,
 * pb_time = distance from the previous non-B frame to this B frame.
 * These drive B-frame MV scaling (e.g. MPEG-4 direct mode). */
3469 static void set_frame_distances(MpegEncContext * s){
3470     av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3471     s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3473     if(s->pict_type==AV_PICTURE_TYPE_B){
3474         s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3475         assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3477         s->pp_time= s->time - s->last_non_b_time;
3478         s->last_non_b_time= s->time;
3479         assert(s->picture_number==0 || s->pp_time > 0);
/**
 * Encode one picture: estimate qp, run motion estimation across all slice
 * contexts, pick f_code/b_code, set up quantization matrices, write the
 * codec-specific picture header and finally run the per-slice encode
 * threads, merging their bit buffers back into s->pb.
 * NOTE(review): this view of the function is sampled — a number of lines
 * (returns on error, some closing braces, loop headers) are elided.
 */
3483 static int encode_picture(MpegEncContext *s, int picture_number)
3487 int context_count = s->slice_context_count;
3489 s->picture_number = picture_number;
3491 /* Reset the average MB variance */
3492 s->me.mb_var_sum_temp =
3493 s->me.mc_mb_var_sum_temp = 0;
3495 /* we need to initialize some time vars before we can encode b-frames */
3496 // RAL: Condition added for MPEG1VIDEO
3497 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3498 set_frame_distances(s);
3499 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3500 ff_set_mpeg4_time(s);
3502 s->me.scene_change_score=0;
3504 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* Rounding control: MSMPEG4v3+ uses no-rounding on I frames; for P frames
 * some codecs toggle ("flipflop") rounding every reference picture. */
3506 if(s->pict_type==AV_PICTURE_TYPE_I){
3507 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3508 else s->no_rounding=0;
3509 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3510 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3511 s->no_rounding ^= 1;
/* Second pass: take qp from the stats file; otherwise reuse the previous
 * lambda for the same picture type as an initial guess for ME. */
3514 if (s->avctx->flags & CODEC_FLAG_PASS2) {
3515 if (estimate_qp(s,1) < 0)
3517 ff_get_2pass_fcode(s);
3518 } else if (!(s->avctx->flags & CODEC_FLAG_QSCALE)) {
3519 if(s->pict_type==AV_PICTURE_TYPE_B)
3520 s->lambda= s->last_lambda_for[s->pict_type];
3522 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
/* For everything but (A)MJPEG the chroma intra matrix aliases the luma
 * intra matrix; free any separately allocated chroma matrices first. */
3526 if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3527 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3528 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3529 s->q_chroma_intra_matrix = s->q_intra_matrix;
3530 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3533 s->mb_intra=0; //for the rate distortion & bit compare functions
/* Propagate state into the per-slice thread contexts. */
3534 for(i=1; i<context_count; i++){
3535 ret = ff_update_duplicate_context(s->thread_context[i], s);
3543 /* Estimate motion for every MB */
3544 if(s->pict_type != AV_PICTURE_TYPE_I){
/* Scale lambda by me_penalty_compensation for the ME cost model. */
3545 s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3546 s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3547 if (s->pict_type != AV_PICTURE_TYPE_B) {
3548 if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3549 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3553 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3554 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3556 for(i=0; i<s->mb_stride*s->mb_height; i++)
3557 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3559 if(!s->fixed_qscale){
3560 /* finding spatial complexity for I-frame rate control */
3561 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
/* Merge ME statistics gathered by the slice threads. */
3564 for(i=1; i<context_count; i++){
3565 merge_context_after_me(s, s->thread_context[i]);
3567 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3568 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* Scene change detected during ME on a P frame: re-encode as intra. */
3571 if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3572 s->pict_type= AV_PICTURE_TYPE_I;
3573 for(i=0; i<s->mb_stride*s->mb_height; i++)
3574 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3575 if(s->msmpeg4_version >= 3)
3577 ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3578 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* Choose f_code for forward MVs of P/S pictures and clamp long vectors. */
3582 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3583 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3585 if (s->avctx->flags & CODEC_FLAG_INTERLACED_ME) {
3587 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3588 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3589 s->f_code= FFMAX3(s->f_code, a, b);
3592 ff_fix_long_p_mvs(s);
3593 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3594 if (s->avctx->flags & CODEC_FLAG_INTERLACED_ME) {
3598 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3599 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
/* B pictures: pick f_code (forward) and b_code (backward) over all the
 * candidate MV tables, then clamp out-of-range vectors in each table. */
3604 if(s->pict_type==AV_PICTURE_TYPE_B){
3607 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3608 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3609 s->f_code = FFMAX(a, b);
3611 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3612 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3613 s->b_code = FFMAX(a, b);
3615 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3616 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3617 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3618 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3619 if (s->avctx->flags & CODEC_FLAG_INTERLACED_ME) {
3621 for(dir=0; dir<2; dir++){
3624 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3625 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3626 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3627 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* Final (non-dry-run) qp estimation now that ME data is available. */
3635 if (estimate_qp(s, 0) < 0)
3638 if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3639 s->pict_type == AV_PICTURE_TYPE_I &&
3640 !(s->avctx->flags & CODEC_FLAG_QSCALE))
3641 s->qscale= 3; //reduce clipping problems
/* MJPEG: bake qscale into the intra matrices (possibly user-supplied)
 * and rebuild the quantizer tables. */
3643 if (s->out_format == FMT_MJPEG) {
3644 const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3645 const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3647 if (s->avctx->intra_matrix) {
3649 luma_matrix = s->avctx->intra_matrix;
3651 if (s->avctx->chroma_intra_matrix)
3652 chroma_matrix = s->avctx->chroma_intra_matrix;
3654 /* for mjpeg, we do include qscale in the matrix */
3656 int j = s->idsp.idct_permutation[i];
3658 s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3659 s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3661 s->y_dc_scale_table=
3662 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3663 s->chroma_intra_matrix[0] =
3664 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3665 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3666 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3667 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3668 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
/* AMV: fixed sp5x quantization tables and constant DC scale (13/14). */
3671 if(s->codec_id == AV_CODEC_ID_AMV){
3672 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3673 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3675 int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3677 s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3678 s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3680 s->y_dc_scale_table= y;
3681 s->c_dc_scale_table= c;
3682 s->intra_matrix[0] = 13;
3683 s->chroma_intra_matrix[0] = 14;
3684 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3685 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3686 ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3687 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3691 //FIXME var duplication
3692 s->current_picture_ptr->f->key_frame =
3693 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3694 s->current_picture_ptr->f->pict_type =
3695 s->current_picture.f->pict_type = s->pict_type;
3697 if (s->current_picture.f->key_frame)
3698 s->picture_in_gop_number=0;
/* Write the codec-specific picture header and count its bits. */
3700 s->mb_x = s->mb_y = 0;
3701 s->last_bits= put_bits_count(&s->pb);
3702 switch(s->out_format) {
3704 if (CONFIG_MJPEG_ENCODER)
3705 ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3706 s->intra_matrix, s->chroma_intra_matrix);
3709 if (CONFIG_H261_ENCODER)
3710 ff_h261_encode_picture_header(s, picture_number);
3713 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3714 ff_wmv2_encode_picture_header(s, picture_number);
3715 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3716 ff_msmpeg4_encode_picture_header(s, picture_number);
3717 else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
3718 ff_mpeg4_encode_picture_header(s, picture_number);
3719 else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3720 ret = ff_rv10_encode_picture_header(s, picture_number);
3724 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3725 ff_rv20_encode_picture_header(s, picture_number);
3726 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3727 ff_flv_encode_picture_header(s, picture_number);
3728 else if (CONFIG_H263_ENCODER)
3729 ff_h263_encode_picture_header(s, picture_number);
3732 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3733 ff_mpeg1_encode_picture_header(s, picture_number);
3738 bits= put_bits_count(&s->pb);
3739 s->header_bits= bits - s->last_bits;
/* Run the slice encode threads and merge their bitstreams into s->pb. */
3741 for(i=1; i<context_count; i++){
3742 update_duplicate_context_after_me(s->thread_context[i], s);
3744 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3745 for(i=1; i<context_count; i++){
/* If the slice contexts share one contiguous buffer, grow s->pb to
 * cover the next slice's region before merging. */
3746 if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3747 set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
3748 merge_context_after_encode(s, s->thread_context[i]);
/**
 * Reference (C) implementation of DCT-domain denoising: accumulate the
 * absolute coefficient error per position into dct_error_sum[intra][i]
 * and shrink each coefficient toward zero by the learned dct_offset.
 * Separate statistics are kept for intra and inter blocks.
 * NOTE(review): the positive/negative branch structure is partially
 * elided in this view; code shown is verbatim.
 */
3754 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3755 const int intra= s->mb_intra;
3758 s->dct_count[intra]++;
3760 for(i=0; i<64; i++){
3761 int level= block[i];
/* Positive coefficient: add to error stats, subtract offset, clamp at 0. */
3765 s->dct_error_sum[intra][i] += level;
3766 level -= s->dct_offset[intra][i];
3767 if(level<0) level=0;
/* Negative coefficient: mirror of the positive case. */
3769 s->dct_error_sum[intra][i] -= level;
3770 level += s->dct_offset[intra][i];
3771 if(level>0) level=0;
/**
 * Trellis (rate-distortion optimal) quantization of one 8x8 block.
 * Runs the forward DCT, then for each scan position keeps up to two
 * candidate quantized levels and uses dynamic programming over "survivor"
 * end positions to pick the level/run sequence minimizing
 * distortion + lambda * bits.
 * @param n     block index (n<4: luma, else chroma — selects matrices/VLCs)
 * @param overflow set if a coefficient exceeded s->max_qcoeff
 * @return index of the last nonzero coefficient, or a value < start_i if
 *         the block quantizes to all zeros.
 * NOTE(review): this view is sampled; declarations and several branches
 * are elided. Lines shown are verbatim.
 */
3778 static int dct_quantize_trellis_c(MpegEncContext *s,
3779 int16_t *block, int n,
3780 int qscale, int *overflow){
3782 const uint16_t *matrix;
3783 const uint8_t *scantable= s->intra_scantable.scantable;
3784 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3786 unsigned int threshold1, threshold2;
3798 int coeff_count[64];
3799 int qmul, qadd, start_i, last_non_zero, i, dc;
3800 const int esc_length= s->ac_esc_length;
3802 uint8_t * last_length;
/* lambda in the same fixed-point scale as the distortion terms below. */
3803 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3805 s->fdsp.fdct(block);
3807 if(s->dct_error_sum)
3808 s->denoise_dct(s, block);
3810 qadd= ((qscale-1)|1)*8;
3821 /* For AIC we skip quant/dequant of INTRADC */
3826 /* note: block[0] is assumed to be positive */
3827 block[0] = (block[0] + (q >> 1)) / q;
/* Intra: pick luma vs chroma quant matrix and VLC length tables. */
3830 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
3831 matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
3832 if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
3833 bias= 1<<(QMAT_SHIFT-1);
3835 if (n > 3 && s->intra_chroma_ac_vlc_length) {
3836 length = s->intra_chroma_ac_vlc_length;
3837 last_length= s->intra_chroma_ac_vlc_last_length;
3839 length = s->intra_ac_vlc_length;
3840 last_length= s->intra_ac_vlc_last_length;
/* Inter block: single matrix and inter VLC tables. */
3845 qmat = s->q_inter_matrix[qscale];
3846 matrix = s->inter_matrix;
3847 length = s->inter_ac_vlc_length;
3848 last_length= s->inter_ac_vlc_last_length;
3852 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3853 threshold2= (threshold1<<1);
/* Scan backward to find the last coefficient that survives quantization. */
3855 for(i=63; i>=start_i; i--) {
3856 const int j = scantable[i];
3857 int level = block[j] * qmat[j];
3859 if(((unsigned)(level+threshold1))>threshold2){
/* Forward pass: for each position compute up to 2 candidate levels
 * (quantized value and value-1) plus their signs. */
3865 for(i=start_i; i<=last_non_zero; i++) {
3866 const int j = scantable[i];
3867 int level = block[j] * qmat[j];
3869 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3870 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
3871 if(((unsigned)(level+threshold1))>threshold2){
3873 level= (bias + level)>>QMAT_SHIFT;
3875 coeff[1][i]= level-1;
3876 // coeff[2][k]= level-2;
3878 level= (bias - level)>>QMAT_SHIFT;
3879 coeff[0][i]= -level;
3880 coeff[1][i]= -level+1;
3881 // coeff[2][k]= -level+2;
3883 coeff_count[i]= FFMIN(level, 2);
3884 av_assert2(coeff_count[i]);
/* Below threshold: only candidate is +/-1 with the sign of level. */
3887 coeff[0][i]= (level>>31)|1;
3892 *overflow= s->max_qcoeff < max; //overflow might have happened
3894 if(last_non_zero < start_i){
3895 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3896 return last_non_zero;
/* Dynamic program: score_tab[i] = best cost to encode coefficients up to
 * position i; survivor[] holds positions worth extending a run from. */
3899 score_tab[start_i]= 0;
3900 survivor[0]= start_i;
3903 for(i=start_i; i<=last_non_zero; i++){
3904 int level_index, j, zero_distortion;
3905 int dct_coeff= FFABS(block[ scantable[i] ]);
3906 int best_score=256*256*256*120;
/* ifast FDCT has AAN scaling folded in; undo it for distortion calc. */
3908 if (s->fdsp.fdct == ff_fdct_ifast)
3909 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
3910 zero_distortion= dct_coeff*dct_coeff;
3912 for(level_index=0; level_index < coeff_count[i]; level_index++){
3914 int level= coeff[level_index][i];
3915 const int alevel= FFABS(level);
/* Reconstruct the dequantized value the decoder would see, per codec. */
3920 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
3921 unquant_coeff= alevel*qmul + qadd;
3922 } else if(s->out_format == FMT_MJPEG) {
3923 j = s->idsp.idct_permutation[scantable[i]];
3924 unquant_coeff = alevel * matrix[j] * 8;
3926 j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
3928 unquant_coeff = (int)( alevel * qscale * matrix[j]) >> 3;
3929 unquant_coeff = (unquant_coeff - 1) | 1;
3931 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) matrix[j])) >> 4;
3932 unquant_coeff = (unquant_coeff - 1) | 1;
3937 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* Levels that fit the non-escape VLC range (|level|<128 after bias). */
3939 if((level&(~127)) == 0){
3940 for(j=survivor_count-1; j>=0; j--){
3941 int run= i - survivor[j];
3942 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3943 score += score_tab[i-run];
3945 if(score < best_score){
3948 level_tab[i+1]= level-64;
/* H.263/H.261 have a distinct "last" VLC — track best terminating
 * coefficient separately. */
3952 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
3953 for(j=survivor_count-1; j>=0; j--){
3954 int run= i - survivor[j];
3955 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3956 score += score_tab[i-run];
3957 if(score < last_score){
3960 last_level= level-64;
/* Escape-coded levels: fixed esc_length bit cost. */
3966 distortion += esc_length*lambda;
3967 for(j=survivor_count-1; j>=0; j--){
3968 int run= i - survivor[j];
3969 int score= distortion + score_tab[i-run];
3971 if(score < best_score){
3974 level_tab[i+1]= level-64;
3978 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
3979 for(j=survivor_count-1; j>=0; j--){
3980 int run= i - survivor[j];
3981 int score= distortion + score_tab[i-run];
3982 if(score < last_score){
3985 last_level= level-64;
3993 score_tab[i+1]= best_score;
3995 //Note: there is a vlc code in mpeg4 which is 1 bit shorter then another one with a shorter run and the same level
/* Prune survivors whose best score cannot beat the current best. */
3996 if(last_non_zero <= 27){
3997 for(; survivor_count; survivor_count--){
3998 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4002 for(; survivor_count; survivor_count--){
4003 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4008 survivor[ survivor_count++ ]= i+1;
/* For codecs without a "last" VLC, pick the best end position now. */
4011 if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4012 last_score= 256*256*256*120;
4013 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4014 int score= score_tab[i];
4015 if(i) score += lambda*2; //FIXME exacter?
4017 if(score < last_score){
4020 last_level= level_tab[i];
4021 last_run= run_tab[i];
4026 s->coded_score[n] = last_score;
4028 dc= FFABS(block[0]);
4029 last_non_zero= last_i - 1;
4030 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4032 if(last_non_zero < start_i)
4033 return last_non_zero;
/* Special case: only the first coefficient survives — re-decide it
 * jointly with the DC distortion. */
4035 if(last_non_zero == 0 && start_i == 0){
4037 int best_score= dc * dc;
4039 for(i=0; i<coeff_count[0]; i++){
4040 int level= coeff[i][0];
4041 int alevel= FFABS(level);
4042 int unquant_coeff, score, distortion;
4044 if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4045 unquant_coeff= (alevel*qmul + qadd)>>3;
4047 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) matrix[0])) >> 4;
4048 unquant_coeff = (unquant_coeff - 1) | 1;
4050 unquant_coeff = (unquant_coeff + 4) >> 3;
4051 unquant_coeff<<= 3 + 3;
4053 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4055 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4056 else score= distortion + esc_length*lambda;
4058 if(score < best_score){
4060 best_level= level - 64;
4063 block[0]= best_level;
4064 s->coded_score[n] = best_score - dc*dc;
4065 if(best_level == 0) return -1;
4066 else return last_non_zero;
/* Backtrack through level_tab/run_tab, writing the chosen levels into
 * block[] at their (permuted) positions. */
4070 av_assert2(last_level);
4072 block[ perm_scantable[last_non_zero] ]= last_level;
4075 for(; i>start_i; i -= run_tab[i] + 1){
4076 block[ perm_scantable[i-1] ]= level_tab[i];
4079 return last_non_zero;
4082 //#define REFINE_STATS 1
/* 64 8x8 IDCT basis functions (fixed point, scaled by 1<<BASIS_SHIFT),
 * indexed by permuted coefficient position; used by dct_quantize_refine. */
4083 static int16_t basis[64][64];
/**
 * Fill basis[] with the 2-D DCT basis images, applying the IDCT
 * coefficient permutation so entries line up with the permuted block.
 */
4085 static void build_basis(uint8_t *perm){
4092 double s= 0.25*(1<<BASIS_SHIFT);
4094 int perm_index= perm[index];
/* DC rows/columns get the 1/sqrt(2) orthonormalization factor. */
4095 if(i==0) s*= sqrt(0.5);
4096 if(j==0) s*= sqrt(0.5);
4097 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/**
 * Iteratively refine an already-quantized block (quantizer noise shaping):
 * repeatedly try changing one coefficient by +-1, evaluating the change in
 * weighted spatial-domain error (via the precomputed DCT basis images in
 * rem[]) plus the change in VLC bit cost, and keep the best improvement
 * until none remains.
 * @param weight per-pixel error weights
 * @param orig   original (unquantized) spatial-domain reference
 * @return index of the last nonzero coefficient after refinement.
 * NOTE(review): heavily sampled view — loop headers, the convergence
 * check and several branches are elided. Lines shown are verbatim.
 */
4104 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4105 int16_t *block, int16_t *weight, int16_t *orig,
4108 LOCAL_ALIGNED_16(int16_t, d1, [64]);
4109 const uint8_t *scantable= s->intra_scantable.scantable;
4110 const uint8_t *perm_scantable= s->intra_scantable.permutated;
4111 // unsigned int threshold1, threshold2;
4116 int qmul, qadd, start_i, last_non_zero, i, dc;
4118 uint8_t * last_length;
4120 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* REFINE_STATS counters (debug only). */
4123 static int after_last=0;
4124 static int to_zero=0;
4125 static int from_zero=0;
4128 static int messed_sign=0;
/* Lazily build the DCT basis table on first use. */
4131 if(basis[0][0] == 0)
4132 build_basis(s->idsp.idct_permutation);
4143 /* For AIC we skip quant/dequant of INTRADC */
4147 q <<= RECON_SHIFT-3;
4148 /* note: block[0] is assumed to be positive */
4150 // block[0] = (block[0] + (q >> 1)) / q;
4152 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4153 // bias= 1<<(QMAT_SHIFT-1);
/* Select the AC VLC length tables (intra luma/chroma vs inter). */
4154 if (n > 3 && s->intra_chroma_ac_vlc_length) {
4155 length = s->intra_chroma_ac_vlc_length;
4156 last_length= s->intra_chroma_ac_vlc_last_length;
4158 length = s->intra_ac_vlc_length;
4159 last_length= s->intra_ac_vlc_last_length;
4164 length = s->inter_ac_vlc_length;
4165 last_length= s->inter_ac_vlc_last_length;
4167 last_non_zero = s->block_last_index[n];
/* rem[] = reconstructed - original, in RECON_SHIFT fixed point;
 * starts as DC-minus-original, then coefficients are added below. */
4172 dc += (1<<(RECON_SHIFT-1));
4173 for(i=0; i<64; i++){
4174 rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig dirrectly instead of copying to rem[]
4177 STOP_TIMER("memset rem[]")}
/* Build the per-pixel weight table from qns (noise shaping strength). */
4180 for(i=0; i<64; i++){
4185 w= FFABS(weight[i]) + qns*one;
4186 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4189 // w=weight[i] = (63*qns + (w/2)) / w;
4192 av_assert2(w<(1<<6));
4195 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* Add every currently-coded coefficient's basis image into rem[] and
 * record the run-length structure in run_tab[]. */
4201 for(i=start_i; i<=last_non_zero; i++){
4202 int j= perm_scantable[i];
4203 const int level= block[j];
4207 if(level<0) coeff= qmul*level - qadd;
4208 else coeff= qmul*level + qadd;
4209 run_tab[rle_index++]=run;
4212 s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
4218 if(last_non_zero>0){
4219 STOP_TIMER("init rem[]")
/* Main refinement loop: baseline score is the current weighted error. */
4226 int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4229 int run2, best_unquant_change=0, analyze_gradient;
4233 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
4235 if(analyze_gradient){
/* d1[] = weighted error gradient in DCT domain; used to skip changes
 * whose sign cannot reduce the error. */
4239 for(i=0; i<64; i++){
4242 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
4245 STOP_TIMER("rem*w*w")}
/* Try adjusting the intra DC coefficient by +-1. */
4255 const int level= block[0];
4256 int change, old_coeff;
4258 av_assert2(s->mb_intra);
4262 for(change=-1; change<=1; change+=2){
4263 int new_level= level + change;
4264 int score, new_coeff;
4266 new_coeff= q*new_level;
4267 if(new_coeff >= 2048 || new_coeff < 0)
4270 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4271 new_coeff - old_coeff);
4272 if(score<best_score){
4275 best_change= change;
4276 best_unquant_change= new_coeff - old_coeff;
4283 run2= run_tab[rle_index++];
/* Try adjusting each AC coefficient by +-1, accounting for the VLC bit
 * cost change of the affected (run,level) pairs. */
4287 for(i=start_i; i<64; i++){
4288 int j= perm_scantable[i];
4289 const int level= block[j];
4290 int change, old_coeff;
4292 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4296 if(level<0) old_coeff= qmul*level - qadd;
4297 else old_coeff= qmul*level + qadd;
4298 run2= run_tab[rle_index++]; //FIXME ! maybe after last
4302 av_assert2(run2>=0 || i >= last_non_zero );
4305 for(change=-1; change<=1; change+=2){
4306 int new_level= level + change;
4307 int score, new_coeff, unquant_change;
/* Mild noise shaping only allows shrinking magnitudes. */
4310 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4314 if(new_level<0) new_coeff= qmul*new_level - qadd;
4315 else new_coeff= qmul*new_level + qadd;
4316 if(new_coeff >= 2048 || new_coeff <= -2048)
4318 //FIXME check for overflow
/* Nonzero -> nonzero: bit cost is the VLC length difference. */
4321 if(level < 63 && level > -63){
4322 if(i < last_non_zero)
4323 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4324 - length[UNI_AC_ENC_INDEX(run, level+64)];
4326 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4327 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* Zero -> +-1: a new coefficient appears, splitting a run. */
4330 av_assert2(FFABS(new_level)==1);
4332 if(analyze_gradient){
/* Skip if the error gradient points the wrong way. */
4333 int g= d1[ scantable[i] ];
4334 if(g && (g^new_level) >= 0)
4338 if(i < last_non_zero){
4339 int next_i= i + run2 + 1;
4340 int next_level= block[ perm_scantable[next_i] ] + 64;
4342 if(next_level&(~127))
4345 if(next_i < last_non_zero)
4346 score += length[UNI_AC_ENC_INDEX(run, 65)]
4347 + length[UNI_AC_ENC_INDEX(run2, next_level)]
4348 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4350 score += length[UNI_AC_ENC_INDEX(run, 65)]
4351 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4352 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4354 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4356 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4357 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* +-1 -> zero: a coefficient disappears, merging two runs. */
4363 av_assert2(FFABS(level)==1);
4365 if(i < last_non_zero){
4366 int next_i= i + run2 + 1;
4367 int next_level= block[ perm_scantable[next_i] ] + 64;
4369 if(next_level&(~127))
4372 if(next_i < last_non_zero)
4373 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4374 - length[UNI_AC_ENC_INDEX(run2, next_level)]
4375 - length[UNI_AC_ENC_INDEX(run, 65)];
4377 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4378 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4379 - length[UNI_AC_ENC_INDEX(run, 65)];
4381 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4383 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4384 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* Combine the bit-cost delta with the weighted distortion delta. */
4391 unquant_change= new_coeff - old_coeff;
4392 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4394 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4396 if(score<best_score){
4399 best_change= change;
4400 best_unquant_change= unquant_change;
4404 prev_level= level + 64;
4405 if(prev_level&(~127))
4414 STOP_TIMER("iterative step")}
/* Apply the best single-coefficient change found in this iteration. */
4418 int j= perm_scantable[ best_coeff ];
4420 block[j] += best_change;
4422 if(best_coeff > last_non_zero){
4423 last_non_zero= best_coeff;
4424 av_assert2(block[j]);
4431 if(block[j] - best_change){
4432 if(FFABS(block[j]) > FFABS(block[j] - best_change)){
/* Coefficient went to zero: shrink last_non_zero past trailing zeros. */
4444 for(; last_non_zero>=start_i; last_non_zero--){
4445 if(block[perm_scantable[last_non_zero]])
4451 if(256*256*256*64 % count == 0){
4452 av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
/* Rebuild run_tab[] and fold the accepted change into rem[]. */
4457 for(i=start_i; i<=last_non_zero; i++){
4458 int j= perm_scantable[i];
4459 const int level= block[j];
4462 run_tab[rle_index++]=run;
4469 s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4475 if(last_non_zero>0){
4476 STOP_TIMER("iterative search")
4481 return last_non_zero;
/**
 * Plain (non-trellis) C quantization of one 8x8 block: forward DCT,
 * optional denoising, then threshold-and-divide each coefficient using
 * the precomputed qmat for this qscale.
 * @param n block index (n<4 selects the luma intra matrix, else chroma)
 * @param overflow set if a coefficient exceeded s->max_qcoeff
 * @return index of the last nonzero coefficient in scan order.
 * NOTE(review): intra/inter branch structure is partially elided in this
 * view; lines shown are verbatim.
 */
4484 int ff_dct_quantize_c(MpegEncContext *s,
4485 int16_t *block, int n,
4486 int qscale, int *overflow)
4488 int i, j, level, last_non_zero, q, start_i;
4490 const uint8_t *scantable= s->intra_scantable.scantable;
4493 unsigned int threshold1, threshold2;
4495 s->fdsp.fdct(block);
4497 if(s->dct_error_sum)
4498 s->denoise_dct(s, block);
4508 /* For AIC we skip quant/dequant of INTRADC */
4511 /* note: block[0] is assumed to be positive */
4512 block[0] = (block[0] + (q >> 1)) / q;
/* Intra: luma vs chroma matrix; bias converted to QMAT_SHIFT scale. */
4515 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4516 bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
4520 qmat = s->q_inter_matrix[qscale];
4521 bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
4523 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4524 threshold2= (threshold1<<1);
/* Backward scan: find the last coefficient surviving quantization. */
4525 for(i=63;i>=start_i;i--) {
4527 level = block[j] * qmat[j];
4529 if(((unsigned)(level+threshold1))>threshold2){
/* Forward pass: quantize each surviving coefficient with rounding bias;
 * the unsigned-range trick tests |level| > threshold1 in one compare. */
4536 for(i=start_i; i<=last_non_zero; i++) {
4538 level = block[j] * qmat[j];
4540 // if( bias+level >= (1<<QMAT_SHIFT)
4541 // || bias-level >= (1<<QMAT_SHIFT)){
4542 if(((unsigned)(level+threshold1))>threshold2){
4544 level= (bias + level)>>QMAT_SHIFT;
4547 level= (bias - level)>>QMAT_SHIFT;
4555 *overflow= s->max_qcoeff < max; //overflow might have happened
4557 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4558 if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4559 ff_block_permute(block, s->idsp.idct_permutation,
4560 scantable, last_non_zero);
4562 return last_non_zero;
/* ---- Encoder registration: per-codec AVOption tables, AVClass wrappers
 * and AVCodec definitions for the H.263-family encoders. ---- */
4565 #define OFFSET(x) offsetof(MpegEncContext, x)
4566 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
4567 static const AVOption h263_options[] = {
4568 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4569 { "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4570 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4575 static const AVClass h263_class = {
4576 .class_name = "H.263 encoder",
4577 .item_name = av_default_item_name,
4578 .option = h263_options,
4579 .version = LIBAVUTIL_VERSION_INT,
4582 AVCodec ff_h263_encoder = {
4584 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4585 .type = AVMEDIA_TYPE_VIDEO,
4586 .id = AV_CODEC_ID_H263,
4587 .priv_data_size = sizeof(MpegEncContext),
4588 .init = ff_mpv_encode_init,
4589 .encode2 = ff_mpv_encode_picture,
4590 .close = ff_mpv_encode_end,
4591 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4592 .priv_class = &h263_class,
4595 static const AVOption h263p_options[] = {
4596 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4597 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4598 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4599 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4603 static const AVClass h263p_class = {
4604 .class_name = "H.263p encoder",
4605 .item_name = av_default_item_name,
4606 .option = h263p_options,
4607 .version = LIBAVUTIL_VERSION_INT,
4610 AVCodec ff_h263p_encoder = {
4612 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4613 .type = AVMEDIA_TYPE_VIDEO,
4614 .id = AV_CODEC_ID_H263P,
4615 .priv_data_size = sizeof(MpegEncContext),
4616 .init = ff_mpv_encode_init,
4617 .encode2 = ff_mpv_encode_picture,
4618 .close = ff_mpv_encode_end,
4619 .capabilities = CODEC_CAP_SLICE_THREADS,
4620 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4621 .priv_class = &h263p_class,
/* MSMPEG4v2/v3 use the generic mpegvideo option class. */
4624 FF_MPV_GENERIC_CLASS(msmpeg4v2)
4626 AVCodec ff_msmpeg4v2_encoder = {
4627 .name = "msmpeg4v2",
4628 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4629 .type = AVMEDIA_TYPE_VIDEO,
4630 .id = AV_CODEC_ID_MSMPEG4V2,
4631 .priv_data_size = sizeof(MpegEncContext),
4632 .init = ff_mpv_encode_init,
4633 .encode2 = ff_mpv_encode_picture,
4634 .close = ff_mpv_encode_end,
4635 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4636 .priv_class = &msmpeg4v2_class,
4639 FF_MPV_GENERIC_CLASS(msmpeg4v3)
4641 AVCodec ff_msmpeg4v3_encoder = {
4643 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4644 .type = AVMEDIA_TYPE_VIDEO,
4645 .id = AV_CODEC_ID_MSMPEG4V3,
4646 .priv_data_size = sizeof(MpegEncContext),
4647 .init = ff_mpv_encode_init,
4648 .encode2 = ff_mpv_encode_picture,
4649 .close = ff_mpv_encode_end,
4650 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4651 .priv_class = &msmpeg4v3_class,
4654 FF_MPV_GENERIC_CLASS(wmv1)
4656 AVCodec ff_wmv1_encoder = {
4658 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4659 .type = AVMEDIA_TYPE_VIDEO,
4660 .id = AV_CODEC_ID_WMV1,
4661 .priv_data_size = sizeof(MpegEncContext),
4662 .init = ff_mpv_encode_init,
4663 .encode2 = ff_mpv_encode_picture,
4664 .close = ff_mpv_encode_end,
4665 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4666 .priv_class = &wmv1_class,