2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
32 #include "libavutil/internal.h"
33 #include "libavutil/intmath.h"
34 #include "libavutil/mathematics.h"
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/opt.h"
41 #include "mpegvideo.h"
49 #include "aandcttab.h"
51 #include "mpeg4video.h"
53 #include "bytestream.h"
56 static int encode_picture(MpegEncContext *s, int picture_number);
57 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
58 static int sse_mb(MpegEncContext *s);
59 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
60 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
62 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
63 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
65 const AVOption ff_mpv_generic_options[] = {
/**
 * Precompute per-qscale quantizer multiplier tables so that quantization
 * becomes a multiply + shift instead of a division.
 *
 * For every qscale in [qmin, qmax] the fixed-point reciprocal of
 * (qscale * quant_matrix[]) is stored; the table layout depends on which
 * forward DCT is in use, because ff_fdct_ifast leaves the AAN scale factors
 * (ff_aanscales[]) folded into its output and they must be divided out here.
 *
 * @param dsp          DSP context; dsp->fdct selects the table variant and
 *                     dsp->idct_permutation maps coefficient order
 * @param qmat         output: 32-bit multipliers, one 64-entry row per qscale
 * @param qmat16       output: 16-bit multiplier/bias pairs (MMX-style path)
 * @param quant_matrix input quantization matrix
 * @param bias         rounding bias in QUANT_BIAS_SHIFT fixed point
 * @param qmin         first qscale to fill
 * @param qmax         last qscale to fill (inclusive)
 * @param intra        nonzero for the intra matrix; the overflow scan below
 *                     starts at index `intra`, i.e. it skips the DC term
 *
 * NOTE(review): this listing is elided — declarations of i/qscale/shift/max
 * and several braces are on lines not shown here.
 */
70 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
71 uint16_t (*qmat16)[2][64],
72 const uint16_t *quant_matrix,
73 int bias, int qmin, int qmax, int intra)
78 for (qscale = qmin; qscale <= qmax; qscale++) {
/* "slow"/accurate DCTs: output is unscaled, so only the plain reciprocal
 * of qscale * quant_matrix is needed. */
80 if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
81 dsp->fdct == ff_jpeg_fdct_islow_10 ||
82 dsp->fdct == ff_faandct) {
83 for (i = 0; i < 64; i++) {
84 const int j = dsp->idct_permutation[i];
/* NOTE(review): the range comment below mentions ff_aanscales[] although
 * this branch does not use it — it appears copy-pasted from the ifast
 * branch; confirm against upstream before relying on the bounds. */
85 /* 16 <= qscale * quant_matrix[i] <= 7905
86 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
87 * 19952 <= x <= 249205026
88 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
89 * 3444240 >= (1 << 36) / (x) >= 275 */
91 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
92 (qscale * quant_matrix[j]));
/* ifast DCT: divide out the AAN post-scale factors as well (extra +14
 * shift compensates the ff_aanscales fixed-point format). */
94 } else if (dsp->fdct == ff_fdct_ifast) {
95 for (i = 0; i < 64; i++) {
96 const int j = dsp->idct_permutation[i];
97 /* 16 <= qscale * quant_matrix[i] <= 7905
98 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
99 * 19952 <= x <= 249205026
100 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
101 * 3444240 >= (1 << 36) / (x) >= 275 */
103 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
104 (ff_aanscales[i] * qscale *
/* default branch: also build the 16-bit multiplier + bias tables used by
 * the SIMD quantizer. */
108 for (i = 0; i < 64; i++) {
109 const int j = dsp->idct_permutation[i];
110 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
111 * Assume x = qscale * quant_matrix[i]
113 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
114 * so 32768 >= (1 << 19) / (x) >= 67 */
115 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
116 (qscale * quant_matrix[j]));
117 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
118 // (qscale * quant_matrix[i]);
119 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
120 (qscale * quant_matrix[j]);
/* clamp: 0 would make the bias division below undefined, and 128*256
 * would not fit the signed 16-bit multiplier. */
122 if (qmat16[qscale][0][i] == 0 ||
123 qmat16[qscale][0][i] == 128 * 256)
124 qmat16[qscale][0][i] = 128 * 256 - 1;
125 qmat16[qscale][1][i] =
126 ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
127 qmat16[qscale][0][i]);
/* Shrink the shift while any max-coefficient * multiplier product could
 * overflow int; warns if QMAT_SHIFT had to be reduced. */
131 for (i = intra; i < 64; i++) {
133 if (dsp->fdct == ff_fdct_ifast) {
134 max = (8191LL * ff_aanscales[i]) >> 14;
136 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
142 av_log(NULL, AV_LOG_INFO,
143 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/* Derive the current quantizer and lambda^2 from the Lagrange multiplier:
 * qscale is a fixed-point scaling of lambda (constant 139 with rounding
 * term FF_LAMBDA_SCALE*64), clamped to the user-supplied qmin/qmax, and
 * lambda2 is the rounded square of lambda (shift amount on an elided line). */
148 static inline void update_qscale(MpegEncContext *s)
150 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
151 (FF_LAMBDA_SHIFT + 7);
152 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
154 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/* Write a 64-entry quantization matrix to the bitstream, 8 bits per entry,
 * in zigzag scan order (handling of a NULL/absent matrix is on elided lines). */
158 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
164 for (i = 0; i < 64; i++) {
165 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
172 * init s->current_picture.qscale_table from s->lambda_table
174 void ff_init_qscale_tab(MpegEncContext *s)
176 int8_t * const qscale_table = s->current_picture.qscale_table;
/* Convert each macroblock's lambda to a qscale (same fixed-point mapping
 * as update_qscale) and clamp to the configured qmin/qmax. mb_index2xy
 * translates sequential MB index to the table's x/y layout. */
179 for (i = 0; i < s->mb_num; i++) {
180 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
181 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
182 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/* Copy the fields that motion estimation may have changed from the master
 * context into a slice-thread duplicate context (the `src` parameter is on
 * an elided line). */
187 static void update_duplicate_context_after_me(MpegEncContext *dst,
190 #define COPY(a) dst->a= src->a
192 COPY(current_picture);
198 COPY(picture_in_gop_number);
199 COPY(gop_picture_number);
200 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
201 COPY(progressive_frame); // FIXME don't set in encode_header
202 COPY(partitioned_frame); // FIXME don't set in encode_header
207 * Set the given MpegEncContext to defaults for encoding.
208 * the changed fields will not depend upon the prior state of the MpegEncContext.
210 static void MPV_encode_defaults(MpegEncContext *s)
213 ff_MPV_common_defaults(s);
/* Initialize the shared static fcode table: entry 1 for the +-16 MV range
 * around the center MAX_MV (one-time init guard is on an elided line). */
215 for (i = -16; i < 16; i++) {
216 default_fcode_tab[i + MAX_MV] = 1;
218 s->me.mv_penalty = default_mv_penalty;
219 s->fcode_tab = default_fcode_tab;
/* Reset per-stream counters so a reused context starts cleanly. */
221 s->input_picture_number = 0;
222 s->picture_in_gop_number = 0;
225 /* init video encoder */
/**
 * Validate user options, configure the MpegEncContext for the selected
 * codec, and allocate encoder state (matrices, rate control, optional
 * B-frame-strategy scratch frames).
 *
 * Structure: pixel-format validation per codec -> option sanity checks
 * (rate control, B-frames, codec feature support, threading, timebase) ->
 * quant bias defaults -> per-codec output-format setup -> common init,
 * quant matrix precomputation, rate-control init.
 *
 * @return 0 on success, negative error code on failure (many failure
 *         returns are on elided lines).
 * NOTE(review): this listing is sampled; declarations (i, ret, ...), error
 * returns and several closing braces are on lines not shown here.
 */
226 av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
228 MpegEncContext *s = avctx->priv_data;
230 int chroma_h_shift, chroma_v_shift;
232 MPV_encode_defaults(s);
/* --- pixel format validation, per codec --- */
234 switch (avctx->codec_id) {
235 case AV_CODEC_ID_MPEG2VIDEO:
236 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
237 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
238 av_log(avctx, AV_LOG_ERROR,
239 "only YUV420 and YUV422 are supported\n");
/* LJPEG additionally allows 444 and BGRA; non-J (limited range) YUV only
 * with strict_std_compliance <= unofficial. */
243 case AV_CODEC_ID_LJPEG:
244 if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
245 avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
246 avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
247 avctx->pix_fmt != AV_PIX_FMT_BGRA &&
248 ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
249 avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
250 avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
251 avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
252 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
256 case AV_CODEC_ID_MJPEG:
257 if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
258 avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
259 ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
260 avctx->pix_fmt != AV_PIX_FMT_YUV422P) ||
261 avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
262 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
/* default: every remaining codec handled here is 4:2:0 only. */
267 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
268 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
/* --- derive internal chroma format from the pixel format --- */
273 switch (avctx->pix_fmt) {
274 case AV_PIX_FMT_YUVJ422P:
275 case AV_PIX_FMT_YUV422P:
276 s->chroma_format = CHROMA_422;
278 case AV_PIX_FMT_YUVJ420P:
279 case AV_PIX_FMT_YUV420P:
281 s->chroma_format = CHROMA_420;
/* --- copy basic user parameters into the context --- */
285 s->bit_rate = avctx->bit_rate;
286 s->width = avctx->width;
287 s->height = avctx->height;
288 if (avctx->gop_size > 600 &&
289 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
290 av_log(avctx, AV_LOG_ERROR,
291 "Warning keyframe interval too large! reducing it ...\n");
292 avctx->gop_size = 600;
294 s->gop_size = avctx->gop_size;
296 s->flags = avctx->flags;
297 s->flags2 = avctx->flags2;
298 if (avctx->max_b_frames > MAX_B_FRAMES) {
299 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
300 "is %d.\n", MAX_B_FRAMES);
302 s->max_b_frames = avctx->max_b_frames;
303 s->codec_id = avctx->codec->id;
304 s->strict_std_compliance = avctx->strict_std_compliance;
305 s->quarter_sample = (avctx->flags & CODEC_FLAG_QPEL) != 0;
306 s->mpeg_quant = avctx->mpeg_quant;
307 s->rtp_mode = !!avctx->rtp_payload_size;
308 s->intra_dc_precision = avctx->intra_dc_precision;
309 s->user_specified_pts = AV_NOPTS_VALUE;
311 if (s->gop_size <= 1) {
318 s->me_method = avctx->me_method;
/* fixed_qscale: constant-quantizer mode, disables bitrate targeting. */
321 s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
/* adaptive quant: enabled when any masking option or QP-RD needs per-MB
 * quantizers (final && operand is on an elided line). */
323 s->adaptive_quant = (s->avctx->lumi_masking ||
324 s->avctx->dark_masking ||
325 s->avctx->temporal_cplx_masking ||
326 s->avctx->spatial_cplx_masking ||
327 s->avctx->p_masking ||
328 s->avctx->border_masking ||
329 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
332 s->loop_filter = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
/* --- rate-control option sanity checks --- */
334 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
335 av_log(avctx, AV_LOG_ERROR,
336 "a vbv buffer size is needed, "
337 "for encoding with a maximum bitrate\n");
341 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
342 av_log(avctx, AV_LOG_INFO,
343 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
346 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
347 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
351 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
352 av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
356 if (avctx->rc_max_rate &&
357 avctx->rc_max_rate == avctx->bit_rate &&
358 avctx->rc_max_rate != avctx->rc_min_rate) {
359 av_log(avctx, AV_LOG_INFO,
360 "impossible bitrate constraints, this will fail\n");
/* VBV buffer must hold at least one frame at the target bitrate. */
363 if (avctx->rc_buffer_size &&
364 avctx->bit_rate * (int64_t)avctx->time_base.num >
365 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
366 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
370 if (!s->fixed_qscale &&
371 avctx->bit_rate * av_q2d(avctx->time_base) >
372 avctx->bit_rate_tolerance) {
373 av_log(avctx, AV_LOG_ERROR,
374 "bitrate tolerance too small for bitrate\n");
/* MPEG-1/2 CBR: vbv_delay is a 16-bit field in 90 kHz units; warn when the
 * buffer is too large to express and VBR (0xFFFF) will be signalled. */
378 if (s->avctx->rc_max_rate &&
379 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
380 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
381 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
382 90000LL * (avctx->rc_buffer_size - 1) >
383 s->avctx->rc_max_rate * 0xFFFFLL) {
384 av_log(avctx, AV_LOG_INFO,
385 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
386 "specified vbv buffer is too large for the given bitrate!\n");
/* --- codec feature compatibility checks --- */
389 if ((s->flags & CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
390 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
391 s->codec_id != AV_CODEC_ID_FLV1) {
392 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
396 if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
397 av_log(avctx, AV_LOG_ERROR,
398 "OBMC is only supported with simple mb decision\n");
402 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
403 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
407 if (s->max_b_frames &&
408 s->codec_id != AV_CODEC_ID_MPEG4 &&
409 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
410 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
411 av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
/* MPEG-4/H.263 store pixel aspect ratio in 8-bit fields. */
415 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
416 s->codec_id == AV_CODEC_ID_H263 ||
417 s->codec_id == AV_CODEC_ID_H263P) &&
418 (avctx->sample_aspect_ratio.num > 255 ||
419 avctx->sample_aspect_ratio.den > 255)) {
420 av_log(avctx, AV_LOG_ERROR,
421 "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
422 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
426 if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
427 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
428 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
432 // FIXME mpeg2 uses that too
433 if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
434 av_log(avctx, AV_LOG_ERROR,
435 "mpeg2 style quantization not supported by codec\n");
439 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
440 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
444 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
445 s->avctx->mb_decision != FF_MB_DECISION_RD) {
446 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
450 if (s->avctx->scenechange_threshold < 1000000000 &&
451 (s->flags & CODEC_FLAG_CLOSED_GOP)) {
452 av_log(avctx, AV_LOG_ERROR,
453 "closed gop with scene change detection are not supported yet, "
454 "set threshold to 1000000000\n");
458 if (s->flags & CODEC_FLAG_LOW_DELAY) {
459 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
460 av_log(avctx, AV_LOG_ERROR,
461 "low delay forcing is only available for mpeg2\n");
464 if (s->max_b_frames != 0) {
465 av_log(avctx, AV_LOG_ERROR,
466 "b frames cannot be used with low delay\n");
471 if (s->q_scale_type == 1) {
472 if (avctx->qmax > 12) {
473 av_log(avctx, AV_LOG_ERROR,
474 "non linear quant only supports qmax <= 12 currently\n");
/* --- threading / timebase checks --- */
479 if (s->avctx->thread_count > 1 &&
480 s->codec_id != AV_CODEC_ID_MPEG4 &&
481 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
482 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
483 (s->codec_id != AV_CODEC_ID_H263P)) {
484 av_log(avctx, AV_LOG_ERROR,
485 "multi threaded encoding not supported by codec\n");
489 if (s->avctx->thread_count < 1) {
490 av_log(avctx, AV_LOG_ERROR,
491 "automatic thread number detection not supported by codec,"
496 if (s->avctx->thread_count > 1)
499 if (!avctx->time_base.den || !avctx->time_base.num) {
500 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
/* mb_threshold limit keeps later (count << 8) arithmetic inside int. */
504 i = (INT_MAX / 2 + 128) >> 8;
505 if (avctx->mb_threshold >= i) {
506 av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
511 if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
512 av_log(avctx, AV_LOG_INFO,
513 "notice: b_frame_strategy only affects the first pass\n");
514 avctx->b_frame_strategy = 0;
/* Reduce the timebase to lowest terms; some codecs have range limits on
 * the denominator (checked below for MPEG-4). */
517 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
519 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
520 avctx->time_base.den /= i;
521 avctx->time_base.num /= i;
/* --- default quantizer rounding biases --- */
525 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
526 s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG) {
527 // (a + x * 3 / 8) / x
528 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
529 s->inter_quant_bias = 0;
531 s->intra_quant_bias = 0;
533 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
/* user-specified biases override the defaults above */
536 if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
537 s->intra_quant_bias = avctx->intra_quant_bias;
538 if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
539 s->inter_quant_bias = avctx->inter_quant_bias;
541 av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
/* MPEG-4 stores the timebase denominator in a 16-bit field. */
544 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
545 s->avctx->time_base.den > (1 << 16) - 1) {
546 av_log(avctx, AV_LOG_ERROR,
547 "timebase %d/%d not supported by MPEG 4 standard, "
548 "the maximum admitted value for the timebase denominator "
549 "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
553 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
/* --- per-codec output format and feature setup --- */
555 switch (avctx->codec->id) {
556 case AV_CODEC_ID_MPEG1VIDEO:
557 s->out_format = FMT_MPEG1;
558 s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
559 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
561 case AV_CODEC_ID_MPEG2VIDEO:
562 s->out_format = FMT_MPEG1;
563 s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
564 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
567 case AV_CODEC_ID_LJPEG:
568 case AV_CODEC_ID_MJPEG:
569 s->out_format = FMT_MJPEG;
570 s->intra_only = 1; /* force intra only for jpeg */
/* LJPEG+BGRA is encoded without chroma subsampling (all 1:1 samples). */
571 if (avctx->codec->id == AV_CODEC_ID_LJPEG &&
572 avctx->pix_fmt == AV_PIX_FMT_BGRA) {
573 s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
574 s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
575 s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
577 s->mjpeg_vsample[0] = 2;
578 s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
579 s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
580 s->mjpeg_hsample[0] = 2;
581 s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
582 s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
584 if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
585 ff_mjpeg_encode_init(s) < 0)
590 case AV_CODEC_ID_H261:
591 if (!CONFIG_H261_ENCODER)
593 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
594 av_log(avctx, AV_LOG_ERROR,
595 "The specified picture size of %dx%d is not valid for the "
596 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
597 s->width, s->height);
600 s->out_format = FMT_H261;
604 case AV_CODEC_ID_H263:
605 if (!CONFIG_H263_ENCODER)
/* index 8 in ff_h263_format means "no matching standard size". */
607 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
608 s->width, s->height) == 8) {
609 av_log(avctx, AV_LOG_INFO,
610 "The specified picture size of %dx%d is not valid for "
611 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
612 "352x288, 704x576, and 1408x1152."
613 "Try H.263+.\n", s->width, s->height);
616 s->out_format = FMT_H263;
620 case AV_CODEC_ID_H263P:
621 s->out_format = FMT_H263;
624 s->h263_aic = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
625 s->modified_quant = s->h263_aic;
626 s->loop_filter = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
627 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
630 /* These are just to be sure */
634 case AV_CODEC_ID_FLV1:
635 s->out_format = FMT_H263;
636 s->h263_flv = 2; /* format = 1; 11-bit codes */
637 s->unrestricted_mv = 1;
638 s->rtp_mode = 0; /* don't allow GOB */
642 case AV_CODEC_ID_RV10:
643 s->out_format = FMT_H263;
647 case AV_CODEC_ID_RV20:
648 s->out_format = FMT_H263;
651 s->modified_quant = 1;
655 s->unrestricted_mv = 0;
657 case AV_CODEC_ID_MPEG4:
658 s->out_format = FMT_H263;
660 s->unrestricted_mv = 1;
661 s->low_delay = s->max_b_frames ? 0 : 1;
662 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
664 case AV_CODEC_ID_MSMPEG4V2:
665 s->out_format = FMT_H263;
667 s->unrestricted_mv = 1;
668 s->msmpeg4_version = 2;
672 case AV_CODEC_ID_MSMPEG4V3:
673 s->out_format = FMT_H263;
675 s->unrestricted_mv = 1;
676 s->msmpeg4_version = 3;
677 s->flipflop_rounding = 1;
681 case AV_CODEC_ID_WMV1:
682 s->out_format = FMT_H263;
684 s->unrestricted_mv = 1;
685 s->msmpeg4_version = 4;
686 s->flipflop_rounding = 1;
690 case AV_CODEC_ID_WMV2:
691 s->out_format = FMT_H263;
693 s->unrestricted_mv = 1;
694 s->msmpeg4_version = 5;
695 s->flipflop_rounding = 1;
/* --- derived state + common/shared initialization --- */
703 avctx->has_b_frames = !s->low_delay;
707 s->progressive_frame =
708 s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
709 CODEC_FLAG_INTERLACED_ME) ||
713 if (ff_MPV_common_init(s) < 0)
717 ff_MPV_encode_init_x86(s);
719 ff_h263dsp_init(&s->h263dsp);
720 if (!s->dct_quantize)
721 s->dct_quantize = ff_dct_quantize_c;
723 s->denoise_dct = denoise_dct_c;
724 s->fast_dct_quantize = s->dct_quantize;
726 s->dct_quantize = dct_quantize_trellis_c;
728 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
729 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
731 s->quant_precision = 5;
733 ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
734 ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
736 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
737 ff_h261_encode_init(s);
738 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
739 ff_h263_encode_init(s);
740 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
741 ff_msmpeg4_encode_init(s);
742 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
743 && s->out_format == FMT_MPEG1)
744 ff_mpeg1_encode_init(s);
/* --- select default quant matrices (permuted for the chosen IDCT),
 * overridable by user-supplied matrices --- */
747 for (i = 0; i < 64; i++) {
748 int j = s->dsp.idct_permutation[i];
749 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
751 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
752 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
753 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
755 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
758 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
759 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
761 if (s->avctx->intra_matrix)
762 s->intra_matrix[j] = s->avctx->intra_matrix[i];
763 if (s->avctx->inter_matrix)
764 s->inter_matrix[j] = s->avctx->inter_matrix[i];
767 /* precompute matrix */
768 /* for mjpeg, we do include qscale in the matrix */
769 if (s->out_format != FMT_MJPEG) {
770 ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
771 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
773 ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
774 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
778 if (ff_rate_control_init(s) < 0)
781 #if FF_API_ERROR_RATE
782 FF_DISABLE_DEPRECATION_WARNINGS
783 if (avctx->error_rate)
784 s->error_rate = avctx->error_rate;
785 FF_ENABLE_DEPRECATION_WARNINGS;
/* b_frame_strategy 2 needs downscaled scratch frames for
 * estimate_best_b_count(); brd_scale halves each dimension per unit. */
788 if (avctx->b_frame_strategy == 2) {
789 for (i = 0; i < s->max_b_frames + 2; i++) {
790 s->tmp_frames[i] = av_frame_alloc();
791 if (!s->tmp_frames[i])
792 return AVERROR(ENOMEM);
794 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
795 s->tmp_frames[i]->width = s->width >> avctx->brd_scale;
796 s->tmp_frames[i]->height = s->height >> avctx->brd_scale;
798 ret = av_frame_get_buffer(s->tmp_frames[i], 32);
/* Free all encoder state: rate control, common context, codec-specific
 * (MJPEG) state, extradata and the b_frame_strategy scratch frames. */
807 av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
809 MpegEncContext *s = avctx->priv_data;
812 ff_rate_control_uninit(s);
814 ff_MPV_common_end(s);
815 if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
816 s->out_format == FMT_MJPEG)
817 ff_mjpeg_encode_close(s);
819 av_freep(&avctx->extradata);
/* av_frame_free(NULL entries) is a no-op, so the whole array is safe. */
821 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
822 av_frame_free(&s->tmp_frames[i]);
/* Sum of absolute errors of a 16x16 block against a constant reference
 * value `ref` (typically the block mean — see get_intra_count). */
827 static int get_sae(uint8_t *src, int ref, int stride)
832 for (y = 0; y < 16; y++) {
833 for (x = 0; x < 16; x++) {
834 acc += FFABS(src[x + y * stride] - ref);
/* Count 16x16 blocks that look cheaper to code as intra: for each block,
 * compare the inter SAD against `ref` with the deviation from the block's
 * own mean (SAE); a block counts when sae + 500 < sad.
 * (w/h setup and the return are on elided lines.) */
841 static int get_intra_count(MpegEncContext *s, uint8_t *src,
842 uint8_t *ref, int stride)
850 for (y = 0; y < h; y += 16) {
851 for (x = 0; x < w; x += 16) {
852 int offset = x + y * stride;
853 int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
/* block mean: pix_sum over 256 pixels, rounded (>> 8). */
855 int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
856 int sae = get_sae(src + offset, mean, stride);
858 acc += sae + 500 < sad;
/**
 * Accept a user frame into the encoder's input queue: validate/guess its
 * pts, either reference the frame directly (when strides match and
 * rate control allows it) or copy it into an internal picture, then append
 * it at position `encoding_delay` of s->input_picture[].
 *
 * @return 0 on success, negative error code on failure (several error
 *         paths are on elided lines).
 */
865 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
869 int i, display_picture_number = 0, ret;
870 const int encoding_delay = s->max_b_frames ? s->max_b_frames :
871 (s->low_delay ? 0 : 1);
876 display_picture_number = s->input_picture_number++;
/* --- pts handling: validate monotonicity, or synthesize a pts --- */
878 if (pts != AV_NOPTS_VALUE) {
879 if (s->user_specified_pts != AV_NOPTS_VALUE) {
881 int64_t last = s->user_specified_pts;
884 av_log(s->avctx, AV_LOG_ERROR,
885 "Error, Invalid timestamp=%"PRId64", "
886 "last=%"PRId64"\n", pts, s->user_specified_pts);
890 if (!s->low_delay && display_picture_number == 1)
891 s->dts_delta = time - last;
893 s->user_specified_pts = pts;
/* no pts given: continue from the last one, or fall back to the
 * display picture number. */
895 if (s->user_specified_pts != AV_NOPTS_VALUE) {
896 s->user_specified_pts =
897 pts = s->user_specified_pts + 1;
898 av_log(s->avctx, AV_LOG_INFO,
899 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
902 pts = display_picture_number;
/* --- decide direct (zero-copy) vs. copied input --- */
/* NOTE(review): stray ';' — this 'if' has an empty body as written. The
 * following linesize checks presumably clear a 'direct' flag on elided
 * lines; confirm against upstream (upstream has no trailing ';' here). */
908 if (!pic_arg->buf[0]);
910 if (pic_arg->linesize[0] != s->linesize)
912 if (pic_arg->linesize[1] != s->uvlinesize)
914 if (pic_arg->linesize[2] != s->uvlinesize)
917 av_dlog(s->avctx, "%d %d %td %td\n", pic_arg->linesize[0],
918 pic_arg->linesize[1], s->linesize, s->uvlinesize);
/* direct path: reference the user's frame as a shared picture. */
921 i = ff_find_unused_picture(s, 1);
925 pic = &s->picture[i];
928 if ((ret = av_frame_ref(&pic->f, pic_arg)) < 0)
930 if (ff_alloc_picture(s, pic, 1) < 0) {
/* copy path: allocate an internal picture and copy plane by plane. */
934 i = ff_find_unused_picture(s, 0);
938 pic = &s->picture[i];
941 if (ff_alloc_picture(s, pic, 0) < 0) {
945 if (pic->f.data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
946 pic->f.data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
947 pic->f.data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
950 int h_chroma_shift, v_chroma_shift;
951 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
955 for (i = 0; i < 3; i++) {
956 int src_stride = pic_arg->linesize[i];
957 int dst_stride = i ? s->uvlinesize : s->linesize;
958 int h_shift = i ? h_chroma_shift : 0;
959 int v_shift = i ? v_chroma_shift : 0;
960 int w = s->width >> h_shift;
961 int h = s->height >> v_shift;
962 uint8_t *src = pic_arg->data[i];
963 uint8_t *dst = pic->f.data[i];
/* without rate control the copy is offset so later in-place use works */
965 if (!s->avctx->rc_buffer_size)
966 dst += INPLACE_OFFSET;
968 if (src_stride == dst_stride)
969 memcpy(dst, src, src_stride * h);
980 ret = av_frame_copy_props(&pic->f, pic_arg);
984 pic->f.display_picture_number = display_picture_number;
985 pic->f.pts = pts; // we set this here to avoid modifiying pic_arg
988 /* shift buffer entries */
989 for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
990 s->input_picture[i - 1] = s->input_picture[i];
992 s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether frame p is similar enough to ref to be skipped entirely:
 * accumulate a per-8x8-block comparison score over all three planes using
 * the metric selected by frame_skip_exp, then test it against
 * frame_skip_threshold and a lambda-scaled frame_skip_factor.
 * Returns nonzero to skip (return statements are on elided lines). */
997 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1001 int64_t score64 = 0;
1003 for (plane = 0; plane < 3; plane++) {
1004 const int stride = p->f.linesize[plane];
/* luma plane covers 2x2 8-pixel blocks per MB; chroma (4:2:0) one each */
1005 const int bw = plane ? 1 : 2;
1006 for (y = 0; y < s->mb_height * bw; y++) {
1007 for (x = 0; x < s->mb_width * bw; x++) {
/* non-shared input pictures were copied at an offset (see
 * load_input_picture), hence the +16 correction. */
1008 int off = p->shared ? 0 : 16;
1009 uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
1010 uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
1011 int v = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
/* frame_skip_exp selects the norm: max, L1, L2, |v|^3, v^4 */
1013 switch (s->avctx->frame_skip_exp) {
1014 case 0: score = FFMAX(score, v); break;
1015 case 1: score += FFABS(v); break;
1016 case 2: score += v * v; break;
1017 case 3: score64 += FFABS(v * v * (int64_t)v); break;
1018 case 4: score64 += v * v * (int64_t)(v * v); break;
1027 if (score64 < s->avctx->frame_skip_threshold)
1029 if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
/* Encode one frame with the auxiliary context `c` (used by
 * estimate_best_b_count) and return its compressed size; the size
 * extraction/return and error handling are on elided lines. */
1034 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1036 AVPacket pkt = { 0 };
1037 int ret, got_output;
1039 av_init_packet(&pkt);
1040 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1045 av_free_packet(&pkt);
/**
 * b_frame_strategy == 2: brute-force search for the best number of
 * B-frames by trial-encoding downscaled copies of the queued input
 * pictures with every candidate B-frame count and picking the one with
 * the lowest rate-distortion cost (bits * lambda2 + SSE from c->error[]).
 *
 * @return best B-frame count found (best_rd bookkeeping and the context
 *         teardown are on elided lines)
 */
1049 static int estimate_best_b_count(MpegEncContext *s)
1051 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1052 AVCodecContext *c = avcodec_alloc_context3(NULL);
1053 const int scale = s->avctx->brd_scale;
1054 int i, j, out_size, p_lambda, b_lambda, lambda2;
1055 int64_t best_rd = INT64_MAX;
1056 int best_b_count = -1;
1058 assert(scale >= 0 && scale <= 3);
/* reuse the last lambdas seen for P and B pictures; fall back to the P
 * lambda when no B picture has been coded yet. */
1061 //s->next_picture_ptr->quality;
1062 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1063 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1064 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1065 if (!b_lambda) // FIXME we should do this somewhere else
1066 b_lambda = p_lambda;
1067 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* --- configure the auxiliary encoder at the downscaled resolution --- */
1070 c->width = s->width >> scale;
1071 c->height = s->height >> scale;
1072 c->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR |
1073 CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
1074 c->flags |= s->avctx->flags & CODEC_FLAG_QPEL;
1075 c->mb_decision = s->avctx->mb_decision;
1076 c->me_cmp = s->avctx->me_cmp;
1077 c->mb_cmp = s->avctx->mb_cmp;
1078 c->me_sub_cmp = s->avctx->me_sub_cmp;
1079 c->pix_fmt = AV_PIX_FMT_YUV420P;
1080 c->time_base = s->avctx->time_base;
1081 c->max_b_frames = s->max_b_frames;
1083 if (avcodec_open2(c, codec, NULL) < 0)
/* --- shrink the reference + queued input pictures into tmp_frames --- */
1086 for (i = 0; i < s->max_b_frames + 2; i++) {
1087 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1088 s->next_picture_ptr;
1090 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1091 pre_input = *pre_input_ptr;
/* copied (non-shared) inputs live at INPLACE_OFFSET — compensate */
1093 if (!pre_input.shared && i) {
1094 pre_input.f.data[0] += INPLACE_OFFSET;
1095 pre_input.f.data[1] += INPLACE_OFFSET;
1096 pre_input.f.data[2] += INPLACE_OFFSET;
1099 s->dsp.shrink[scale](s->tmp_frames[i]->data[0], s->tmp_frames[i]->linesize[0],
1100 pre_input.f.data[0], pre_input.f.linesize[0],
1101 c->width, c->height);
1102 s->dsp.shrink[scale](s->tmp_frames[i]->data[1], s->tmp_frames[i]->linesize[1],
1103 pre_input.f.data[1], pre_input.f.linesize[1],
1104 c->width >> 1, c->height >> 1);
1105 s->dsp.shrink[scale](s->tmp_frames[i]->data[2], s->tmp_frames[i]->linesize[2],
1106 pre_input.f.data[2], pre_input.f.linesize[2],
1107 c->width >> 1, c->height >> 1);
/* --- try each candidate B-frame count j and measure RD cost --- */
1111 for (j = 0; j < s->max_b_frames + 1; j++) {
1114 if (!s->input_picture[j])
1117 c->error[0] = c->error[1] = c->error[2] = 0;
/* frame 0 (the reference) is coded as a cheap I frame first */
1119 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1120 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1122 out_size = encode_frame(c, s->tmp_frames[0]);
1124 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
/* every (j+1)-th frame is a P, the rest are Bs */
1126 for (i = 0; i < s->max_b_frames + 1; i++) {
1127 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1129 s->tmp_frames[i + 1]->pict_type = is_p ?
1130 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1131 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1133 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1135 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1138 /* get the delayed frames */
1140 out_size = encode_frame(c, NULL);
1141 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* add the distortion (PSNR error sums) accumulated by the encoder */
1144 rd += c->error[0] + c->error[1] + c->error[2];
1155 return best_b_count;
1158 static int select_input_picture(MpegEncContext *s)
1162 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1163 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1164 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1166 /* set next picture type & ordering */
1167 if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
1168 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1169 s->next_picture_ptr == NULL || s->intra_only) {
1170 s->reordered_input_picture[0] = s->input_picture[0];
1171 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
1172 s->reordered_input_picture[0]->f.coded_picture_number =
1173 s->coded_picture_number++;
1177 if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1178 if (s->picture_in_gop_number < s->gop_size &&
1179 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1180 // FIXME check that te gop check above is +-1 correct
1181 av_frame_unref(&s->input_picture[0]->f);
1184 ff_vbv_update(s, 0);
1190 if (s->flags & CODEC_FLAG_PASS2) {
1191 for (i = 0; i < s->max_b_frames + 1; i++) {
1192 int pict_num = s->input_picture[0]->f.display_picture_number + i;
1194 if (pict_num >= s->rc_context.num_entries)
1196 if (!s->input_picture[i]) {
1197 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1201 s->input_picture[i]->f.pict_type =
1202 s->rc_context.entry[pict_num].new_pict_type;
1206 if (s->avctx->b_frame_strategy == 0) {
1207 b_frames = s->max_b_frames;
1208 while (b_frames && !s->input_picture[b_frames])
1210 } else if (s->avctx->b_frame_strategy == 1) {
1211 for (i = 1; i < s->max_b_frames + 1; i++) {
1212 if (s->input_picture[i] &&
1213 s->input_picture[i]->b_frame_score == 0) {
1214 s->input_picture[i]->b_frame_score =
1216 s->input_picture[i ]->f.data[0],
1217 s->input_picture[i - 1]->f.data[0],
1221 for (i = 0; i < s->max_b_frames + 1; i++) {
1222 if (s->input_picture[i] == NULL ||
1223 s->input_picture[i]->b_frame_score - 1 >
1224 s->mb_num / s->avctx->b_sensitivity)
1228 b_frames = FFMAX(0, i - 1);
1231 for (i = 0; i < b_frames + 1; i++) {
1232 s->input_picture[i]->b_frame_score = 0;
1234 } else if (s->avctx->b_frame_strategy == 2) {
1235 b_frames = estimate_best_b_count(s);
1237 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
1243 for (i = b_frames - 1; i >= 0; i--) {
1244 int type = s->input_picture[i]->f.pict_type;
1245 if (type && type != AV_PICTURE_TYPE_B)
1248 if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
1249 b_frames == s->max_b_frames) {
1250 av_log(s->avctx, AV_LOG_ERROR,
1251 "warning, too many b frames in a row\n");
1254 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1255 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1256 s->gop_size > s->picture_in_gop_number) {
1257 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1259 if (s->flags & CODEC_FLAG_CLOSED_GOP)
1261 s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
1265 if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
1266 s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
1269 s->reordered_input_picture[0] = s->input_picture[b_frames];
1270 if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
1271 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
1272 s->reordered_input_picture[0]->f.coded_picture_number =
1273 s->coded_picture_number++;
1274 for (i = 0; i < b_frames; i++) {
1275 s->reordered_input_picture[i + 1] = s->input_picture[i];
1276 s->reordered_input_picture[i + 1]->f.pict_type =
1278 s->reordered_input_picture[i + 1]->f.coded_picture_number =
1279 s->coded_picture_number++;
1284 if (s->reordered_input_picture[0]) {
1285 s->reordered_input_picture[0]->reference =
1286 s->reordered_input_picture[0]->f.pict_type !=
1287 AV_PICTURE_TYPE_B ? 3 : 0;
1289 ff_mpeg_unref_picture(s, &s->new_picture);
1290 if ((ret = ff_mpeg_ref_picture(s, &s->new_picture, s->reordered_input_picture[0])))
1293 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1294 // input is a shared pix, so we can't modifiy it -> alloc a new
1295 // one & ensure that the shared one is reuseable
1298 int i = ff_find_unused_picture(s, 0);
1301 pic = &s->picture[i];
1303 pic->reference = s->reordered_input_picture[0]->reference;
1304 if (ff_alloc_picture(s, pic, 0) < 0) {
1308 ret = av_frame_copy_props(&pic->f, &s->reordered_input_picture[0]->f);
1312 /* mark us unused / free shared pic */
1313 av_frame_unref(&s->reordered_input_picture[0]->f);
1314 s->reordered_input_picture[0]->shared = 0;
1316 s->current_picture_ptr = pic;
1318 // input is not a shared pix -> reuse buffer for current_pix
1319 s->current_picture_ptr = s->reordered_input_picture[0];
1320 for (i = 0; i < 4; i++) {
1321 s->new_picture.f.data[i] += INPLACE_OFFSET;
1324 ff_mpeg_unref_picture(s, &s->current_picture);
1325 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1326 s->current_picture_ptr)) < 0)
1329 s->picture_number = s->new_picture.f.display_picture_number;
1331 ff_mpeg_unref_picture(s, &s->new_picture);
1336 int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1337 const AVFrame *pic_arg, int *got_packet)
1339 MpegEncContext *s = avctx->priv_data;
1340 int i, stuffing_count, ret;
1341 int context_count = s->slice_context_count;
1343 s->picture_in_gop_number++;
1345 if (load_input_picture(s, pic_arg) < 0)
1348 if (select_input_picture(s) < 0) {
1353 if (s->new_picture.f.data[0]) {
1355 (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*MAX_MB_BYTES)) < 0)
1358 s->mb_info_ptr = av_packet_new_side_data(pkt,
1359 AV_PKT_DATA_H263_MB_INFO,
1360 s->mb_width*s->mb_height*12);
1361 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1364 for (i = 0; i < context_count; i++) {
1365 int start_y = s->thread_context[i]->start_mb_y;
1366 int end_y = s->thread_context[i]-> end_mb_y;
1367 int h = s->mb_height;
1368 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1369 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1371 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1374 s->pict_type = s->new_picture.f.pict_type;
1376 ff_MPV_frame_start(s, avctx);
1378 if (encode_picture(s, s->picture_number) < 0)
1381 avctx->header_bits = s->header_bits;
1382 avctx->mv_bits = s->mv_bits;
1383 avctx->misc_bits = s->misc_bits;
1384 avctx->i_tex_bits = s->i_tex_bits;
1385 avctx->p_tex_bits = s->p_tex_bits;
1386 avctx->i_count = s->i_count;
1387 // FIXME f/b_count in avctx
1388 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1389 avctx->skip_count = s->skip_count;
1391 ff_MPV_frame_end(s);
1393 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1394 ff_mjpeg_encode_picture_trailer(s);
1396 if (avctx->rc_buffer_size) {
1397 RateControlContext *rcc = &s->rc_context;
1398 int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;
1400 if (put_bits_count(&s->pb) > max_size &&
1401 s->lambda < s->avctx->lmax) {
1402 s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1403 (s->qscale + 1) / s->qscale);
1404 if (s->adaptive_quant) {
1406 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1407 s->lambda_table[i] =
1408 FFMAX(s->lambda_table[i] + 1,
1409 s->lambda_table[i] * (s->qscale + 1) /
1412 s->mb_skipped = 0; // done in MPV_frame_start()
1413 // done in encode_picture() so we must undo it
1414 if (s->pict_type == AV_PICTURE_TYPE_P) {
1415 if (s->flipflop_rounding ||
1416 s->codec_id == AV_CODEC_ID_H263P ||
1417 s->codec_id == AV_CODEC_ID_MPEG4)
1418 s->no_rounding ^= 1;
1420 if (s->pict_type != AV_PICTURE_TYPE_B) {
1421 s->time_base = s->last_time_base;
1422 s->last_non_b_time = s->time - s->pp_time;
1424 for (i = 0; i < context_count; i++) {
1425 PutBitContext *pb = &s->thread_context[i]->pb;
1426 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1431 assert(s->avctx->rc_max_rate);
1434 if (s->flags & CODEC_FLAG_PASS1)
1435 ff_write_pass1_stats(s);
1437 for (i = 0; i < 4; i++) {
1438 s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
1439 avctx->error[i] += s->current_picture_ptr->f.error[i];
1442 if (s->flags & CODEC_FLAG_PASS1)
1443 assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1444 avctx->i_tex_bits + avctx->p_tex_bits ==
1445 put_bits_count(&s->pb));
1446 flush_put_bits(&s->pb);
1447 s->frame_bits = put_bits_count(&s->pb);
1449 stuffing_count = ff_vbv_update(s, s->frame_bits);
1450 if (stuffing_count) {
1451 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1452 stuffing_count + 50) {
1453 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1457 switch (s->codec_id) {
1458 case AV_CODEC_ID_MPEG1VIDEO:
1459 case AV_CODEC_ID_MPEG2VIDEO:
1460 while (stuffing_count--) {
1461 put_bits(&s->pb, 8, 0);
1464 case AV_CODEC_ID_MPEG4:
1465 put_bits(&s->pb, 16, 0);
1466 put_bits(&s->pb, 16, 0x1C3);
1467 stuffing_count -= 4;
1468 while (stuffing_count--) {
1469 put_bits(&s->pb, 8, 0xFF);
1473 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1475 flush_put_bits(&s->pb);
1476 s->frame_bits = put_bits_count(&s->pb);
1479 /* update mpeg1/2 vbv_delay for CBR */
1480 if (s->avctx->rc_max_rate &&
1481 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1482 s->out_format == FMT_MPEG1 &&
1483 90000LL * (avctx->rc_buffer_size - 1) <=
1484 s->avctx->rc_max_rate * 0xFFFFLL) {
1485 int vbv_delay, min_delay;
1486 double inbits = s->avctx->rc_max_rate *
1487 av_q2d(s->avctx->time_base);
1488 int minbits = s->frame_bits - 8 *
1489 (s->vbv_delay_ptr - s->pb.buf - 1);
1490 double bits = s->rc_context.buffer_index + minbits - inbits;
1493 av_log(s->avctx, AV_LOG_ERROR,
1494 "Internal error, negative bits\n");
1496 assert(s->repeat_first_field == 0);
1498 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1499 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1500 s->avctx->rc_max_rate;
1502 vbv_delay = FFMAX(vbv_delay, min_delay);
1504 assert(vbv_delay < 0xFFFF);
1506 s->vbv_delay_ptr[0] &= 0xF8;
1507 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1508 s->vbv_delay_ptr[1] = vbv_delay >> 5;
1509 s->vbv_delay_ptr[2] &= 0x07;
1510 s->vbv_delay_ptr[2] |= vbv_delay << 3;
1511 avctx->vbv_delay = vbv_delay * 300;
1513 s->total_bits += s->frame_bits;
1514 avctx->frame_bits = s->frame_bits;
1516 pkt->pts = s->current_picture.f.pts;
1517 if (!s->low_delay) {
1518 if (!s->current_picture.f.coded_picture_number)
1519 pkt->dts = pkt->pts - s->dts_delta;
1521 pkt->dts = s->reordered_pts;
1522 s->reordered_pts = s->input_picture[0]->f.pts;
1524 pkt->dts = pkt->pts;
1525 if (s->current_picture.f.key_frame)
1526 pkt->flags |= AV_PKT_FLAG_KEY;
1528 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
1532 assert((s->frame_bits & 7) == 0);
1534 pkt->size = s->frame_bits / 8;
1535 *got_packet = !!pkt->size;
1539 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1540 int n, int threshold)
1542 static const char tab[64] = {
1543 3, 2, 2, 1, 1, 1, 1, 1,
1544 1, 1, 1, 1, 1, 1, 1, 1,
1545 1, 1, 1, 1, 1, 1, 1, 1,
1546 0, 0, 0, 0, 0, 0, 0, 0,
1547 0, 0, 0, 0, 0, 0, 0, 0,
1548 0, 0, 0, 0, 0, 0, 0, 0,
1549 0, 0, 0, 0, 0, 0, 0, 0,
1550 0, 0, 0, 0, 0, 0, 0, 0
1555 int16_t *block = s->block[n];
1556 const int last_index = s->block_last_index[n];
1559 if (threshold < 0) {
1561 threshold = -threshold;
1565 /* Are all we could set to zero already zero? */
1566 if (last_index <= skip_dc - 1)
1569 for (i = 0; i <= last_index; i++) {
1570 const int j = s->intra_scantable.permutated[i];
1571 const int level = FFABS(block[j]);
1573 if (skip_dc && i == 0)
1577 } else if (level > 1) {
1583 if (score >= threshold)
1585 for (i = skip_dc; i <= last_index; i++) {
1586 const int j = s->intra_scantable.permutated[i];
1590 s->block_last_index[n] = 0;
1592 s->block_last_index[n] = -1;
1595 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
1599 const int maxlevel = s->max_qcoeff;
1600 const int minlevel = s->min_qcoeff;
1604 i = 1; // skip clipping of intra dc
1608 for (; i <= last_index; i++) {
1609 const int j = s->intra_scantable.permutated[i];
1610 int level = block[j];
1612 if (level > maxlevel) {
1615 } else if (level < minlevel) {
1623 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1624 av_log(s->avctx, AV_LOG_INFO,
1625 "warning, clipping %d dct coefficients to %d..%d\n",
1626 overflow, minlevel, maxlevel);
1629 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
1633 for (y = 0; y < 8; y++) {
1634 for (x = 0; x < 8; x++) {
1640 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
1641 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
1642 int v = ptr[x2 + y2 * stride];
1648 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
1653 static av_always_inline void encode_mb_internal(MpegEncContext *s,
1654 int motion_x, int motion_y,
1655 int mb_block_height,
1658 int16_t weight[8][64];
1659 int16_t orig[8][64];
1660 const int mb_x = s->mb_x;
1661 const int mb_y = s->mb_y;
1664 int dct_offset = s->linesize * 8; // default for progressive frames
1665 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1666 ptrdiff_t wrap_y, wrap_c;
1668 for (i = 0; i < mb_block_count; i++)
1669 skip_dct[i] = s->skipdct;
1671 if (s->adaptive_quant) {
1672 const int last_qp = s->qscale;
1673 const int mb_xy = mb_x + mb_y * s->mb_stride;
1675 s->lambda = s->lambda_table[mb_xy];
1678 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
1679 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
1680 s->dquant = s->qscale - last_qp;
1682 if (s->out_format == FMT_H263) {
1683 s->dquant = av_clip(s->dquant, -2, 2);
1685 if (s->codec_id == AV_CODEC_ID_MPEG4) {
1687 if (s->pict_type == AV_PICTURE_TYPE_B) {
1688 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
1691 if (s->mv_type == MV_TYPE_8X8)
1697 ff_set_qscale(s, last_qp + s->dquant);
1698 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
1699 ff_set_qscale(s, s->qscale + s->dquant);
1701 wrap_y = s->linesize;
1702 wrap_c = s->uvlinesize;
1703 ptr_y = s->new_picture.f.data[0] +
1704 (mb_y * 16 * wrap_y) + mb_x * 16;
1705 ptr_cb = s->new_picture.f.data[1] +
1706 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1707 ptr_cr = s->new_picture.f.data[2] +
1708 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1710 if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
1711 uint8_t *ebuf = s->edge_emu_buffer + 32;
1712 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
1714 16, 16, mb_x * 16, mb_y * 16,
1715 s->width, s->height);
1717 s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb,
1719 8, mb_block_height, mb_x * 8, mb_y * 8,
1720 s->width >> 1, s->height >> 1);
1721 ptr_cb = ebuf + 18 * wrap_y;
1722 s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr,
1724 8, mb_block_height, mb_x * 8, mb_y * 8,
1725 s->width >> 1, s->height >> 1);
1726 ptr_cr = ebuf + 18 * wrap_y + 8;
1730 if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1731 int progressive_score, interlaced_score;
1733 s->interlaced_dct = 0;
1734 progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
1736 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
1737 NULL, wrap_y, 8) - 400;
1739 if (progressive_score > 0) {
1740 interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
1741 NULL, wrap_y * 2, 8) +
1742 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
1743 NULL, wrap_y * 2, 8);
1744 if (progressive_score > interlaced_score) {
1745 s->interlaced_dct = 1;
1747 dct_offset = wrap_y;
1749 if (s->chroma_format == CHROMA_422)
1755 s->dsp.get_pixels(s->block[0], ptr_y , wrap_y);
1756 s->dsp.get_pixels(s->block[1], ptr_y + 8 , wrap_y);
1757 s->dsp.get_pixels(s->block[2], ptr_y + dct_offset , wrap_y);
1758 s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);
1760 if (s->flags & CODEC_FLAG_GRAY) {
1764 s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
1765 s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
1766 if (!s->chroma_y_shift) { /* 422 */
1767 s->dsp.get_pixels(s->block[6],
1768 ptr_cb + (dct_offset >> 1), wrap_c);
1769 s->dsp.get_pixels(s->block[7],
1770 ptr_cr + (dct_offset >> 1), wrap_c);
1774 op_pixels_func (*op_pix)[4];
1775 qpel_mc_func (*op_qpix)[16];
1776 uint8_t *dest_y, *dest_cb, *dest_cr;
1778 dest_y = s->dest[0];
1779 dest_cb = s->dest[1];
1780 dest_cr = s->dest[2];
1782 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
1783 op_pix = s->hdsp.put_pixels_tab;
1784 op_qpix = s->dsp.put_qpel_pixels_tab;
1786 op_pix = s->hdsp.put_no_rnd_pixels_tab;
1787 op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
1790 if (s->mv_dir & MV_DIR_FORWARD) {
1791 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
1792 s->last_picture.f.data,
1794 op_pix = s->hdsp.avg_pixels_tab;
1795 op_qpix = s->dsp.avg_qpel_pixels_tab;
1797 if (s->mv_dir & MV_DIR_BACKWARD) {
1798 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
1799 s->next_picture.f.data,
1803 if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1804 int progressive_score, interlaced_score;
1806 s->interlaced_dct = 0;
1807 progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
1810 s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
1811 ptr_y + wrap_y * 8, wrap_y,
1814 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
1815 progressive_score -= 400;
1817 if (progressive_score > 0) {
1818 interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
1821 s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
1825 if (progressive_score > interlaced_score) {
1826 s->interlaced_dct = 1;
1828 dct_offset = wrap_y;
1830 if (s->chroma_format == CHROMA_422)
1836 s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
1837 s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
1838 s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
1839 dest_y + dct_offset, wrap_y);
1840 s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
1841 dest_y + dct_offset + 8, wrap_y);
1843 if (s->flags & CODEC_FLAG_GRAY) {
1847 s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
1848 s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
1849 if (!s->chroma_y_shift) { /* 422 */
1850 s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
1851 dest_cb + (dct_offset >> 1), wrap_c);
1852 s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
1853 dest_cr + (dct_offset >> 1), wrap_c);
1856 /* pre quantization */
1857 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
1858 2 * s->qscale * s->qscale) {
1860 if (s->dsp.sad[1](NULL, ptr_y , dest_y,
1861 wrap_y, 8) < 20 * s->qscale)
1863 if (s->dsp.sad[1](NULL, ptr_y + 8,
1864 dest_y + 8, wrap_y, 8) < 20 * s->qscale)
1866 if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
1867 dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
1869 if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
1870 dest_y + dct_offset + 8,
1871 wrap_y, 8) < 20 * s->qscale)
1873 if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
1874 wrap_c, 8) < 20 * s->qscale)
1876 if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
1877 wrap_c, 8) < 20 * s->qscale)
1879 if (!s->chroma_y_shift) { /* 422 */
1880 if (s->dsp.sad[1](NULL, ptr_cb + (dct_offset >> 1),
1881 dest_cb + (dct_offset >> 1),
1882 wrap_c, 8) < 20 * s->qscale)
1884 if (s->dsp.sad[1](NULL, ptr_cr + (dct_offset >> 1),
1885 dest_cr + (dct_offset >> 1),
1886 wrap_c, 8) < 20 * s->qscale)
1892 if (s->quantizer_noise_shaping) {
1894 get_visual_weight(weight[0], ptr_y , wrap_y);
1896 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
1898 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
1900 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
1902 get_visual_weight(weight[4], ptr_cb , wrap_c);
1904 get_visual_weight(weight[5], ptr_cr , wrap_c);
1905 if (!s->chroma_y_shift) { /* 422 */
1907 get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
1910 get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
1913 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
1916 /* DCT & quantize */
1917 assert(s->out_format != FMT_MJPEG || s->qscale == 8);
1919 for (i = 0; i < mb_block_count; i++) {
1922 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
1923 // FIXME we could decide to change to quantizer instead of
1925 // JS: I don't think that would be a good idea it could lower
1926 // quality instead of improve it. Just INTRADC clipping
1927 // deserves changes in quantizer
1929 clip_coeffs(s, s->block[i], s->block_last_index[i]);
1931 s->block_last_index[i] = -1;
1933 if (s->quantizer_noise_shaping) {
1934 for (i = 0; i < mb_block_count; i++) {
1936 s->block_last_index[i] =
1937 dct_quantize_refine(s, s->block[i], weight[i],
1938 orig[i], i, s->qscale);
1943 if (s->luma_elim_threshold && !s->mb_intra)
1944 for (i = 0; i < 4; i++)
1945 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
1946 if (s->chroma_elim_threshold && !s->mb_intra)
1947 for (i = 4; i < mb_block_count; i++)
1948 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
1950 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
1951 for (i = 0; i < mb_block_count; i++) {
1952 if (s->block_last_index[i] == -1)
1953 s->coded_score[i] = INT_MAX / 256;
1958 if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
1959 s->block_last_index[4] =
1960 s->block_last_index[5] = 0;
1962 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
1965 // non c quantize code returns incorrect block_last_index FIXME
1966 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
1967 for (i = 0; i < mb_block_count; i++) {
1969 if (s->block_last_index[i] > 0) {
1970 for (j = 63; j > 0; j--) {
1971 if (s->block[i][s->intra_scantable.permutated[j]])
1974 s->block_last_index[i] = j;
1979 /* huffman encode */
1980 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
1981 case AV_CODEC_ID_MPEG1VIDEO:
1982 case AV_CODEC_ID_MPEG2VIDEO:
1983 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
1984 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
1986 case AV_CODEC_ID_MPEG4:
1987 if (CONFIG_MPEG4_ENCODER)
1988 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
1990 case AV_CODEC_ID_MSMPEG4V2:
1991 case AV_CODEC_ID_MSMPEG4V3:
1992 case AV_CODEC_ID_WMV1:
1993 if (CONFIG_MSMPEG4_ENCODER)
1994 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
1996 case AV_CODEC_ID_WMV2:
1997 if (CONFIG_WMV2_ENCODER)
1998 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2000 case AV_CODEC_ID_H261:
2001 if (CONFIG_H261_ENCODER)
2002 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2004 case AV_CODEC_ID_H263:
2005 case AV_CODEC_ID_H263P:
2006 case AV_CODEC_ID_FLV1:
2007 case AV_CODEC_ID_RV10:
2008 case AV_CODEC_ID_RV20:
2009 if (CONFIG_H263_ENCODER)
2010 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2012 case AV_CODEC_ID_MJPEG:
2013 if (CONFIG_MJPEG_ENCODER)
2014 ff_mjpeg_encode_mb(s, s->block);
2021 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2023 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 6);
2024 else encode_mb_internal(s, motion_x, motion_y, 16, 8);
2027 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2030 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2033 d->mb_skip_run= s->mb_skip_run;
2035 d->last_dc[i] = s->last_dc[i];
2038 d->mv_bits= s->mv_bits;
2039 d->i_tex_bits= s->i_tex_bits;
2040 d->p_tex_bits= s->p_tex_bits;
2041 d->i_count= s->i_count;
2042 d->f_count= s->f_count;
2043 d->b_count= s->b_count;
2044 d->skip_count= s->skip_count;
2045 d->misc_bits= s->misc_bits;
2049 d->qscale= s->qscale;
2050 d->dquant= s->dquant;
2052 d->esc3_level_length= s->esc3_level_length;
2055 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2058 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2059 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2062 d->mb_skip_run= s->mb_skip_run;
2064 d->last_dc[i] = s->last_dc[i];
2067 d->mv_bits= s->mv_bits;
2068 d->i_tex_bits= s->i_tex_bits;
2069 d->p_tex_bits= s->p_tex_bits;
2070 d->i_count= s->i_count;
2071 d->f_count= s->f_count;
2072 d->b_count= s->b_count;
2073 d->skip_count= s->skip_count;
2074 d->misc_bits= s->misc_bits;
2076 d->mb_intra= s->mb_intra;
2077 d->mb_skipped= s->mb_skipped;
2078 d->mv_type= s->mv_type;
2079 d->mv_dir= s->mv_dir;
2081 if(s->data_partitioning){
2083 d->tex_pb= s->tex_pb;
2087 d->block_last_index[i]= s->block_last_index[i];
2088 d->interlaced_dct= s->interlaced_dct;
2089 d->qscale= s->qscale;
2091 d->esc3_level_length= s->esc3_level_length;
2094 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2095 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2096 int *dmin, int *next_block, int motion_x, int motion_y)
2099 uint8_t *dest_backup[3];
2101 copy_context_before_encode(s, backup, type);
2103 s->block= s->blocks[*next_block];
2104 s->pb= pb[*next_block];
2105 if(s->data_partitioning){
2106 s->pb2 = pb2 [*next_block];
2107 s->tex_pb= tex_pb[*next_block];
2111 memcpy(dest_backup, s->dest, sizeof(s->dest));
2112 s->dest[0] = s->rd_scratchpad;
2113 s->dest[1] = s->rd_scratchpad + 16*s->linesize;
2114 s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
2115 assert(s->linesize >= 32); //FIXME
2118 encode_mb(s, motion_x, motion_y);
2120 score= put_bits_count(&s->pb);
2121 if(s->data_partitioning){
2122 score+= put_bits_count(&s->pb2);
2123 score+= put_bits_count(&s->tex_pb);
2126 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2127 ff_MPV_decode_mb(s, s->block);
2129 score *= s->lambda2;
2130 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2134 memcpy(s->dest, dest_backup, sizeof(s->dest));
2141 copy_context_after_encode(best, s, type);
2145 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2146 uint32_t *sq = ff_squareTbl + 256;
2151 return s->dsp.sse[0](NULL, src1, src2, stride, 16);
2152 else if(w==8 && h==8)
2153 return s->dsp.sse[1](NULL, src1, src2, stride, 8);
2157 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2166 static int sse_mb(MpegEncContext *s){
2170 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2171 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2174 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2175 return s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2176 +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2177 +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2179 return s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2180 +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2181 +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2184 return sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2185 +sse(s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2186 +sse(s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2189 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2190 MpegEncContext *s= *(void**)arg;
2194 s->me.dia_size= s->avctx->pre_dia_size;
2195 s->first_slice_line=1;
2196 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2197 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2198 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2200 s->first_slice_line=0;
2208 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2209 MpegEncContext *s= *(void**)arg;
2211 ff_check_alignment();
2213 s->me.dia_size= s->avctx->dia_size;
2214 s->first_slice_line=1;
2215 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2216 s->mb_x=0; //for block init below
2217 ff_init_block_index(s);
2218 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2219 s->block_index[0]+=2;
2220 s->block_index[1]+=2;
2221 s->block_index[2]+=2;
2222 s->block_index[3]+=2;
2224 /* compute motion vector & mb_type and store in context */
2225 if(s->pict_type==AV_PICTURE_TYPE_B)
2226 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2228 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2230 s->first_slice_line=0;
2235 static int mb_var_thread(AVCodecContext *c, void *arg){
2236 MpegEncContext *s= *(void**)arg;
2239 ff_check_alignment();
2241 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2242 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2245 uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
2247 int sum = s->dsp.pix_sum(pix, s->linesize);
2249 varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
2251 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2252 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2253 s->me.mb_var_sum_temp += varc;
2259 static void write_slice_end(MpegEncContext *s){
2260 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2261 if(s->partitioned_frame){
2262 ff_mpeg4_merge_partitions(s);
2265 ff_mpeg4_stuffing(&s->pb);
2266 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2267 ff_mjpeg_encode_stuffing(&s->pb);
2270 avpriv_align_put_bits(&s->pb);
2271 flush_put_bits(&s->pb);
2273 if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
2274 s->misc_bits+= get_bits_diff(s);
2277 static void write_mb_info(MpegEncContext *s)
2279 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2280 int offset = put_bits_count(&s->pb);
2281 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2282 int gobn = s->mb_y / s->gob_index;
2284 if (CONFIG_H263_ENCODER)
2285 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2286 bytestream_put_le32(&ptr, offset);
2287 bytestream_put_byte(&ptr, s->qscale);
2288 bytestream_put_byte(&ptr, gobn);
2289 bytestream_put_le16(&ptr, mba);
2290 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2291 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2292 /* 4MV not implemented */
2293 bytestream_put_byte(&ptr, 0); /* hmv2 */
2294 bytestream_put_byte(&ptr, 0); /* vmv2 */
2297 static void update_mb_info(MpegEncContext *s, int startcode)
2301 if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2302 s->mb_info_size += 12;
2303 s->prev_mb_info = s->last_mb_info;
2306 s->prev_mb_info = put_bits_count(&s->pb)/8;
2307 /* This might have incremented mb_info_size above, and we return without
2308 * actually writing any info into that slot yet. But in that case,
2309 * this will be called again at the start of the after writing the
2310 * start code, actually writing the mb info. */
2314 s->last_mb_info = put_bits_count(&s->pb)/8;
2315 if (!s->mb_info_size)
2316 s->mb_info_size += 12;
2320 static int encode_thread(AVCodecContext *c, void *arg){
2321 MpegEncContext *s= *(void**)arg;
2322 int mb_x, mb_y, pdif = 0;
2323 int chr_h= 16>>s->chroma_y_shift;
2325 MpegEncContext best_s, backup_s;
2326 uint8_t bit_buf[2][MAX_MB_BYTES];
2327 uint8_t bit_buf2[2][MAX_MB_BYTES];
2328 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2329 PutBitContext pb[2], pb2[2], tex_pb[2];
2331 ff_check_alignment();
2334 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2335 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2336 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2339 s->last_bits= put_bits_count(&s->pb);
2350 /* init last dc values */
2351 /* note: quant matrix value (8) is implied here */
2352 s->last_dc[i] = 128 << s->intra_dc_precision;
2354 s->current_picture.f.error[i] = 0;
2357 memset(s->last_mv, 0, sizeof(s->last_mv));
2361 switch(s->codec_id){
2362 case AV_CODEC_ID_H263:
2363 case AV_CODEC_ID_H263P:
2364 case AV_CODEC_ID_FLV1:
2365 if (CONFIG_H263_ENCODER)
2366 s->gob_index = ff_h263_get_gob_height(s);
2368 case AV_CODEC_ID_MPEG4:
2369 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2370 ff_mpeg4_init_partitions(s);
2376 s->first_slice_line = 1;
2377 s->ptr_lastgob = s->pb.buf;
2378 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2382 ff_set_qscale(s, s->qscale);
2383 ff_init_block_index(s);
2385 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2386 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2387 int mb_type= s->mb_type[xy];
2392 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2393 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2396 if(s->data_partitioning){
2397 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
2398 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2399 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2405 s->mb_y = mb_y; // moved into loop, can get changed by H.261
2406 ff_update_block_index(s);
2408 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2409 ff_h261_reorder_mb_index(s);
2410 xy= s->mb_y*s->mb_stride + s->mb_x;
2411 mb_type= s->mb_type[xy];
2414 /* write gob / video packet header */
2416 int current_packet_size, is_gob_start;
2418 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2420 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2422 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2424 switch(s->codec_id){
2425 case AV_CODEC_ID_H263:
2426 case AV_CODEC_ID_H263P:
2427 if(!s->h263_slice_structured)
2428 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2430 case AV_CODEC_ID_MPEG2VIDEO:
2431 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2432 case AV_CODEC_ID_MPEG1VIDEO:
2433 if(s->mb_skip_run) is_gob_start=0;
2438 if(s->start_mb_y != mb_y || mb_x!=0){
2441 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2442 ff_mpeg4_init_partitions(s);
2446 assert((put_bits_count(&s->pb)&7) == 0);
2447 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2449 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
2450 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2451 int d = 100 / s->error_rate;
2453 current_packet_size=0;
2454 s->pb.buf_ptr= s->ptr_lastgob;
2455 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2459 if (s->avctx->rtp_callback){
2460 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2461 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2463 update_mb_info(s, 1);
2465 switch(s->codec_id){
2466 case AV_CODEC_ID_MPEG4:
2467 if (CONFIG_MPEG4_ENCODER) {
2468 ff_mpeg4_encode_video_packet_header(s);
2469 ff_mpeg4_clean_buffers(s);
2472 case AV_CODEC_ID_MPEG1VIDEO:
2473 case AV_CODEC_ID_MPEG2VIDEO:
2474 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2475 ff_mpeg1_encode_slice_header(s);
2476 ff_mpeg1_clean_buffers(s);
2479 case AV_CODEC_ID_H263:
2480 case AV_CODEC_ID_H263P:
2481 if (CONFIG_H263_ENCODER)
2482 ff_h263_encode_gob_header(s, mb_y);
2486 if(s->flags&CODEC_FLAG_PASS1){
2487 int bits= put_bits_count(&s->pb);
2488 s->misc_bits+= bits - s->last_bits;
2492 s->ptr_lastgob += current_packet_size;
2493 s->first_slice_line=1;
2494 s->resync_mb_x=mb_x;
2495 s->resync_mb_y=mb_y;
2499 if( (s->resync_mb_x == s->mb_x)
2500 && s->resync_mb_y+1 == s->mb_y){
2501 s->first_slice_line=0;
2505 s->dquant=0; //only for QP_RD
2507 update_mb_info(s, 0);
2509 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2511 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2513 copy_context_before_encode(&backup_s, s, -1);
2515 best_s.data_partitioning= s->data_partitioning;
2516 best_s.partitioned_frame= s->partitioned_frame;
2517 if(s->data_partitioning){
2518 backup_s.pb2= s->pb2;
2519 backup_s.tex_pb= s->tex_pb;
2522 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2523 s->mv_dir = MV_DIR_FORWARD;
2524 s->mv_type = MV_TYPE_16X16;
2526 s->mv[0][0][0] = s->p_mv_table[xy][0];
2527 s->mv[0][0][1] = s->p_mv_table[xy][1];
2528 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2529 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2531 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2532 s->mv_dir = MV_DIR_FORWARD;
2533 s->mv_type = MV_TYPE_FIELD;
2536 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2537 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2538 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2540 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2541 &dmin, &next_block, 0, 0);
2543 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2544 s->mv_dir = MV_DIR_FORWARD;
2545 s->mv_type = MV_TYPE_16X16;
2549 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2550 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2552 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2553 s->mv_dir = MV_DIR_FORWARD;
2554 s->mv_type = MV_TYPE_8X8;
2557 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2558 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2560 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2561 &dmin, &next_block, 0, 0);
2563 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2564 s->mv_dir = MV_DIR_FORWARD;
2565 s->mv_type = MV_TYPE_16X16;
2567 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2568 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2569 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
2570 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2572 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
2573 s->mv_dir = MV_DIR_BACKWARD;
2574 s->mv_type = MV_TYPE_16X16;
2576 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2577 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2578 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2579 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2581 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
2582 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2583 s->mv_type = MV_TYPE_16X16;
2585 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2586 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2587 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2588 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2589 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
2590 &dmin, &next_block, 0, 0);
2592 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
2593 s->mv_dir = MV_DIR_FORWARD;
2594 s->mv_type = MV_TYPE_FIELD;
2597 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2598 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2599 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2601 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
2602 &dmin, &next_block, 0, 0);
2604 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
2605 s->mv_dir = MV_DIR_BACKWARD;
2606 s->mv_type = MV_TYPE_FIELD;
2609 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2610 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2611 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2613 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
2614 &dmin, &next_block, 0, 0);
2616 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
2617 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2618 s->mv_type = MV_TYPE_FIELD;
2620 for(dir=0; dir<2; dir++){
2622 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2623 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2624 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2627 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
2628 &dmin, &next_block, 0, 0);
2630 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
2632 s->mv_type = MV_TYPE_16X16;
2636 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
2637 &dmin, &next_block, 0, 0);
2638 if(s->h263_pred || s->h263_aic){
2640 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
2642 ff_clean_intra_table_entries(s); //old mode?
2646 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
2647 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
2648 const int last_qp= backup_s.qscale;
2651 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
2652 static const int dquant_tab[4]={-1,1,-2,2};
2654 assert(backup_s.dquant == 0);
2657 s->mv_dir= best_s.mv_dir;
2658 s->mv_type = MV_TYPE_16X16;
2659 s->mb_intra= best_s.mb_intra;
2660 s->mv[0][0][0] = best_s.mv[0][0][0];
2661 s->mv[0][0][1] = best_s.mv[0][0][1];
2662 s->mv[1][0][0] = best_s.mv[1][0][0];
2663 s->mv[1][0][1] = best_s.mv[1][0][1];
2665 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
2666 for(; qpi<4; qpi++){
2667 int dquant= dquant_tab[qpi];
2668 qp= last_qp + dquant;
2669 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
2671 backup_s.dquant= dquant;
2672 if(s->mb_intra && s->dc_val[0]){
2674 dc[i]= s->dc_val[0][ s->block_index[i] ];
2675 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
2679 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2680 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
2681 if(best_s.qscale != qp){
2682 if(s->mb_intra && s->dc_val[0]){
2684 s->dc_val[0][ s->block_index[i] ]= dc[i];
2685 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
2692 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
2693 int mx= s->b_direct_mv_table[xy][0];
2694 int my= s->b_direct_mv_table[xy][1];
2696 backup_s.dquant = 0;
2697 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2699 ff_mpeg4_set_direct_mv(s, mx, my);
2700 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2701 &dmin, &next_block, mx, my);
2703 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
2704 backup_s.dquant = 0;
2705 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2707 ff_mpeg4_set_direct_mv(s, 0, 0);
2708 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2709 &dmin, &next_block, 0, 0);
2711 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
2714 coded |= s->block_last_index[i];
2717 memcpy(s->mv, best_s.mv, sizeof(s->mv));
2718 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
2719 mx=my=0; //FIXME find the one we actually used
2720 ff_mpeg4_set_direct_mv(s, mx, my);
2721 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
2729 s->mv_dir= best_s.mv_dir;
2730 s->mv_type = best_s.mv_type;
2732 /* s->mv[0][0][0] = best_s.mv[0][0][0];
2733 s->mv[0][0][1] = best_s.mv[0][0][1];
2734 s->mv[1][0][0] = best_s.mv[1][0][0];
2735 s->mv[1][0][1] = best_s.mv[1][0][1];*/
2738 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2739 &dmin, &next_block, mx, my);
2744 s->current_picture.qscale_table[xy] = best_s.qscale;
2746 copy_context_after_encode(s, &best_s, -1);
2748 pb_bits_count= put_bits_count(&s->pb);
2749 flush_put_bits(&s->pb);
2750 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
2753 if(s->data_partitioning){
2754 pb2_bits_count= put_bits_count(&s->pb2);
2755 flush_put_bits(&s->pb2);
2756 avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
2757 s->pb2= backup_s.pb2;
2759 tex_pb_bits_count= put_bits_count(&s->tex_pb);
2760 flush_put_bits(&s->tex_pb);
2761 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
2762 s->tex_pb= backup_s.tex_pb;
2764 s->last_bits= put_bits_count(&s->pb);
2766 if (CONFIG_H263_ENCODER &&
2767 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2768 ff_h263_update_motion_val(s);
2770 if(next_block==0){ //FIXME 16 vs linesize16
2771 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad , s->linesize ,16);
2772 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
2773 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
2776 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
2777 ff_MPV_decode_mb(s, s->block);
2779 int motion_x = 0, motion_y = 0;
2780 s->mv_type=MV_TYPE_16X16;
2781 // only one MB-Type possible
2784 case CANDIDATE_MB_TYPE_INTRA:
2787 motion_x= s->mv[0][0][0] = 0;
2788 motion_y= s->mv[0][0][1] = 0;
2790 case CANDIDATE_MB_TYPE_INTER:
2791 s->mv_dir = MV_DIR_FORWARD;
2793 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
2794 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
2796 case CANDIDATE_MB_TYPE_INTER_I:
2797 s->mv_dir = MV_DIR_FORWARD;
2798 s->mv_type = MV_TYPE_FIELD;
2801 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2802 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2803 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2806 case CANDIDATE_MB_TYPE_INTER4V:
2807 s->mv_dir = MV_DIR_FORWARD;
2808 s->mv_type = MV_TYPE_8X8;
2811 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2812 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2815 case CANDIDATE_MB_TYPE_DIRECT:
2816 if (CONFIG_MPEG4_ENCODER) {
2817 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2819 motion_x=s->b_direct_mv_table[xy][0];
2820 motion_y=s->b_direct_mv_table[xy][1];
2821 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
2824 case CANDIDATE_MB_TYPE_DIRECT0:
2825 if (CONFIG_MPEG4_ENCODER) {
2826 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2828 ff_mpeg4_set_direct_mv(s, 0, 0);
2831 case CANDIDATE_MB_TYPE_BIDIR:
2832 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2834 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2835 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2836 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2837 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2839 case CANDIDATE_MB_TYPE_BACKWARD:
2840 s->mv_dir = MV_DIR_BACKWARD;
2842 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2843 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2845 case CANDIDATE_MB_TYPE_FORWARD:
2846 s->mv_dir = MV_DIR_FORWARD;
2848 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2849 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2851 case CANDIDATE_MB_TYPE_FORWARD_I:
2852 s->mv_dir = MV_DIR_FORWARD;
2853 s->mv_type = MV_TYPE_FIELD;
2856 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2857 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2858 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2861 case CANDIDATE_MB_TYPE_BACKWARD_I:
2862 s->mv_dir = MV_DIR_BACKWARD;
2863 s->mv_type = MV_TYPE_FIELD;
2866 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2867 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2868 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2871 case CANDIDATE_MB_TYPE_BIDIR_I:
2872 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2873 s->mv_type = MV_TYPE_FIELD;
2875 for(dir=0; dir<2; dir++){
2877 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2878 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2879 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2884 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
2887 encode_mb(s, motion_x, motion_y);
2889 // RAL: Update last macroblock type
2890 s->last_mv_dir = s->mv_dir;
2892 if (CONFIG_H263_ENCODER &&
2893 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2894 ff_h263_update_motion_val(s);
2896 ff_MPV_decode_mb(s, s->block);
2899 /* clean the MV table in IPS frames for direct mode in B frames */
2900 if(s->mb_intra /* && I,P,S_TYPE */){
2901 s->p_mv_table[xy][0]=0;
2902 s->p_mv_table[xy][1]=0;
2905 if(s->flags&CODEC_FLAG_PSNR){
2909 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2910 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2912 s->current_picture.f.error[0] += sse(
2913 s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
2914 s->dest[0], w, h, s->linesize);
2915 s->current_picture.f.error[1] += sse(
2916 s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
2917 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2918 s->current_picture.f.error[2] += sse(
2919 s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
2920 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2923 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
2924 ff_h263_loop_filter(s);
2926 av_dlog(s->avctx, "MB %d %d bits\n",
2927 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
2931 //not beautiful here but we must write it before flushing so it has to be here
2932 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
2933 ff_msmpeg4_encode_ext_header(s);
2937 /* Send the last GOB if RTP */
2938 if (s->avctx->rtp_callback) {
2939 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
2940 pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
2941 /* Call the RTP callback to send the last GOB */
2943 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
2949 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold the motion-estimation statistics of a slice-thread context into the
 * main context.  MERGE() (defined just above) adds src's field into dst's
 * field and zeroes src's copy, so repeated merges don't double-count.
 * NOTE(review): this extract is missing lines (original numbering is
 * non-contiguous); the closing brace is elided here. */
2950 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
2951 MERGE(me.scene_change_score);
2952 MERGE(me.mc_mb_var_sum_temp);
2953 MERGE(me.mb_var_sum_temp);
/* Fold per-slice encoding results from a thread context back into the main
 * context: DCT/noise-reduction counters, error-resilience counts, PSNR error
 * sums, and finally the slice's bitstream, which is appended byte-aligned to
 * the main PutBitContext.
 * NOTE(review): intermediate lines (declarations, some MERGE()s, closing
 * braces) are elided in this extract. */
2956 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
2959 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
2960 MERGE(dct_count[1]);
2969 MERGE(er.error_count);
2970 MERGE(padding_bug_score);
2971 MERGE(current_picture.f.error[0]);
2972 MERGE(current_picture.f.error[1]);
2973 MERGE(current_picture.f.error[2]);
/* Noise-reduction history is only maintained when the feature is enabled. */
2975 if(dst->avctx->noise_reduction){
2976 for(i=0; i<64; i++){
2977 MERGE(dct_error_sum[0][i]);
2978 MERGE(dct_error_sum[1][i]);
/* Both bitstreams must be byte-aligned before concatenation. */
2982 assert(put_bits_count(&src->pb) % 8 ==0);
2983 assert(put_bits_count(&dst->pb) % 8 ==0);
2984 avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
2985 flush_put_bits(&dst->pb);
/* Pick the quantizer/lambda for the current picture.
 * Priority: an explicitly queued next_lambda, else the rate controller
 * (ff_rate_estimate_qscale) unless qscale is fixed.  With adaptive
 * quantization, codec-specific qscale cleanup is applied and the per-MB
 * qscale table is (re)initialized.  dry_run avoids consuming next_lambda
 * (used for the two-pass probe in encode_picture).
 * NOTE(review): error-return paths and closing braces are elided in this
 * extract. */
2988 static int estimate_qp(MpegEncContext *s, int dry_run){
2989 if (s->next_lambda){
2990 s->current_picture_ptr->f.quality =
2991 s->current_picture.f.quality = s->next_lambda;
2992 if(!dry_run) s->next_lambda= 0;
2993 } else if (!s->fixed_qscale) {
2994 s->current_picture_ptr->f.quality =
2995 s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
2996 if (s->current_picture.f.quality < 0)
3000 if(s->adaptive_quant){
3001 switch(s->codec_id){
3002 case AV_CODEC_ID_MPEG4:
3003 if (CONFIG_MPEG4_ENCODER)
3004 ff_clean_mpeg4_qscales(s);
3006 case AV_CODEC_ID_H263:
3007 case AV_CODEC_ID_H263P:
3008 case AV_CODEC_ID_FLV1:
3009 if (CONFIG_H263_ENCODER)
3010 ff_clean_h263_qscales(s);
3013 ff_init_qscale_tab(s);
/* Adaptive quant: lambda comes from the per-MB table; otherwise from the
 * picture-level quality chosen above. */
3016 s->lambda= s->lambda_table[0];
3019 s->lambda = s->current_picture.f.quality;
3024 /* must be called before writing the header */
/* Update temporal distances used for B-frame MV prediction/scaling:
 * pp_time = distance between the two surrounding non-B pictures,
 * pb_time = distance from the previous non-B picture to this B picture.
 * Times are derived from the picture pts scaled by time_base.num. */
3025 static void set_frame_distances(MpegEncContext * s){
3026 assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
3027 s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
3029 if(s->pict_type==AV_PICTURE_TYPE_B){
3030 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
/* A B-frame must lie strictly between its two references in time. */
3031 assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3033 s->pp_time= s->time - s->last_non_b_time;
3034 s->last_non_b_time= s->time;
3035 assert(s->picture_number==0 || s->pp_time > 0);
/* Top-level per-picture encode driver: sets frame timing, runs (threaded)
 * motion estimation, handles scene-change promotion to I-frame, chooses
 * f_code/b_code and clips long MVs, estimates the quantizer, writes the
 * codec-specific picture header, then dispatches encode_thread over all
 * slice contexts and merges their results.
 * NOTE(review): this extract has many elided lines (declarations, braces,
 * error returns); comments below describe only what is visible. */
3039 static int encode_picture(MpegEncContext *s, int picture_number)
3043 int context_count = s->slice_context_count;
3045 s->picture_number = picture_number;
3047 /* Reset the average MB variance */
3048 s->me.mb_var_sum_temp =
3049 s->me.mc_mb_var_sum_temp = 0;
3051 /* we need to initialize some time vars before we can encode b-frames */
3052 // RAL: Condition added for MPEG1VIDEO
3053 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3054 set_frame_distances(s);
3055 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3056 ff_set_mpeg4_time(s);
3058 s->me.scene_change_score=0;
3060 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* no_rounding alternates on non-B pictures for codecs that use flipflop
 * rounding; I-pictures reset it (set for msmpeg4 v3+). */
3062 if(s->pict_type==AV_PICTURE_TYPE_I){
3063 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3064 else s->no_rounding=0;
3065 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3066 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3067 s->no_rounding ^= 1;
/* Two-pass: probe the quantizer (dry run) and take fcodes from the log;
 * otherwise reuse the last lambda for this picture type as the ME lambda. */
3070 if(s->flags & CODEC_FLAG_PASS2){
3071 if (estimate_qp(s,1) < 0)
3073 ff_get_2pass_fcode(s);
3074 }else if(!(s->flags & CODEC_FLAG_QSCALE)){
3075 if(s->pict_type==AV_PICTURE_TYPE_B)
3076 s->lambda= s->last_lambda_for[s->pict_type];
3078 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3082 s->mb_intra=0; //for the rate distortion & bit compare functions
3083 for(i=1; i<context_count; i++){
3084 ret = ff_update_duplicate_context(s->thread_context[i], s);
3092 /* Estimate motion for every MB */
3093 if(s->pict_type != AV_PICTURE_TYPE_I){
3094 s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3095 s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3096 if (s->pict_type != AV_PICTURE_TYPE_B) {
3097 if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3098 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3102 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3103 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
/* I-picture: all MBs are intra; still measure spatial complexity for
 * rate control unless qscale is fixed. */
3105 for(i=0; i<s->mb_stride*s->mb_height; i++)
3106 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3108 if(!s->fixed_qscale){
3109 /* finding spatial complexity for I-frame rate control */
3110 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3113 for(i=1; i<context_count; i++){
3114 merge_context_after_me(s, s->thread_context[i]);
3116 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3117 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* Scene change: promote a P-picture to I and mark every MB intra. */
3120 if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3121 s->pict_type= AV_PICTURE_TYPE_I;
3122 for(i=0; i<s->mb_stride*s->mb_height; i++)
3123 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3124 av_dlog(s, "Scene change detected, encoding as I Frame %d %d\n",
3125 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* P/S pictures: choose f_code from the MV tables (frame + optional field
 * MVs for interlaced ME), then clip MVs that exceed the fcode range. */
3129 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3130 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3132 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3134 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3135 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3136 s->f_code= FFMAX3(s->f_code, a, b);
3139 ff_fix_long_p_mvs(s);
3140 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3141 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3145 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3146 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
/* B pictures: f_code from forward/bidir-forward tables, b_code from
 * backward/bidir-backward tables, then clip all four MV tables. */
3151 if(s->pict_type==AV_PICTURE_TYPE_B){
3154 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3155 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3156 s->f_code = FFMAX(a, b);
3158 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3159 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3160 s->b_code = FFMAX(a, b);
3162 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3163 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3164 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3165 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3166 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3168 for(dir=0; dir<2; dir++){
3171 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3172 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3173 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3174 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* Final quantizer decision for this picture. */
3182 if (estimate_qp(s, 0) < 0)
3185 if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
3186 s->qscale= 3; //reduce clipping problems
3188 if (s->out_format == FMT_MJPEG) {
3189 /* for mjpeg, we do include qscale in the matrix */
3191 int j= s->dsp.idct_permutation[i];
3193 s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3195 s->y_dc_scale_table=
3196 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3197 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3198 ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
3199 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3203 //FIXME var duplication
3204 s->current_picture_ptr->f.key_frame =
3205 s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3206 s->current_picture_ptr->f.pict_type =
3207 s->current_picture.f.pict_type = s->pict_type;
3209 if (s->current_picture.f.key_frame)
3210 s->picture_in_gop_number=0;
/* Write the codec-specific picture header and account its bit cost. */
3212 s->last_bits= put_bits_count(&s->pb);
3213 switch(s->out_format) {
3215 if (CONFIG_MJPEG_ENCODER)
3216 ff_mjpeg_encode_picture_header(s);
3219 if (CONFIG_H261_ENCODER)
3220 ff_h261_encode_picture_header(s, picture_number);
3223 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3224 ff_wmv2_encode_picture_header(s, picture_number);
3225 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3226 ff_msmpeg4_encode_picture_header(s, picture_number);
3227 else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
3228 ff_mpeg4_encode_picture_header(s, picture_number);
3229 else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10)
3230 ff_rv10_encode_picture_header(s, picture_number);
3231 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3232 ff_rv20_encode_picture_header(s, picture_number);
3233 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3234 ff_flv_encode_picture_header(s, picture_number);
3235 else if (CONFIG_H263_ENCODER)
3236 ff_h263_encode_picture_header(s, picture_number);
3239 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3240 ff_mpeg1_encode_picture_header(s, picture_number);
3245 bits= put_bits_count(&s->pb);
3246 s->header_bits= bits - s->last_bits;
/* Run the slice encoders in parallel, then merge their output/stats. */
3248 for(i=1; i<context_count; i++){
3249 update_duplicate_context_after_me(s->thread_context[i], s);
3251 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3252 for(i=1; i<context_count; i++){
3253 merge_context_after_encode(s, s->thread_context[i]);
/* Noise-reduction filter on a DCT block (C reference implementation,
 * installed as s->denoise_dct): for each of the 64 coefficients, shrink the
 * magnitude toward zero by the learned per-coefficient offset
 * (dct_offset[intra][i]) and accumulate the signed residual into
 * dct_error_sum[intra][i], which feeds future offset estimates.
 * Separate statistics are kept for intra vs inter blocks.
 * NOTE(review): the negative-level branch and block[i] store lines are
 * elided in this extract. */
3259 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3260 const int intra= s->mb_intra;
3263 s->dct_count[intra]++;
3265 for(i=0; i<64; i++){
3266 int level= block[i];
/* positive level: subtract offset, clamp at zero */
3270 s->dct_error_sum[intra][i] += level;
3271 level -= s->dct_offset[intra][i];
3272 if(level<0) level=0;
/* negative level: add offset, clamp at zero */
3274 s->dct_error_sum[intra][i] -= level;
3275 level += s->dct_offset[intra][i];
3276 if(level>0) level=0;
/* Trellis (rate-distortion optimal) quantization of one 8x8 block.
 * Runs the forward DCT, builds up to two candidate quantized levels per
 * coefficient, then dynamic-programs over scan positions: score_tab[i] is
 * the best rate*lambda+distortion cost of coding coefficients up to i,
 * survivor[] the pruned set of predecessor positions.  Finally the best
 * last-coefficient position is picked and the block is rebuilt from
 * run/level tables in permuted scan order.
 * Returns the last nonzero index (or -1 for an all-zero block) and sets
 * *overflow when a level exceeded max_qcoeff.
 * NOTE(review): many lines (declarations, else-branches, closing braces)
 * are elided in this extract; comments describe only visible code. */
3283 static int dct_quantize_trellis_c(MpegEncContext *s,
3284 int16_t *block, int n,
3285 int qscale, int *overflow){
3287 const uint8_t *scantable= s->intra_scantable.scantable;
3288 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3290 unsigned int threshold1, threshold2;
3302 int coeff_count[64];
3303 int qmul, qadd, start_i, last_non_zero, i, dc;
3304 const int esc_length= s->ac_esc_length;
3306 uint8_t * last_length;
/* lambda in the same fixed-point scale as the distortion terms below */
3307 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3309 s->dsp.fdct (block);
3311 if(s->dct_error_sum)
3312 s->denoise_dct(s, block);
3314 qadd= ((qscale-1)|1)*8;
3325 /* For AIC we skip quant/dequant of INTRADC */
3330 /* note: block[0] is assumed to be positive */
3331 block[0] = (block[0] + (q >> 1)) / q;
/* intra path: pick intra matrix and VLC length tables; MPEG-style quant
 * uses a half-step rounding bias */
3334 qmat = s->q_intra_matrix[qscale];
3335 if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3336 bias= 1<<(QMAT_SHIFT-1);
3337 length = s->intra_ac_vlc_length;
3338 last_length= s->intra_ac_vlc_last_length;
/* inter path: inter matrix and inter VLC tables */
3342 qmat = s->q_inter_matrix[qscale];
3343 length = s->inter_ac_vlc_length;
3344 last_length= s->inter_ac_vlc_last_length;
3348 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3349 threshold2= (threshold1<<1);
/* scan backwards to find the last coefficient that quantizes nonzero */
3351 for(i=63; i>=start_i; i--) {
3352 const int j = scantable[i];
3353 int level = block[j] * qmat[j];
3355 if(((unsigned)(level+threshold1))>threshold2){
/* forward pass: for each surviving coefficient store up to two candidate
 * levels (level and level-1, sign-adjusted) in coeff[0..1][i] */
3361 for(i=start_i; i<=last_non_zero; i++) {
3362 const int j = scantable[i];
3363 int level = block[j] * qmat[j];
3365 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3366 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
3367 if(((unsigned)(level+threshold1))>threshold2){
3369 level= (bias + level)>>QMAT_SHIFT;
3371 coeff[1][i]= level-1;
3372 // coeff[2][k]= level-2;
3374 level= (bias - level)>>QMAT_SHIFT;
3375 coeff[0][i]= -level;
3376 coeff[1][i]= -level+1;
3377 // coeff[2][k]= -level+2;
3379 coeff_count[i]= FFMIN(level, 2);
3380 assert(coeff_count[i]);
3383 coeff[0][i]= (level>>31)|1;
3388 *overflow= s->max_qcoeff < max; //overflow might have happened
/* nothing survived quantization: clear the block and bail out */
3390 if(last_non_zero < start_i){
3391 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3392 return last_non_zero;
3395 score_tab[start_i]= 0;
3396 survivor[0]= start_i;
/* dynamic program over scan positions */
3399 for(i=start_i; i<=last_non_zero; i++){
3400 int level_index, j, zero_distortion;
3401 int dct_coeff= FFABS(block[ scantable[i] ]);
3402 int best_score=256*256*256*120;
/* ifast fdct outputs are AAN-scaled; undo the scale before comparing */
3404 if (s->dsp.fdct == ff_fdct_ifast)
3405 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
3406 zero_distortion= dct_coeff*dct_coeff;
3408 for(level_index=0; level_index < coeff_count[i]; level_index++){
3410 int level= coeff[level_index][i];
3411 const int alevel= FFABS(level);
/* reconstruct the dequantized value the decoder would see */
3416 if(s->out_format == FMT_H263){
3417 unquant_coeff= alevel*qmul + qadd;
3419 j= s->dsp.idct_permutation[ scantable[i] ]; //FIXME optimize
3421 unquant_coeff = (int)( alevel * qscale * s->intra_matrix[j]) >> 3;
3422 unquant_coeff = (unquant_coeff - 1) | 1;
3424 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
3425 unquant_coeff = (unquant_coeff - 1) | 1;
3430 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* small levels can use the run/level VLC tables directly */
3432 if((level&(~127)) == 0){
3433 for(j=survivor_count-1; j>=0; j--){
3434 int run= i - survivor[j];
3435 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3436 score += score_tab[i-run];
3438 if(score < best_score){
3441 level_tab[i+1]= level-64;
/* H.263-family also tracks the best "last coefficient" code here */
3445 if(s->out_format == FMT_H263){
3446 for(j=survivor_count-1; j>=0; j--){
3447 int run= i - survivor[j];
3448 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3449 score += score_tab[i-run];
3450 if(score < last_score){
3453 last_level= level-64;
/* large levels must use the escape code */
3459 distortion += esc_length*lambda;
3460 for(j=survivor_count-1; j>=0; j--){
3461 int run= i - survivor[j];
3462 int score= distortion + score_tab[i-run];
3464 if(score < best_score){
3467 level_tab[i+1]= level-64;
3471 if(s->out_format == FMT_H263){
3472 for(j=survivor_count-1; j>=0; j--){
3473 int run= i - survivor[j];
3474 int score= distortion + score_tab[i-run];
3475 if(score < last_score){
3478 last_level= level-64;
3486 score_tab[i+1]= best_score;
/* prune survivors that can no longer beat the current best path */
3488 //Note: there is a vlc code in mpeg4 which is 1 bit shorter then another one with a shorter run and the same level
3489 if(last_non_zero <= 27){
3490 for(; survivor_count; survivor_count--){
3491 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
3495 for(; survivor_count; survivor_count--){
3496 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
3501 survivor[ survivor_count++ ]= i+1;
/* non-H.263 formats: pick the best end position from score_tab */
3504 if(s->out_format != FMT_H263){
3505 last_score= 256*256*256*120;
3506 for(i= survivor[0]; i<=last_non_zero + 1; i++){
3507 int score= score_tab[i];
3508 if(i) score += lambda*2; //FIXME exacter?
3510 if(score < last_score){
3513 last_level= level_tab[i];
3514 last_run= run_tab[i];
3519 s->coded_score[n] = last_score;
3521 dc= FFABS(block[0]);
3522 last_non_zero= last_i - 1;
3523 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3525 if(last_non_zero < start_i)
3526 return last_non_zero;
/* special case: only the first coefficient survives — decide whether
 * coding it at all beats leaving the block empty */
3528 if(last_non_zero == 0 && start_i == 0){
3530 int best_score= dc * dc;
3532 for(i=0; i<coeff_count[0]; i++){
3533 int level= coeff[i][0];
3534 int alevel= FFABS(level);
3535 int unquant_coeff, score, distortion;
3537 if(s->out_format == FMT_H263){
3538 unquant_coeff= (alevel*qmul + qadd)>>3;
3540 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
3541 unquant_coeff = (unquant_coeff - 1) | 1;
3543 unquant_coeff = (unquant_coeff + 4) >> 3;
3544 unquant_coeff<<= 3 + 3;
3546 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
3548 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
3549 else score= distortion + esc_length*lambda;
3551 if(score < best_score){
3553 best_level= level - 64;
3556 block[0]= best_level;
3557 s->coded_score[n] = best_score - dc*dc;
3558 if(best_level == 0) return -1;
3559 else return last_non_zero;
/* backtrack: write the chosen run/level chain into the block in
 * permuted scan order */
3565 block[ perm_scantable[last_non_zero] ]= last_level;
3568 for(; i>start_i; i -= run_tab[i] + 1){
3569 block[ perm_scantable[i-1] ]= level_tab[i];
3572 return last_non_zero;
3575 //#define REFINE_STATS 1
/* 64x64 table of DCT basis functions in BASIS_SHIFT fixed point, indexed by
 * (permuted coefficient index, pixel index); lazily filled by build_basis()
 * and used by the dct_quantize_refine path below. */
3576 static int16_t basis[64][64];
/* Fill basis[][] with the 8x8 DCT-II basis, rows permuted by the IDCT
 * permutation 'perm' so lookups match the coefficient layout in block[].
 * NOTE(review): the surrounding loop headers (over i, j, x, y / index)
 * are elided in this extract. */
3578 static void build_basis(uint8_t *perm){
3585 double s= 0.25*(1<<BASIS_SHIFT);
3587 int perm_index= perm[index];
/* DC rows/columns of the DCT get the extra 1/sqrt(2) normalization */
3588 if(i==0) s*= sqrt(0.5);
3589 if(j==0) s*= sqrt(0.5);
3590 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/*
 * Rate-distortion refinement of an already-quantized 8x8 block
 * (quantizer noise shaping path): coefficients are nudged by +/-1 where
 * that lowers distortion + lambda*bits. Distortion deltas are evaluated
 * in the transform domain via dsp.try_8x8basis()/add_8x8basis() against
 * the residual rem[], and bit costs come from the AC VLC length tables.
 * Returns the updated last-non-zero scan index.
 *
 * NOTE(review): this region of the file is a garbled extract — interior
 * lines are missing and each remaining line carries a fused original
 * line number. Code below is preserved byte-for-byte; only comments
 * were added.
 */
3597 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
3598 int16_t *block, int16_t *weight, int16_t *orig,
3601 LOCAL_ALIGNED_16(int16_t, d1, [64]);
3602 const uint8_t *scantable= s->intra_scantable.scantable;
3603 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3604 // unsigned int threshold1, threshold2;
3609 int qmul, qadd, start_i, last_non_zero, i, dc;
3611 uint8_t * last_length;
3613 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* Statistics counters — presumably only compiled under REFINE_STATS. */
3616 static int after_last=0;
3617 static int to_zero=0;
3618 static int from_zero=0;
3621 static int messed_sign=0;
/* Lazily build the DCT basis tables on first use. */
3624 if(basis[0][0] == 0)
3625 build_basis(s->dsp.idct_permutation);
3636 /* For AIC we skip quant/dequant of INTRADC */
3640 q <<= RECON_SHIFT-3;
3641 /* note: block[0] is assumed to be positive */
3643 // block[0] = (block[0] + (q >> 1)) / q;
3645 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3646 // bias= 1<<(QMAT_SHIFT-1);
/* Intra vs. inter AC VLC length tables for bit-cost estimation
 * (the selecting if/else is among the missing lines). */
3647 length = s->intra_ac_vlc_length;
3648 last_length= s->intra_ac_vlc_last_length;
3652 length = s->inter_ac_vlc_length;
3653 last_length= s->inter_ac_vlc_last_length;
3655 last_non_zero = s->block_last_index[n];
/* rem[] starts as the (negated, fixed-point) original signal; the
 * dequantized coefficients' basis images are added in below so that
 * rem[] tracks the reconstruction residual in the transform domain. */
3660 dc += (1<<(RECON_SHIFT-1));
3661 for(i=0; i<64; i++){
3662 rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig dirrectly instead of copying to rem[]
3665 STOP_TIMER("memset rem[]")}
/* Build the noise-shaping weights (clamped to 16..63). */
3668 for(i=0; i<64; i++){
3673 w= FFABS(weight[i]) + qns*one;
3674 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
3677 // w=weight[i] = (63*qns + (w/2)) / w;
3683 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* RLE (run/level) view of the quantized block; each dequantized
 * coefficient's basis image is folded into rem[]. */
3689 for(i=start_i; i<=last_non_zero; i++){
3690 int j= perm_scantable[i];
3691 const int level= block[j];
3695 if(level<0) coeff= qmul*level - qadd;
3696 else coeff= qmul*level + qadd;
3697 run_tab[rle_index++]=run;
3700 s->dsp.add_8x8basis(rem, basis[j], coeff);
3706 if(last_non_zero>0){
3707 STOP_TIMER("init rem[]")
/* Iterative search: each pass finds the single best +/-1 coefficient
 * change and applies it, until no change improves the RD score. */
3714 int best_score=s->dsp.try_8x8basis(rem, weight, basis[0], 0);
3717 int run2, best_unquant_change=0, analyze_gradient;
3721 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
3723 if(analyze_gradient){
/* Gradient of the residual, used to skip changes whose sign cannot
 * reduce the error. */
3727 for(i=0; i<64; i++){
3730 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
3733 STOP_TIMER("rem*w*w")}
/* Try +/-1 changes on the intra DC coefficient (q-scaled directly). */
3743 const int level= block[0];
3744 int change, old_coeff;
3746 assert(s->mb_intra);
3750 for(change=-1; change<=1; change+=2){
3751 int new_level= level + change;
3752 int score, new_coeff;
3754 new_coeff= q*new_level;
3755 if(new_coeff >= 2048 || new_coeff < 0)
3758 score= s->dsp.try_8x8basis(rem, weight, basis[0], new_coeff - old_coeff);
3759 if(score<best_score){
3762 best_change= change;
3763 best_unquant_change= new_coeff - old_coeff;
3770 run2= run_tab[rle_index++];
/* Main AC loop: for each scan position, try +/-1 level changes and
 * score the VLC bit-cost delta (run/level recombination cases). */
3774 for(i=start_i; i<64; i++){
3775 int j= perm_scantable[i];
3776 const int level= block[j];
3777 int change, old_coeff;
3779 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
3783 if(level<0) old_coeff= qmul*level - qadd;
3784 else old_coeff= qmul*level + qadd;
3785 run2= run_tab[rle_index++]; //FIXME ! maybe after last
3789 assert(run2>=0 || i >= last_non_zero );
3792 for(change=-1; change<=1; change+=2){
3793 int new_level= level + change;
3794 int score, new_coeff, unquant_change;
3797 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
3801 if(new_level<0) new_coeff= qmul*new_level - qadd;
3802 else new_coeff= qmul*new_level + qadd;
3803 if(new_coeff >= 2048 || new_coeff <= -2048)
3805 //FIXME check for overflow
/* Case: nonzero level stays nonzero — simple code-length delta. */
3808 if(level < 63 && level > -63){
3809 if(i < last_non_zero)
3810 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
3811 - length[UNI_AC_ENC_INDEX(run, level+64)];
3813 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
3814 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* Case: a zero becomes +/-1 — splits the run of the next coeff. */
3817 assert(FFABS(new_level)==1);
3819 if(analyze_gradient){
3820 int g= d1[ scantable[i] ];
3821 if(g && (g^new_level) >= 0)
3825 if(i < last_non_zero){
3826 int next_i= i + run2 + 1;
3827 int next_level= block[ perm_scantable[next_i] ] + 64;
3829 if(next_level&(~127))
3832 if(next_i < last_non_zero)
3833 score += length[UNI_AC_ENC_INDEX(run, 65)]
3834 + length[UNI_AC_ENC_INDEX(run2, next_level)]
3835 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3837 score += length[UNI_AC_ENC_INDEX(run, 65)]
3838 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3839 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3841 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
3843 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3844 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* Case: a +/-1 becomes zero — merges the surrounding runs. */
3850 assert(FFABS(level)==1);
3852 if(i < last_non_zero){
3853 int next_i= i + run2 + 1;
3854 int next_level= block[ perm_scantable[next_i] ] + 64;
3856 if(next_level&(~127))
3859 if(next_i < last_non_zero)
3860 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3861 - length[UNI_AC_ENC_INDEX(run2, next_level)]
3862 - length[UNI_AC_ENC_INDEX(run, 65)];
3864 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3865 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3866 - length[UNI_AC_ENC_INDEX(run, 65)];
3868 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
3870 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3871 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* Bit cost (score so far, scaled by lambda) plus transform-domain
 * distortion delta from try_8x8basis(). */
3878 unquant_change= new_coeff - old_coeff;
3879 assert((score < 100*lambda && score > -100*lambda) || lambda==0);
3881 score+= s->dsp.try_8x8basis(rem, weight, basis[j], unquant_change);
3882 if(score<best_score){
3885 best_change= change;
3886 best_unquant_change= unquant_change;
3890 prev_level= level + 64;
3891 if(prev_level&(~127))
3900 STOP_TIMER("iterative step")}
/* Apply the single best +/-1 change found in this pass. */
3904 int j= perm_scantable[ best_coeff ];
3906 block[j] += best_change;
3908 if(best_coeff > last_non_zero){
3909 last_non_zero= best_coeff;
3917 if(block[j] - best_change){
3918 if(FFABS(block[j]) > FFABS(block[j] - best_change)){
/* Shrink last_non_zero if the change zeroed the trailing coeff. */
3930 for(; last_non_zero>=start_i; last_non_zero--){
3931 if(block[perm_scantable[last_non_zero]])
3937 if(256*256*256*64 % count == 0){
3938 printf("after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
/* Rebuild the run table and fold the applied change into rem[]. */
3943 for(i=start_i; i<=last_non_zero; i++){
3944 int j= perm_scantable[i];
3945 const int level= block[j];
3948 run_tab[rle_index++]=run;
3955 s->dsp.add_8x8basis(rem, basis[j], best_unquant_change);
3961 if(last_non_zero>0){
3962 STOP_TIMER("iterative search")
3967 return last_non_zero;
3970 int ff_dct_quantize_c(MpegEncContext *s,
3971 int16_t *block, int n,
3972 int qscale, int *overflow)
3974 int i, j, level, last_non_zero, q, start_i;
3976 const uint8_t *scantable= s->intra_scantable.scantable;
3979 unsigned int threshold1, threshold2;
3981 s->dsp.fdct (block);
3983 if(s->dct_error_sum)
3984 s->denoise_dct(s, block);
3994 /* For AIC we skip quant/dequant of INTRADC */
3997 /* note: block[0] is assumed to be positive */
3998 block[0] = (block[0] + (q >> 1)) / q;
4001 qmat = s->q_intra_matrix[qscale];
4002 bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
4006 qmat = s->q_inter_matrix[qscale];
4007 bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
4009 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4010 threshold2= (threshold1<<1);
4011 for(i=63;i>=start_i;i--) {
4013 level = block[j] * qmat[j];
4015 if(((unsigned)(level+threshold1))>threshold2){
4022 for(i=start_i; i<=last_non_zero; i++) {
4024 level = block[j] * qmat[j];
4026 // if( bias+level >= (1<<QMAT_SHIFT)
4027 // || bias-level >= (1<<QMAT_SHIFT)){
4028 if(((unsigned)(level+threshold1))>threshold2){
4030 level= (bias + level)>>QMAT_SHIFT;
4033 level= (bias - level)>>QMAT_SHIFT;
4041 *overflow= s->max_qcoeff < max; //overflow might have happened
4043 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4044 if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
4045 ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
4047 return last_non_zero;
4050 #define OFFSET(x) offsetof(MpegEncContext, x)
4051 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
4052 static const AVOption h263_options[] = {
4053 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4054 { "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4055 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4060 static const AVClass h263_class = {
4061 .class_name = "H.263 encoder",
4062 .item_name = av_default_item_name,
4063 .option = h263_options,
4064 .version = LIBAVUTIL_VERSION_INT,
4067 AVCodec ff_h263_encoder = {
4069 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4070 .type = AVMEDIA_TYPE_VIDEO,
4071 .id = AV_CODEC_ID_H263,
4072 .priv_data_size = sizeof(MpegEncContext),
4073 .init = ff_MPV_encode_init,
4074 .encode2 = ff_MPV_encode_picture,
4075 .close = ff_MPV_encode_end,
4076 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4077 .priv_class = &h263_class,
4080 static const AVOption h263p_options[] = {
4081 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4082 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4083 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4084 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4088 static const AVClass h263p_class = {
4089 .class_name = "H.263p encoder",
4090 .item_name = av_default_item_name,
4091 .option = h263p_options,
4092 .version = LIBAVUTIL_VERSION_INT,
4095 AVCodec ff_h263p_encoder = {
4097 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4098 .type = AVMEDIA_TYPE_VIDEO,
4099 .id = AV_CODEC_ID_H263P,
4100 .priv_data_size = sizeof(MpegEncContext),
4101 .init = ff_MPV_encode_init,
4102 .encode2 = ff_MPV_encode_picture,
4103 .close = ff_MPV_encode_end,
4104 .capabilities = CODEC_CAP_SLICE_THREADS,
4105 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4106 .priv_class = &h263p_class,
4109 FF_MPV_GENERIC_CLASS(msmpeg4v2)
4111 AVCodec ff_msmpeg4v2_encoder = {
4112 .name = "msmpeg4v2",
4113 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4114 .type = AVMEDIA_TYPE_VIDEO,
4115 .id = AV_CODEC_ID_MSMPEG4V2,
4116 .priv_data_size = sizeof(MpegEncContext),
4117 .init = ff_MPV_encode_init,
4118 .encode2 = ff_MPV_encode_picture,
4119 .close = ff_MPV_encode_end,
4120 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4121 .priv_class = &msmpeg4v2_class,
4124 FF_MPV_GENERIC_CLASS(msmpeg4v3)
4126 AVCodec ff_msmpeg4v3_encoder = {
4128 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4129 .type = AVMEDIA_TYPE_VIDEO,
4130 .id = AV_CODEC_ID_MSMPEG4V3,
4131 .priv_data_size = sizeof(MpegEncContext),
4132 .init = ff_MPV_encode_init,
4133 .encode2 = ff_MPV_encode_picture,
4134 .close = ff_MPV_encode_end,
4135 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4136 .priv_class = &msmpeg4v3_class,
/* Generic mpegvideo AVClass — presumably defines wmv1_class, referenced by
 * the (truncated) ff_wmv1_encoder definition below; pattern matches the
 * msmpeg4v2/msmpeg4v3 usage above. */
4139 FF_MPV_GENERIC_CLASS(wmv1)
4141 AVCodec ff_wmv1_encoder = {
4143 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4144 .type = AVMEDIA_TYPE_VIDEO,
4145 .id = AV_CODEC_ID_WMV1,
4146 .priv_data_size = sizeof(MpegEncContext),
4147 .init = ff_MPV_encode_init,
4148 .encode2 = ff_MPV_encode_picture,
4149 .close = ff_MPV_encode_end,
4150 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4151 .priv_class = &wmv1_class,