2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/internal.h"
31 #include "libavutil/intmath.h"
32 #include "libavutil/mathematics.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/opt.h"
39 #include "mpegvideo.h"
47 #include "aandcttab.h"
49 #include "mpeg4video.h"
51 #include "bytestream.h"
/* Forward declarations for the encoder internals defined later in this file. */
54 static int encode_picture(MpegEncContext *s, int picture_number);
55 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
56 static int sse_mb(MpegEncContext *s);
57 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
58 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
/* Shared default tables filled in by MPV_encode_defaults() below. */
60 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
61 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
/* Generic AVOption table exported to the per-codec encoder wrappers.
 * NOTE(review): the array body is missing from this extract (original line
 * numbers jump from 63 onward) — only the opening of the initializer is visible. */
63 const AVOption ff_mpv_generic_options[] = {
/* Precompute the integer quantization tables qmat[qscale][] (and the 16-bit
 * MMX variant qmat16[qscale][0..1][]) for every qscale in [qmin, qmax],
 * from the 8-bit quant_matrix. The scaling path depends on which fdct
 * implementation is selected, because ifast folds the AAN scale factors
 * into the DCT output while the islow/faan DCTs do not.
 * NOTE(review): this extract is missing physical lines (braces, the `i`/
 * `qscale`/`shift` declarations, the else branches' openings) — the embedded
 * original line numbers jump; do not assume the visible text is complete. */
68 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
69 uint16_t (*qmat16)[2][64],
70 const uint16_t *quant_matrix,
71 int bias, int qmin, int qmax, int intra)
76 for (qscale = qmin; qscale <= qmax; qscale++) {
/* Path 1: accurate integer DCTs — no AAN scale factors baked in. */
78 if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
79 dsp->fdct == ff_jpeg_fdct_islow_10 ||
80 dsp->fdct == ff_faandct) {
81 for (i = 0; i < 64; i++) {
82 const int j = dsp->idct_permutation[i];
83 /* 16 <= qscale * quant_matrix[i] <= 7905
84 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
85 * 19952 <= x <= 249205026
86 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
87 * 3444240 >= (1 << 36) / (x) >= 275 */
89 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
90 (qscale * quant_matrix[j]));
/* Path 2: ifast DCT — divide out the AAN scale factors (hence +14 shift). */
92 } else if (dsp->fdct == ff_fdct_ifast) {
93 for (i = 0; i < 64; i++) {
94 const int j = dsp->idct_permutation[i];
95 /* 16 <= qscale * quant_matrix[i] <= 7905
96 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
97 * 19952 <= x <= 249205026
98 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
99 * 3444240 >= (1 << 36) / (x) >= 275 */
101 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
102 (ff_aanscales[i] * qscale *
/* Path 3 (default): also fill the 16-bit qmat16 tables used by the MMX
 * quantizer; entry [1] holds the pre-divided rounding bias. */
106 for (i = 0; i < 64; i++) {
107 const int j = dsp->idct_permutation[i];
108 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
109 * Assume x = qscale * quant_matrix[i]
111 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
112 * so 32768 >= (1 << 19) / (x) >= 67 */
113 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
114 (qscale * quant_matrix[j]));
115 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
116 // (qscale * quant_matrix[i]);
117 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
118 (qscale * quant_matrix[j]);
/* Clamp degenerate multipliers so the MMX code never sees 0 or 0x8000. */
120 if (qmat16[qscale][0][i] == 0 ||
121 qmat16[qscale][0][i] == 128 * 256)
122 qmat16[qscale][0][i] = 128 * 256 - 1;
123 qmat16[qscale][1][i] =
124 ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
125 qmat16[qscale][0][i]);
/* Overflow guard: shrink QMAT_SHIFT usage until max * qmat fits in int,
 * then warn if the compile-time shift was too large. Starts at `intra`
 * to skip the DC coefficient for intra matrices. */
129 for (i = intra; i < 64; i++) {
131 if (dsp->fdct == ff_fdct_ifast) {
132 max = (8191LL * ff_aanscales[i]) >> 14;
134 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
140 av_log(NULL, AV_LOG_INFO,
141 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/* Derive the integer quantizer scale (and lambda^2) from the current
 * rate-control lambda: qscale ~= lambda * 139 / 2^(FF_LAMBDA_SHIFT+7),
 * clipped to the user's [qmin, qmax] range.
 * NOTE(review): closing lines of this function are missing from the extract. */
146 static inline void update_qscale(MpegEncContext *s)
148 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
149 (FF_LAMBDA_SHIFT + 7);
150 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
152 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/* Write a 64-entry quantization matrix to the bitstream, 8 bits per
 * coefficient, in zigzag scan order. */
156 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
162 for (i = 0; i < 64; i++) {
163 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
/**
170 * init s->current_picture.qscale_table from s->lambda_table
 * Converts each macroblock's lambda to a per-MB qscale using the same
 * lambda->qscale mapping as update_qscale(), clipped to [qmin, qmax].
 */
172 void ff_init_qscale_tab(MpegEncContext *s)
174 int8_t * const qscale_table = s->current_picture.qscale_table;
177 for (i = 0; i < s->mb_num; i++) {
178 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
179 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
180 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/* Copy the per-frame fields that motion estimation (run in a duplicated
 * slice-thread context) may have changed back into the destination context.
 * NOTE(review): the src parameter line and several COPY() lines are elided
 * in this extract. */
185 static void update_duplicate_context_after_me(MpegEncContext *dst,
188 #define COPY(a) dst->a= src->a
190 COPY(current_picture);
196 COPY(picture_in_gop_number);
197 COPY(gop_picture_number);
198 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
199 COPY(progressive_frame); // FIXME don't set in encode_header
200 COPY(partitioned_frame); // FIXME don't set in encode_header
/**
205 * Set the given MpegEncContext to defaults for encoding.
206 * the changed fields will not depend upon the prior state of the MpegEncContext.
 * Also initializes the file-scope default_mv_penalty / default_fcode_tab
 * tables shared by all encoder instances.
 */
208 static void MPV_encode_defaults(MpegEncContext *s)
211 ff_MPV_common_defaults(s);
/* Default fcode table: every MV in [-16, 16) maps to fcode 1. */
213 for (i = -16; i < 16; i++) {
214 default_fcode_tab[i + MAX_MV] = 1;
216 s->me.mv_penalty = default_mv_penalty;
217 s->fcode_tab = default_fcode_tab;
220 /* init video encoder */
/* Validate user parameters, pick the output format for the selected codec id,
 * initialize DSP/quantizer function pointers and quantization matrices, and
 * set up rate control. Returns an error code on invalid configuration.
 * NOTE(review): many physical lines (returns, braces, `break;` statements)
 * are elided in this extract — original line numbers jump throughout. */
221 av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
223 MpegEncContext *s = avctx->priv_data;
225 int chroma_h_shift, chroma_v_shift;
227 MPV_encode_defaults(s);
/* Per-codec pixel-format validation. */
229 switch (avctx->codec_id) {
230 case AV_CODEC_ID_MPEG2VIDEO:
231 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
232 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
233 av_log(avctx, AV_LOG_ERROR,
234 "only YUV420 and YUV422 are supported\n");
238 case AV_CODEC_ID_LJPEG:
239 if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
240 avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
241 avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
242 avctx->pix_fmt != AV_PIX_FMT_BGRA &&
243 ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
244 avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
245 avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
246 avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
247 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
251 case AV_CODEC_ID_MJPEG:
252 if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
253 avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
254 ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
255 avctx->pix_fmt != AV_PIX_FMT_YUV422P) ||
256 avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
257 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
262 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
263 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
/* Record the chroma format for later header emission. */
268 switch (avctx->pix_fmt) {
269 case AV_PIX_FMT_YUVJ422P:
270 case AV_PIX_FMT_YUV422P:
271 s->chroma_format = CHROMA_422;
273 case AV_PIX_FMT_YUVJ420P:
274 case AV_PIX_FMT_YUV420P:
276 s->chroma_format = CHROMA_420;
/* Copy basic stream parameters from the user-facing context. */
280 s->bit_rate = avctx->bit_rate;
281 s->width = avctx->width;
282 s->height = avctx->height;
283 if (avctx->gop_size > 600 &&
284 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
285 av_log(avctx, AV_LOG_ERROR,
286 "Warning keyframe interval too large! reducing it ...\n");
287 avctx->gop_size = 600;
289 s->gop_size = avctx->gop_size;
291 s->flags = avctx->flags;
292 s->flags2 = avctx->flags2;
293 if (avctx->max_b_frames > MAX_B_FRAMES) {
294 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
295 "is %d.\n", MAX_B_FRAMES);
297 s->max_b_frames = avctx->max_b_frames;
298 s->codec_id = avctx->codec->id;
299 s->strict_std_compliance = avctx->strict_std_compliance;
300 s->quarter_sample = (avctx->flags & CODEC_FLAG_QPEL) != 0;
301 s->mpeg_quant = avctx->mpeg_quant;
302 s->rtp_mode = !!avctx->rtp_payload_size;
303 s->intra_dc_precision = avctx->intra_dc_precision;
304 s->user_specified_pts = AV_NOPTS_VALUE;
306 if (s->gop_size <= 1) {
313 s->me_method = avctx->me_method;
316 s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
/* Adaptive quantization is enabled when any masking option or QP-RD is set. */
318 s->adaptive_quant = (s->avctx->lumi_masking ||
319 s->avctx->dark_masking ||
320 s->avctx->temporal_cplx_masking ||
321 s->avctx->spatial_cplx_masking ||
322 s->avctx->p_masking ||
323 s->avctx->border_masking ||
324 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
327 s->loop_filter = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
/* Rate-control parameter sanity checks (VBV/min/max rate consistency). */
329 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
330 av_log(avctx, AV_LOG_ERROR,
331 "a vbv buffer size is needed, "
332 "for encoding with a maximum bitrate\n");
336 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
337 av_log(avctx, AV_LOG_INFO,
338 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
341 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
342 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
346 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
347 av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
351 if (avctx->rc_max_rate &&
352 avctx->rc_max_rate == avctx->bit_rate &&
353 avctx->rc_max_rate != avctx->rc_min_rate) {
354 av_log(avctx, AV_LOG_INFO,
355 "impossible bitrate constraints, this will fail\n");
358 if (avctx->rc_buffer_size &&
359 avctx->bit_rate * (int64_t)avctx->time_base.num >
360 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
361 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
365 if (!s->fixed_qscale &&
366 avctx->bit_rate * av_q2d(avctx->time_base) >
367 avctx->bit_rate_tolerance) {
368 av_log(avctx, AV_LOG_ERROR,
369 "bitrate tolerance too small for bitrate\n");
/* MPEG-1/2 CBR: warn when the VBV buffer exceeds what vbv_delay can express. */
373 if (s->avctx->rc_max_rate &&
374 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
375 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
376 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
377 90000LL * (avctx->rc_buffer_size - 1) >
378 s->avctx->rc_max_rate * 0xFFFFLL) {
379 av_log(avctx, AV_LOG_INFO,
380 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
381 "specified vbv buffer is too large for the given bitrate!\n");
/* Feature/codec compatibility checks. */
384 if ((s->flags & CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
385 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
386 s->codec_id != AV_CODEC_ID_FLV1) {
387 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
391 if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
392 av_log(avctx, AV_LOG_ERROR,
393 "OBMC is only supported with simple mb decision\n");
397 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
398 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
402 if (s->max_b_frames &&
403 s->codec_id != AV_CODEC_ID_MPEG4 &&
404 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
405 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
406 av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
410 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
411 s->codec_id == AV_CODEC_ID_H263 ||
412 s->codec_id == AV_CODEC_ID_H263P) &&
413 (avctx->sample_aspect_ratio.num > 255 ||
414 avctx->sample_aspect_ratio.den > 255)) {
415 av_log(avctx, AV_LOG_ERROR,
416 "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
417 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
421 if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
422 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
423 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
427 // FIXME mpeg2 uses that too
428 if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
429 av_log(avctx, AV_LOG_ERROR,
430 "mpeg2 style quantization not supported by codec\n");
434 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
435 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
439 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
440 s->avctx->mb_decision != FF_MB_DECISION_RD) {
441 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
445 if (s->avctx->scenechange_threshold < 1000000000 &&
446 (s->flags & CODEC_FLAG_CLOSED_GOP)) {
447 av_log(avctx, AV_LOG_ERROR,
448 "closed gop with scene change detection are not supported yet, "
449 "set threshold to 1000000000\n");
453 if (s->flags & CODEC_FLAG_LOW_DELAY) {
454 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
455 av_log(avctx, AV_LOG_ERROR,
456 "low delay forcing is only available for mpeg2\n");
459 if (s->max_b_frames != 0) {
460 av_log(avctx, AV_LOG_ERROR,
461 "b frames cannot be used with low delay\n");
466 if (s->q_scale_type == 1) {
467 if (avctx->qmax > 12) {
468 av_log(avctx, AV_LOG_ERROR,
469 "non linear quant only supports qmax <= 12 currently\n");
474 if (s->avctx->thread_count > 1 &&
475 s->codec_id != AV_CODEC_ID_MPEG4 &&
476 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
477 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
478 (s->codec_id != AV_CODEC_ID_H263P)) {
479 av_log(avctx, AV_LOG_ERROR,
480 "multi threaded encoding not supported by codec\n");
484 if (s->avctx->thread_count < 1) {
485 av_log(avctx, AV_LOG_ERROR,
486 "automatic thread number detection not supported by codec,"
491 if (s->avctx->thread_count > 1)
494 if (!avctx->time_base.den || !avctx->time_base.num) {
495 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
499 i = (INT_MAX / 2 + 128) >> 8;
500 if (avctx->mb_threshold >= i) {
501 av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
506 if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
507 av_log(avctx, AV_LOG_INFO,
508 "notice: b_frame_strategy only affects the first pass\n");
509 avctx->b_frame_strategy = 0;
/* Reduce the time base to lowest terms. */
512 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
514 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
515 avctx->time_base.den /= i;
516 avctx->time_base.num /= i;
/* Default quantizer rounding biases; codec-dependent. */
520 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
521 s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG) {
522 // (a + x * 3 / 8) / x
523 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
524 s->inter_quant_bias = 0;
526 s->intra_quant_bias = 0;
528 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
531 if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
532 s->intra_quant_bias = avctx->intra_quant_bias;
533 if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
534 s->inter_quant_bias = avctx->inter_quant_bias;
536 av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
/* MPEG-4 stores time_increment in at most 16 bits. */
539 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
540 s->avctx->time_base.den > (1 << 16) - 1) {
541 av_log(avctx, AV_LOG_ERROR,
542 "timebase %d/%d not supported by MPEG 4 standard, "
543 "the maximum admitted value for the timebase denominator "
544 "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
548 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
/* Select the output bitstream format for the chosen codec. */
550 switch (avctx->codec->id) {
551 case AV_CODEC_ID_MPEG1VIDEO:
552 s->out_format = FMT_MPEG1;
553 s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
554 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
556 case AV_CODEC_ID_MPEG2VIDEO:
557 s->out_format = FMT_MPEG1;
558 s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
559 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
562 case AV_CODEC_ID_LJPEG:
563 case AV_CODEC_ID_MJPEG:
564 s->out_format = FMT_MJPEG;
565 s->intra_only = 1; /* force intra only for jpeg */
566 if (avctx->codec->id == AV_CODEC_ID_LJPEG &&
567 avctx->pix_fmt == AV_PIX_FMT_BGRA) {
568 s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
569 s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
570 s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
572 s->mjpeg_vsample[0] = 2;
573 s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
574 s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
575 s->mjpeg_hsample[0] = 2;
576 s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
577 s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
579 if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
580 ff_mjpeg_encode_init(s) < 0)
585 case AV_CODEC_ID_H261:
586 if (!CONFIG_H261_ENCODER)
588 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
589 av_log(avctx, AV_LOG_ERROR,
590 "The specified picture size of %dx%d is not valid for the "
591 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
592 s->width, s->height);
595 s->out_format = FMT_H261;
599 case AV_CODEC_ID_H263:
600 if (!CONFIG_H263_ENCODER)
602 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
603 s->width, s->height) == 8) {
604 av_log(avctx, AV_LOG_INFO,
605 "The specified picture size of %dx%d is not valid for "
606 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
607 "352x288, 704x576, and 1408x1152."
608 "Try H.263+.\n", s->width, s->height);
611 s->out_format = FMT_H263;
615 case AV_CODEC_ID_H263P:
616 s->out_format = FMT_H263;
619 s->h263_aic = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
620 s->modified_quant = s->h263_aic;
621 s->loop_filter = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
622 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
625 /* These are just to be sure */
629 case AV_CODEC_ID_FLV1:
630 s->out_format = FMT_H263;
631 s->h263_flv = 2; /* format = 1; 11-bit codes */
632 s->unrestricted_mv = 1;
633 s->rtp_mode = 0; /* don't allow GOB */
637 case AV_CODEC_ID_RV10:
638 s->out_format = FMT_H263;
642 case AV_CODEC_ID_RV20:
643 s->out_format = FMT_H263;
646 s->modified_quant = 1;
650 s->unrestricted_mv = 0;
652 case AV_CODEC_ID_MPEG4:
653 s->out_format = FMT_H263;
655 s->unrestricted_mv = 1;
656 s->low_delay = s->max_b_frames ? 0 : 1;
657 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
659 case AV_CODEC_ID_MSMPEG4V2:
660 s->out_format = FMT_H263;
662 s->unrestricted_mv = 1;
663 s->msmpeg4_version = 2;
667 case AV_CODEC_ID_MSMPEG4V3:
668 s->out_format = FMT_H263;
670 s->unrestricted_mv = 1;
671 s->msmpeg4_version = 3;
672 s->flipflop_rounding = 1;
676 case AV_CODEC_ID_WMV1:
677 s->out_format = FMT_H263;
679 s->unrestricted_mv = 1;
680 s->msmpeg4_version = 4;
681 s->flipflop_rounding = 1;
685 case AV_CODEC_ID_WMV2:
686 s->out_format = FMT_H263;
688 s->unrestricted_mv = 1;
689 s->msmpeg4_version = 5;
690 s->flipflop_rounding = 1;
698 avctx->has_b_frames = !s->low_delay;
702 s->progressive_frame =
703 s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
704 CODEC_FLAG_INTERLACED_ME) ||
/* Common init: allocate pictures, DSP contexts, etc. */
708 if (ff_MPV_common_init(s) < 0)
712 ff_MPV_encode_init_x86(s);
714 ff_h263dsp_init(&s->h263dsp);
715 if (!s->dct_quantize)
716 s->dct_quantize = ff_dct_quantize_c;
718 s->denoise_dct = denoise_dct_c;
719 s->fast_dct_quantize = s->dct_quantize;
721 s->dct_quantize = dct_quantize_trellis_c;
723 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
724 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
726 s->quant_precision = 5;
728 ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
729 ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
/* Per-format encoder sub-initializers. */
731 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
732 ff_h261_encode_init(s);
733 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
734 ff_h263_encode_init(s);
735 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
736 ff_msmpeg4_encode_init(s);
737 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
738 && s->out_format == FMT_MPEG1)
739 ff_mpeg1_encode_init(s);
/* Build the default (or user supplied) intra/inter matrices, permuted for
 * the active IDCT. */
742 for (i = 0; i < 64; i++) {
743 int j = s->dsp.idct_permutation[i];
744 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
746 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
747 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
748 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
750 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
753 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
754 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
756 if (s->avctx->intra_matrix)
757 s->intra_matrix[j] = s->avctx->intra_matrix[i];
758 if (s->avctx->inter_matrix)
759 s->inter_matrix[j] = s->avctx->inter_matrix[i];
762 /* precompute matrix */
763 /* for mjpeg, we do include qscale in the matrix */
764 if (s->out_format != FMT_MJPEG) {
765 ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
766 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
768 ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
769 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
773 if (ff_rate_control_init(s) < 0)
776 #if FF_API_ERROR_RATE
777 FF_DISABLE_DEPRECATION_WARNINGS
778 if (avctx->error_rate)
779 s->error_rate = avctx->error_rate;
780 FF_ENABLE_DEPRECATION_WARNINGS;
/* b_frame_strategy 2 needs downscaled scratch frames for the lookahead. */
783 if (avctx->b_frame_strategy == 2) {
784 for (i = 0; i < s->max_b_frames + 2; i++) {
785 s->tmp_frames[i] = av_frame_alloc();
786 if (!s->tmp_frames[i])
787 return AVERROR(ENOMEM);
789 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
790 s->tmp_frames[i]->width = s->width >> avctx->brd_scale;
791 s->tmp_frames[i]->height = s->height >> avctx->brd_scale;
793 ret = av_frame_get_buffer(s->tmp_frames[i], 32);
/* Encoder teardown: release rate control, common state, the MJPEG encoder
 * (if used), extradata, and the b_frame_strategy scratch frames. */
802 av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
804 MpegEncContext *s = avctx->priv_data;
807 ff_rate_control_uninit(s);
809 ff_MPV_common_end(s);
810 if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
811 s->out_format == FMT_MJPEG)
812 ff_mjpeg_encode_close(s);
814 av_freep(&avctx->extradata);
/* av_frame_free() is NULL-safe, so unallocated slots are fine. */
816 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
817 av_frame_free(&s->tmp_frames[i]);
/* Sum of absolute errors of a 16x16 block against a constant reference
 * value `ref` (used with the block mean to estimate intra coding cost). */
822 static int get_sae(uint8_t *src, int ref, int stride)
827 for (y = 0; y < 16; y++) {
828 for (x = 0; x < 16; x++) {
829 acc += FFABS(src[x + y * stride] - ref);
/* Count 16x16 blocks that look cheaper to code as intra: a block votes intra
 * when its deviation from its own mean (sae) plus a margin of 500 is still
 * below its SAD against the reference frame. */
836 static int get_intra_count(MpegEncContext *s, uint8_t *src,
837 uint8_t *ref, int stride)
845 for (y = 0; y < h; y += 16) {
846 for (x = 0; x < w; x += 16) {
847 int offset = x + y * stride;
848 int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
850 int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
851 int sae = get_sae(src + offset, mean, stride);
853 acc += sae + 500 < sad;
/* Take a user frame into the encoder's reordering queue: validate/derive its
 * pts, either reference the user's buffers directly ("direct" path) or copy
 * the planes into an internal Picture, then append it to s->input_picture[].
 * NOTE(review): several lines (the `direct = 0;` statements, copy-loop body,
 * error returns) are elided from this extract. */
860 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
864 int i, display_picture_number = 0, ret;
865 const int encoding_delay = s->max_b_frames ? s->max_b_frames :
866 (s->low_delay ? 0 : 1);
871 display_picture_number = s->input_picture_number++;
/* pts sanity: must be strictly increasing; guess one if the caller gave none. */
873 if (pts != AV_NOPTS_VALUE) {
874 if (s->user_specified_pts != AV_NOPTS_VALUE) {
876 int64_t last = s->user_specified_pts;
879 av_log(s->avctx, AV_LOG_ERROR,
880 "Error, Invalid timestamp=%"PRId64", "
881 "last=%"PRId64"\n", pts, s->user_specified_pts);
885 if (!s->low_delay && display_picture_number == 1)
886 s->dts_delta = time - last;
888 s->user_specified_pts = pts;
890 if (s->user_specified_pts != AV_NOPTS_VALUE) {
891 s->user_specified_pts =
892 pts = s->user_specified_pts + 1;
893 av_log(s->avctx, AV_LOG_INFO,
894 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
897 pts = display_picture_number;
/* Direct-rendering eligibility checks. NOTE(review): the trailing semicolon
 * below makes this `if` a no-op — upstream this reads
 * `if (!pic_arg->buf[0]) direct = 0;`; looks like a dropped/garbled line,
 * verify against the original source. */
903 if (!pic_arg->buf[0]);
905 if (pic_arg->linesize[0] != s->linesize)
907 if (pic_arg->linesize[1] != s->uvlinesize)
909 if (pic_arg->linesize[2] != s->uvlinesize)
912 av_dlog(s->avctx, "%d %d %td %td\n", pic_arg->linesize[0],
913 pic_arg->linesize[1], s->linesize, s->uvlinesize);
/* Direct path: reference the caller's frame instead of copying. */
916 i = ff_find_unused_picture(s, 1);
920 pic = &s->picture[i];
923 if ((ret = av_frame_ref(&pic->f, pic_arg)) < 0)
925 if (ff_alloc_picture(s, pic, 1) < 0) {
/* Copy path: allocate an internal picture and copy plane data into it. */
929 i = ff_find_unused_picture(s, 0);
933 pic = &s->picture[i];
936 if (ff_alloc_picture(s, pic, 0) < 0) {
940 if (pic->f.data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
941 pic->f.data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
942 pic->f.data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
945 int h_chroma_shift, v_chroma_shift;
946 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
950 for (i = 0; i < 3; i++) {
951 int src_stride = pic_arg->linesize[i];
952 int dst_stride = i ? s->uvlinesize : s->linesize;
953 int h_shift = i ? h_chroma_shift : 0;
954 int v_shift = i ? v_chroma_shift : 0;
955 int w = s->width >> h_shift;
956 int h = s->height >> v_shift;
957 uint8_t *src = pic_arg->data[i];
958 uint8_t *dst = pic->f.data[i];
960 if (!s->avctx->rc_buffer_size)
961 dst += INPLACE_OFFSET;
963 if (src_stride == dst_stride)
964 memcpy(dst, src, src_stride * h);
975 ret = av_frame_copy_props(&pic->f, pic_arg);
979 pic->f.display_picture_number = display_picture_number;
980 pic->f.pts = pts; // we set this here to avoid modifying pic_arg
983 /* shift buffer entries */
984 for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
985 s->input_picture[i - 1] = s->input_picture[i];
987 s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether frame p is similar enough to ref to be skipped entirely:
 * accumulate a per-8x8-block comparison score over all three planes (using
 * the exponent selected by frame_skip_exp) and compare it against the
 * threshold and a lambda-scaled factor. Luma is walked at 2x MB resolution
 * (bw = 2), chroma at 1x. */
992 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
998 for (plane = 0; plane < 3; plane++) {
999 const int stride = p->f.linesize[plane];
1000 const int bw = plane ? 1 : 2;
1001 for (y = 0; y < s->mb_height * bw; y++) {
1002 for (x = 0; x < s->mb_width * bw; x++) {
/* Non-shared input pictures carry an INPLACE_OFFSET of 16 bytes. */
1003 int off = p->shared ? 0 : 16;
1004 uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
1005 uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
1006 int v = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1008 switch (s->avctx->frame_skip_exp) {
1009 case 0: score = FFMAX(score, v); break;
1010 case 1: score += FFABS(v); break;
1011 case 2: score += v * v; break;
1012 case 3: score64 += FFABS(v * v * (int64_t)v); break;
1013 case 4: score64 += v * v * (int64_t)(v * v); break;
1022 if (score64 < s->avctx->frame_skip_threshold)
1024 if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
/* Helper for the B-frame lookahead: encode one frame with the scratch
 * encoder context and return its packet size (the error/return handling
 * lines are elided in this extract). The packet is freed before returning. */
1029 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1031 AVPacket pkt = { 0 };
1032 int ret, got_output;
1034 av_init_packet(&pkt);
1035 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1040 av_free_packet(&pkt);
/* b_frame_strategy == 2: brute-force lookahead. Downscale the queued input
 * pictures into s->tmp_frames, then for each candidate B-frame count j
 * encode the mini-GOP with a scratch encoder and pick the j with the lowest
 * rate-distortion cost (bits * lambda2 + SSE). Returns the best count.
 * NOTE(review): several lines (loop bodies, the final comparison that
 * updates best_rd/best_b_count, avcodec_close/av_freep cleanup) are elided. */
1044 static int estimate_best_b_count(MpegEncContext *s)
1046 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1047 AVCodecContext *c = avcodec_alloc_context3(NULL);
1048 const int scale = s->avctx->brd_scale;
1049 int i, j, out_size, p_lambda, b_lambda, lambda2;
1050 int64_t best_rd = INT64_MAX;
1051 int best_b_count = -1;
1053 assert(scale >= 0 && scale <= 3);
1056 //s->next_picture_ptr->quality;
1057 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1058 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1059 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1060 if (!b_lambda) // FIXME we should do this somewhere else
1061 b_lambda = p_lambda;
1062 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* Configure the scratch encoder to mirror the real one at reduced size. */
1065 c->width = s->width >> scale;
1066 c->height = s->height >> scale;
1067 c->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR |
1068 CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
1069 c->flags |= s->avctx->flags & CODEC_FLAG_QPEL;
1070 c->mb_decision = s->avctx->mb_decision;
1071 c->me_cmp = s->avctx->me_cmp;
1072 c->mb_cmp = s->avctx->mb_cmp;
1073 c->me_sub_cmp = s->avctx->me_sub_cmp;
1074 c->pix_fmt = AV_PIX_FMT_YUV420P;
1075 c->time_base = s->avctx->time_base;
1076 c->max_b_frames = s->max_b_frames;
1078 if (avcodec_open2(c, codec, NULL) < 0)
/* Downscale next_picture + queued input pictures into tmp_frames[]. */
1081 for (i = 0; i < s->max_b_frames + 2; i++) {
1082 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1083 s->next_picture_ptr;
1085 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1086 pre_input = *pre_input_ptr;
1088 if (!pre_input.shared && i) {
1089 pre_input.f.data[0] += INPLACE_OFFSET;
1090 pre_input.f.data[1] += INPLACE_OFFSET;
1091 pre_input.f.data[2] += INPLACE_OFFSET;
1094 s->dsp.shrink[scale](s->tmp_frames[i]->data[0], s->tmp_frames[i]->linesize[0],
1095 pre_input.f.data[0], pre_input.f.linesize[0],
1096 c->width, c->height);
1097 s->dsp.shrink[scale](s->tmp_frames[i]->data[1], s->tmp_frames[i]->linesize[1],
1098 pre_input.f.data[1], pre_input.f.linesize[1],
1099 c->width >> 1, c->height >> 1);
1100 s->dsp.shrink[scale](s->tmp_frames[i]->data[2], s->tmp_frames[i]->linesize[2],
1101 pre_input.f.data[2], pre_input.f.linesize[2],
1102 c->width >> 1, c->height >> 1);
/* Try each candidate B-count j and accumulate its RD cost. */
1106 for (j = 0; j < s->max_b_frames + 1; j++) {
1109 if (!s->input_picture[j])
1112 c->error[0] = c->error[1] = c->error[2] = 0;
/* Frame 0 acts as the I-frame anchor; its cost is not counted. */
1114 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1115 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1117 out_size = encode_frame(c, s->tmp_frames[0]);
1119 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1121 for (i = 0; i < s->max_b_frames + 1; i++) {
1122 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1124 s->tmp_frames[i + 1]->pict_type = is_p ?
1125 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1126 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1128 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1130 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1133 /* get the delayed frames */
1135 out_size = encode_frame(c, NULL);
1136 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* Add the distortion (summed SSE) reported by the scratch encoder. */
1139 rd += c->error[0] + c->error[1] + c->error[2];
1150 return best_b_count;
/* Choose the next picture to encode: handle frame skipping, decide the
 * I/P/B pattern (honoring pass-2 stats or the selected b_frame_strategy),
 * reorder input pictures into coding order, and set up new_picture /
 * current_picture. NOTE(review): this function continues past the end of
 * this extract and has elided lines throughout (returns, braces, else
 * branches) — the visible text is not the complete definition. */
1153 static int select_input_picture(MpegEncContext *s)
1157 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1158 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1159 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1161 /* set next picture type & ordering */
1162 if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
/* No reference yet (or intra-only codec): emit an I-frame immediately. */
1163 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1164 s->next_picture_ptr == NULL || s->intra_only) {
1165 s->reordered_input_picture[0] = s->input_picture[0];
1166 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
1167 s->reordered_input_picture[0]->f.coded_picture_number =
1168 s->coded_picture_number++;
/* Optional frame-skip: drop the input picture if it is close enough
 * to the previous reference. */
1172 if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1173 if (s->picture_in_gop_number < s->gop_size &&
1174 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1175 // FIXME check that the gop check above is +-1 correct
1176 av_frame_unref(&s->input_picture[0]->f);
1179 ff_vbv_update(s, 0);
/* Pass 2: take picture types from the first-pass rate-control log. */
1185 if (s->flags & CODEC_FLAG_PASS2) {
1186 for (i = 0; i < s->max_b_frames + 1; i++) {
1187 int pict_num = s->input_picture[0]->f.display_picture_number + i;
1189 if (pict_num >= s->rc_context.num_entries)
1191 if (!s->input_picture[i]) {
1192 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1196 s->input_picture[i]->f.pict_type =
1197 s->rc_context.entry[pict_num].new_pict_type;
/* Otherwise pick the B-frame run length per b_frame_strategy. */
1201 if (s->avctx->b_frame_strategy == 0) {
1202 b_frames = s->max_b_frames;
1203 while (b_frames && !s->input_picture[b_frames])
1205 } else if (s->avctx->b_frame_strategy == 1) {
/* Strategy 1: use intra-block counts as a cheap scene-change measure. */
1206 for (i = 1; i < s->max_b_frames + 1; i++) {
1207 if (s->input_picture[i] &&
1208 s->input_picture[i]->b_frame_score == 0) {
1209 s->input_picture[i]->b_frame_score =
1211 s->input_picture[i ]->f.data[0],
1212 s->input_picture[i - 1]->f.data[0],
1216 for (i = 0; i < s->max_b_frames + 1; i++) {
1217 if (s->input_picture[i] == NULL ||
1218 s->input_picture[i]->b_frame_score - 1 >
1219 s->mb_num / s->avctx->b_sensitivity)
1223 b_frames = FFMAX(0, i - 1);
1226 for (i = 0; i < b_frames + 1; i++) {
1227 s->input_picture[i]->b_frame_score = 0;
1229 } else if (s->avctx->b_frame_strategy == 2) {
1230 b_frames = estimate_best_b_count(s);
1232 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
/* Respect user-forced picture types queued on the input frames. */
1238 for (i = b_frames - 1; i >= 0; i--) {
1239 int type = s->input_picture[i]->f.pict_type;
1240 if (type && type != AV_PICTURE_TYPE_B)
1243 if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
1244 b_frames == s->max_b_frames) {
1245 av_log(s->avctx, AV_LOG_ERROR,
1246 "warning, too many b frames in a row\n");
/* GOP boundary handling (and closed-GOP shortening). */
1249 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1250 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1251 s->gop_size > s->picture_in_gop_number) {
1252 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1254 if (s->flags & CODEC_FLAG_CLOSED_GOP)
1256 s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
1260 if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
1261 s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
/* Reorder: the anchor (P/I) goes first, then its preceding B-frames. */
1264 s->reordered_input_picture[0] = s->input_picture[b_frames];
1265 if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
1266 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
1267 s->reordered_input_picture[0]->f.coded_picture_number =
1268 s->coded_picture_number++;
1269 for (i = 0; i < b_frames; i++) {
1270 s->reordered_input_picture[i + 1] = s->input_picture[i];
1271 s->reordered_input_picture[i + 1]->f.pict_type =
1273 s->reordered_input_picture[i + 1]->f.coded_picture_number =
1274 s->coded_picture_number++;
1279 if (s->reordered_input_picture[0]) {
/* B-frames are not used as references. */
1280 s->reordered_input_picture[0]->reference =
1281 s->reordered_input_picture[0]->f.pict_type !=
1282 AV_PICTURE_TYPE_B ? 3 : 0;
1284 ff_mpeg_unref_picture(s, &s->new_picture);
1285 if ((ret = ff_mpeg_ref_picture(s, &s->new_picture, s->reordered_input_picture[0])))
1288 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1289 // input is a shared pix, so we can't modify it -> alloc a new
1290 // one & ensure that the shared one is reusable
1293 int i = ff_find_unused_picture(s, 0);
1296 pic = &s->picture[i];
1298 pic->reference = s->reordered_input_picture[0]->reference;
1299 if (ff_alloc_picture(s, pic, 0) < 0) {
1303 ret = av_frame_copy_props(&pic->f, &s->reordered_input_picture[0]->f);
1307 /* mark us unused / free shared pic */
1308 av_frame_unref(&s->reordered_input_picture[0]->f);
1309 s->reordered_input_picture[0]->shared = 0;
1311 s->current_picture_ptr = pic;
1313 // input is not a shared pix -> reuse buffer for current_pix
1314 s->current_picture_ptr = s->reordered_input_picture[0];
1315 for (i = 0; i < 4; i++) {
1316 s->new_picture.f.data[i] += INPLACE_OFFSET;
1319 ff_mpeg_unref_picture(s, &s->current_picture);
1320 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1321 s->current_picture_ptr)) < 0)
1324 s->picture_number = s->new_picture.f.display_picture_number;
1326 ff_mpeg_unref_picture(s, &s->new_picture);
/* Top-level per-frame encode entry point: load/reorder the input frame,
 * run the actual encoder, apply VBV rate control (with a retry path that
 * bumps lambda and re-encodes), append stuffing bits, patch the MPEG-1/2
 * vbv_delay field for CBR, and fill in the output packet.
 * NOTE(review): this is a line-sampled listing; intermediate statements
 * (error returns, closing braces, goto labels) are not visible here. */
1331 int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1332 const AVFrame *pic_arg, int *got_packet)
1334 MpegEncContext *s = avctx->priv_data;
1335 int i, stuffing_count, ret;
1336 int context_count = s->slice_context_count;
1338 s->picture_in_gop_number++;
/* Queue the new input frame and pick the next coded picture (handles
 * B-frame reordering). */
1340 if (load_input_picture(s, pic_arg) < 0)
1343 if (select_input_picture(s) < 0) {
/* Only emit output when a picture was actually selected for coding. */
1348 if (s->new_picture.f.data[0]) {
1350 (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*MAX_MB_BYTES)) < 0)
/* Optional H.263 macroblock-info side data (12 bytes per entry, see
 * write_mb_info()). */
1353 s->mb_info_ptr = av_packet_new_side_data(pkt,
1354 AV_PKT_DATA_H263_MB_INFO,
1355 s->mb_width*s->mb_height*12);
1356 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* Split the packet buffer between slice threads proportionally to the
 * macroblock rows each thread covers. */
1359 for (i = 0; i < context_count; i++) {
1360 int start_y = s->thread_context[i]->start_mb_y;
1361 int end_y = s->thread_context[i]-> end_mb_y;
1362 int h = s->mb_height;
1363 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1364 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1366 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1369 s->pict_type = s->new_picture.f.pict_type;
1371 ff_MPV_frame_start(s, avctx);
1373 if (encode_picture(s, s->picture_number) < 0)
/* Export per-category bit statistics to the AVCodecContext. */
1376 avctx->header_bits = s->header_bits;
1377 avctx->mv_bits = s->mv_bits;
1378 avctx->misc_bits = s->misc_bits;
1379 avctx->i_tex_bits = s->i_tex_bits;
1380 avctx->p_tex_bits = s->p_tex_bits;
1381 avctx->i_count = s->i_count;
1382 // FIXME f/b_count in avctx
1383 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1384 avctx->skip_count = s->skip_count;
1386 ff_MPV_frame_end(s);
1388 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1389 ff_mjpeg_encode_picture_trailer(s);
/* VBV overflow check: if the frame is too large and lambda can still be
 * raised, increase lambda (and the per-MB lambda table), undo the state
 * changes made during encoding, and retry (the retry jump itself is in
 * lines not shown in this listing). */
1391 if (avctx->rc_buffer_size) {
1392 RateControlContext *rcc = &s->rc_context;
1393 int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;
1395 if (put_bits_count(&s->pb) > max_size &&
1396 s->lambda < s->avctx->lmax) {
1397 s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1398 (s->qscale + 1) / s->qscale);
1399 if (s->adaptive_quant) {
1401 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1402 s->lambda_table[i] =
1403 FFMAX(s->lambda_table[i] + 1,
1404 s->lambda_table[i] * (s->qscale + 1) /
1407 s->mb_skipped = 0; // done in MPV_frame_start()
1408 // done in encode_picture() so we must undo it
1409 if (s->pict_type == AV_PICTURE_TYPE_P) {
1410 if (s->flipflop_rounding ||
1411 s->codec_id == AV_CODEC_ID_H263P ||
1412 s->codec_id == AV_CODEC_ID_MPEG4)
1413 s->no_rounding ^= 1;
1415 if (s->pict_type != AV_PICTURE_TYPE_B) {
1416 s->time_base = s->last_time_base;
1417 s->last_non_b_time = s->time - s->pp_time;
1419 for (i = 0; i < context_count; i++) {
1420 PutBitContext *pb = &s->thread_context[i]->pb;
1421 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1426 assert(s->avctx->rc_max_rate);
1429 if (s->flags & CODEC_FLAG_PASS1)
1430 ff_write_pass1_stats(s);
/* Accumulate per-plane encode error (PSNR statistics). */
1432 for (i = 0; i < 4; i++) {
1433 s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
1434 avctx->error[i] += s->current_picture_ptr->f.error[i];
1437 if (s->flags & CODEC_FLAG_PASS1)
1438 assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1439 avctx->i_tex_bits + avctx->p_tex_bits ==
1440 put_bits_count(&s->pb));
1441 flush_put_bits(&s->pb);
1442 s->frame_bits = put_bits_count(&s->pb);
/* Codec-specific stuffing to keep the VBV buffer from underflowing:
 * MPEG-1/2 use zero bytes; MPEG-4 uses a 32-bit 0x000001C3 stuffing
 * start code followed by 0xFF bytes. */
1444 stuffing_count = ff_vbv_update(s, s->frame_bits);
1445 if (stuffing_count) {
1446 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1447 stuffing_count + 50) {
1448 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1452 switch (s->codec_id) {
1453 case AV_CODEC_ID_MPEG1VIDEO:
1454 case AV_CODEC_ID_MPEG2VIDEO:
1455 while (stuffing_count--) {
1456 put_bits(&s->pb, 8, 0);
1459 case AV_CODEC_ID_MPEG4:
1460 put_bits(&s->pb, 16, 0);
1461 put_bits(&s->pb, 16, 0x1C3);
1462 stuffing_count -= 4;
1463 while (stuffing_count--) {
1464 put_bits(&s->pb, 8, 0xFF);
1468 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1470 flush_put_bits(&s->pb);
1471 s->frame_bits = put_bits_count(&s->pb);
1474 /* update mpeg1/2 vbv_delay for CBR */
1475 if (s->avctx->rc_max_rate &&
1476 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1477 s->out_format == FMT_MPEG1 &&
1478 90000LL * (avctx->rc_buffer_size - 1) <=
1479 s->avctx->rc_max_rate * 0xFFFFLL) {
1480 int vbv_delay, min_delay;
1481 double inbits = s->avctx->rc_max_rate *
1482 av_q2d(s->avctx->time_base);
1483 int minbits = s->frame_bits - 8 *
1484 (s->vbv_delay_ptr - s->pb.buf - 1);
1485 double bits = s->rc_context.buffer_index + minbits - inbits;
1488 av_log(s->avctx, AV_LOG_ERROR,
1489 "Internal error, negative bits\n");
1491 assert(s->repeat_first_field == 0);
/* vbv_delay is expressed in 90 kHz clock ticks; 16 bits, split across
 * three bytes in the picture header that were saved in vbv_delay_ptr. */
1493 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1494 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1495 s->avctx->rc_max_rate;
1497 vbv_delay = FFMAX(vbv_delay, min_delay);
1499 assert(vbv_delay < 0xFFFF);
1501 s->vbv_delay_ptr[0] &= 0xF8;
1502 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1503 s->vbv_delay_ptr[1] = vbv_delay >> 5;
1504 s->vbv_delay_ptr[2] &= 0x07;
1505 s->vbv_delay_ptr[2] |= vbv_delay << 3;
/* AVCodecContext.vbv_delay is in 27 MHz units: 90 kHz * 300. */
1506 avctx->vbv_delay = vbv_delay * 300;
1508 s->total_bits += s->frame_bits;
1509 avctx->frame_bits = s->frame_bits;
/* Timestamps: with B-frames (low_delay == 0) the dts lags the pts by
 * one reordered picture; the very first picture uses dts_delta. */
1511 pkt->pts = s->current_picture.f.pts;
1512 if (!s->low_delay) {
1513 if (!s->current_picture.f.coded_picture_number)
1514 pkt->dts = pkt->pts - s->dts_delta;
1516 pkt->dts = s->reordered_pts;
1517 s->reordered_pts = s->input_picture[0]->f.pts;
1519 pkt->dts = pkt->pts;
1520 if (s->current_picture.f.key_frame)
1521 pkt->flags |= AV_PKT_FLAG_KEY;
1523 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
1527 assert((s->frame_bits & 7) == 0);
1529 pkt->size = s->frame_bits / 8;
1530 *got_packet = !!pkt->size;
/* Zero out block n entirely when its coefficients are so sparse/small
 * that coding them costs more than the distortion of dropping them.
 * Each nonzero coefficient adds a position-dependent score from tab[]
 * (low-frequency positions score higher); if the total stays below
 * |threshold| the block is cleared.  A negative threshold requests that
 * the DC coefficient be kept (skip_dc path — the assignment itself is
 * in a line not visible in this sampled listing). */
1534 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1535 int n, int threshold)
/* Score per zigzag position: 3/2/1 for the lowest frequencies, 0 beyond
 * the first three rows. */
1537 static const char tab[64] = {
1538 3, 2, 2, 1, 1, 1, 1, 1,
1539 1, 1, 1, 1, 1, 1, 1, 1,
1540 1, 1, 1, 1, 1, 1, 1, 1,
1541 0, 0, 0, 0, 0, 0, 0, 0,
1542 0, 0, 0, 0, 0, 0, 0, 0,
1543 0, 0, 0, 0, 0, 0, 0, 0,
1544 0, 0, 0, 0, 0, 0, 0, 0,
1545 0, 0, 0, 0, 0, 0, 0, 0
1550 int16_t *block = s->block[n];
1551 const int last_index = s->block_last_index[n];
1554 if (threshold < 0) {
1556 threshold = -threshold;
1560 /* Are all we could set to zero already zero? */
1561 if (last_index <= skip_dc - 1)
/* Accumulate the score over all coded coefficients in scan order;
 * levels > 1 bail out (handled in sampled-out lines). */
1564 for (i = 0; i <= last_index; i++) {
1565 const int j = s->intra_scantable.permutated[i];
1566 const int level = FFABS(block[j]);
1568 if (skip_dc && i == 0)
1572 } else if (level > 1) {
1578 if (score >= threshold)
/* Clear every eliminated coefficient and mark the block empty
 * (last_index 0 keeps DC, -1 drops everything). */
1580 for (i = skip_dc; i <= last_index; i++) {
1581 const int j = s->intra_scantable.permutated[i];
1585 s->block_last_index[n] = 0;
1587 s->block_last_index[n] = -1;
/* Clamp all quantized coefficients of a block into the codec's legal
 * range [min_qcoeff, max_qcoeff].  The intra DC coefficient is never
 * clipped.  Logs a warning when clipping occurred and simple MB
 * decision is in use (RD decision would re-evaluate anyway). */
1590 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
1594 const int maxlevel = s->max_qcoeff;
1595 const int minlevel = s->min_qcoeff;
1599 i = 1; // skip clipping of intra dc
1603 for (; i <= last_index; i++) {
1604 const int j = s->intra_scantable.permutated[i];
1605 int level = block[j];
1607 if (level > maxlevel) {
1610 } else if (level < minlevel) {
1618 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1619 av_log(s->avctx, AV_LOG_INFO,
1620 "warning, clipping %d dct coefficients to %d..%d\n",
1621 overflow, minlevel, maxlevel);
/* Compute an 8x8 perceptual weight table for noise shaping: for each
 * pixel, measure local variance over its 3x3 neighbourhood (clamped to
 * the block) and derive a weight proportional to the local standard
 * deviation — flat areas get low weight (noise is more visible there). */
1624 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
1628 for (y = 0; y < 8; y++) {
1629 for (x = 0; x < 8; x++) {
1635 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
1636 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
1637 int v = ptr[x2 + y2 * stride];
/* 36 * sqrt(count*sum(v^2) - (sum v)^2) / count == 36 * stddev-like term. */
1643 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
/* Encode one macroblock: fetch (intra) or motion-compensate and
 * difference (inter) the pixel data, optionally decide interlaced DCT,
 * run DCT + quantization with optional trellis/noise-shaping
 * refinement, apply coefficient elimination, then dispatch to the
 * codec-specific bitstream writer.  mb_block_height/mb_block_count
 * distinguish 4:2:0 (8, 6 blocks) from 4:2:2 (16, 8 blocks).
 * NOTE(review): line-sampled listing; some branches/braces are not
 * visible here. */
1648 static av_always_inline void encode_mb_internal(MpegEncContext *s,
1649 int motion_x, int motion_y,
1650 int mb_block_height,
1653 int16_t weight[8][64];
1654 int16_t orig[8][64];
1655 const int mb_x = s->mb_x;
1656 const int mb_y = s->mb_y;
1659 int dct_offset = s->linesize * 8; // default for progressive frames
1660 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1661 ptrdiff_t wrap_y, wrap_c;
1663 for (i = 0; i < mb_block_count; i++)
1664 skip_dct[i] = s->skipdct;
/* Adaptive quantization: pick this MB's lambda/qscale from the
 * per-MB tables; MPEG-4 restricts when dquant may change. */
1666 if (s->adaptive_quant) {
1667 const int last_qp = s->qscale;
1668 const int mb_xy = mb_x + mb_y * s->mb_stride;
1670 s->lambda = s->lambda_table[mb_xy];
1673 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
1674 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
1675 s->dquant = s->qscale - last_qp;
1677 if (s->out_format == FMT_H263) {
1678 s->dquant = av_clip(s->dquant, -2, 2);
1680 if (s->codec_id == AV_CODEC_ID_MPEG4) {
1682 if (s->pict_type == AV_PICTURE_TYPE_B) {
1683 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
1686 if (s->mv_type == MV_TYPE_8X8)
1692 ff_set_qscale(s, last_qp + s->dquant);
1693 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
1694 ff_set_qscale(s, s->qscale + s->dquant);
/* Source pointers for this MB in the picture being coded. */
1696 wrap_y = s->linesize;
1697 wrap_c = s->uvlinesize;
1698 ptr_y = s->new_picture.f.data[0] +
1699 (mb_y * 16 * wrap_y) + mb_x * 16;
1700 ptr_cb = s->new_picture.f.data[1] +
1701 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1702 ptr_cr = s->new_picture.f.data[2] +
1703 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
/* MB hangs over the right/bottom picture edge: replicate edge pixels
 * into the emu buffer and encode from there. */
1705 if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
1706 uint8_t *ebuf = s->edge_emu_buffer + 32;
1707 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
1709 16, 16, mb_x * 16, mb_y * 16,
1710 s->width, s->height);
1712 s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb,
1714 8, mb_block_height, mb_x * 8, mb_y * 8,
1715 s->width >> 1, s->height >> 1);
1716 ptr_cb = ebuf + 18 * wrap_y;
1717 s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr,
1719 8, mb_block_height, mb_x * 8, mb_y * 8,
1720 s->width >> 1, s->height >> 1);
1721 ptr_cr = ebuf + 18 * wrap_y + 8;
/* Intra path: decide frame vs field DCT by comparing ildct costs,
 * then fetch the raw pixels for all blocks. */
1725 if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1726 int progressive_score, interlaced_score;
1728 s->interlaced_dct = 0;
1729 progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
1731 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
1732 NULL, wrap_y, 8) - 400;
1734 if (progressive_score > 0) {
1735 interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
1736 NULL, wrap_y * 2, 8) +
1737 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
1738 NULL, wrap_y * 2, 8);
1739 if (progressive_score > interlaced_score) {
1740 s->interlaced_dct = 1;
/* Field DCT: second luma block pair starts one line below, not 8. */
1742 dct_offset = wrap_y;
1744 if (s->chroma_format == CHROMA_422)
1750 s->dsp.get_pixels(s->block[0], ptr_y , wrap_y);
1751 s->dsp.get_pixels(s->block[1], ptr_y + 8 , wrap_y);
1752 s->dsp.get_pixels(s->block[2], ptr_y + dct_offset , wrap_y);
1753 s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);
1755 if (s->flags & CODEC_FLAG_GRAY) {
1759 s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
1760 s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
1761 if (!s->chroma_y_shift) { /* 422 */
1762 s->dsp.get_pixels(s->block[6],
1763 ptr_cb + (dct_offset >> 1), wrap_c);
1764 s->dsp.get_pixels(s->block[7],
1765 ptr_cr + (dct_offset >> 1), wrap_c);
/* Inter path: motion-compensate into a prediction, then DCT-code the
 * residual (source minus prediction). */
1769 op_pixels_func (*op_pix)[4];
1770 qpel_mc_func (*op_qpix)[16];
1771 uint8_t *dest_y, *dest_cb, *dest_cr;
1773 dest_y = s->dest[0];
1774 dest_cb = s->dest[1];
1775 dest_cr = s->dest[2];
/* Rounding control: no_rounding uses the no-rnd halfpel filters
 * (B-frames always round). */
1777 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
1778 op_pix = s->hdsp.put_pixels_tab;
1779 op_qpix = s->dsp.put_qpel_pixels_tab;
1781 op_pix = s->hdsp.put_no_rnd_pixels_tab;
1782 op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
/* Forward prediction writes ("put"), backward then averages in for
 * bidirectional MBs. */
1785 if (s->mv_dir & MV_DIR_FORWARD) {
1786 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
1787 s->last_picture.f.data,
1789 op_pix = s->hdsp.avg_pixels_tab;
1790 op_qpix = s->dsp.avg_qpel_pixels_tab;
1792 if (s->mv_dir & MV_DIR_BACKWARD) {
1793 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
1794 s->next_picture.f.data,
/* Interlaced-DCT decision on the residual (source vs prediction). */
1798 if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1799 int progressive_score, interlaced_score;
1801 s->interlaced_dct = 0;
1802 progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
1805 s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
1806 ptr_y + wrap_y * 8, wrap_y,
1809 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
1810 progressive_score -= 400;
1812 if (progressive_score > 0) {
1813 interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
1816 s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
1820 if (progressive_score > interlaced_score) {
1821 s->interlaced_dct = 1;
1823 dct_offset = wrap_y;
1825 if (s->chroma_format == CHROMA_422)
1831 s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
1832 s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
1833 s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
1834 dest_y + dct_offset, wrap_y);
1835 s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
1836 dest_y + dct_offset + 8, wrap_y);
1838 if (s->flags & CODEC_FLAG_GRAY) {
1842 s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
1843 s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
1844 if (!s->chroma_y_shift) { /* 422 */
1845 s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
1846 dest_cb + (dct_offset >> 1), wrap_c);
1847 s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
1848 dest_cr + (dct_offset >> 1), wrap_c);
1851 /* pre quantization */
/* Cheap skip heuristic: if the MB's motion-compensated variance is low,
 * mark blocks whose SAD is below 20*qscale to be skipped (not DCTed). */
1852 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
1853 2 * s->qscale * s->qscale) {
1855 if (s->dsp.sad[1](NULL, ptr_y , dest_y,
1856 wrap_y, 8) < 20 * s->qscale)
1858 if (s->dsp.sad[1](NULL, ptr_y + 8,
1859 dest_y + 8, wrap_y, 8) < 20 * s->qscale)
1861 if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
1862 dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
1864 if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
1865 dest_y + dct_offset + 8,
1866 wrap_y, 8) < 20 * s->qscale)
1868 if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
1869 wrap_c, 8) < 20 * s->qscale)
1871 if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
1872 wrap_c, 8) < 20 * s->qscale)
1874 if (!s->chroma_y_shift) { /* 422 */
1875 if (s->dsp.sad[1](NULL, ptr_cb + (dct_offset >> 1),
1876 dest_cb + (dct_offset >> 1),
1877 wrap_c, 8) < 20 * s->qscale)
1879 if (s->dsp.sad[1](NULL, ptr_cr + (dct_offset >> 1),
1880 dest_cr + (dct_offset >> 1),
1881 wrap_c, 8) < 20 * s->qscale)
/* Noise shaping: capture visual weights and the pre-quantization
 * coefficients so dct_quantize_refine() can minimize weighted error. */
1887 if (s->quantizer_noise_shaping) {
1889 get_visual_weight(weight[0], ptr_y , wrap_y);
1891 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
1893 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
1895 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
1897 get_visual_weight(weight[4], ptr_cb , wrap_c);
1899 get_visual_weight(weight[5], ptr_cr , wrap_c);
1900 if (!s->chroma_y_shift) { /* 422 */
1902 get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
1905 get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
1908 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
1911 /* DCT & quantize */
1912 assert(s->out_format != FMT_MJPEG || s->qscale == 8);
1914 for (i = 0; i < mb_block_count; i++) {
1917 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
1918 // FIXME we could decide to change to quantizer instead of
1920 // JS: I don't think that would be a good idea it could lower
1921 // quality instead of improve it. Just INTRADC clipping
1922 // deserves changes in quantizer
1924 clip_coeffs(s, s->block[i], s->block_last_index[i]);
1926 s->block_last_index[i] = -1;
1928 if (s->quantizer_noise_shaping) {
1929 for (i = 0; i < mb_block_count; i++) {
1931 s->block_last_index[i] =
1932 dct_quantize_refine(s, s->block[i], weight[i],
1933 orig[i], i, s->qscale);
/* Single-coefficient elimination, inter blocks only (luma blocks 0-3,
 * chroma from block 4 up). */
1938 if (s->luma_elim_threshold && !s->mb_intra)
1939 for (i = 0; i < 4; i++)
1940 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
1941 if (s->chroma_elim_threshold && !s->mb_intra)
1942 for (i = 4; i < mb_block_count; i++)
1943 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
1945 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
1946 for (i = 0; i < mb_block_count; i++) {
1947 if (s->block_last_index[i] == -1)
1948 s->coded_score[i] = INT_MAX / 256;
/* Gray mode on intra MBs: force neutral chroma (DC for mid-gray 1024). */
1953 if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
1954 s->block_last_index[4] =
1955 s->block_last_index[5] = 0;
1957 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
1960 // non c quantize code returns incorrect block_last_index FIXME
/* Recompute last_index by scanning backwards for the last nonzero
 * coefficient when alternate scan + a non-C quantizer is in use. */
1961 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
1962 for (i = 0; i < mb_block_count; i++) {
1964 if (s->block_last_index[i] > 0) {
1965 for (j = 63; j > 0; j--) {
1966 if (s->block[i][s->intra_scantable.permutated[j]])
1969 s->block_last_index[i] = j;
1974 /* huffman encode */
1975 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
1976 case AV_CODEC_ID_MPEG1VIDEO:
1977 case AV_CODEC_ID_MPEG2VIDEO:
1978 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
1979 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
1981 case AV_CODEC_ID_MPEG4:
1982 if (CONFIG_MPEG4_ENCODER)
1983 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
1985 case AV_CODEC_ID_MSMPEG4V2:
1986 case AV_CODEC_ID_MSMPEG4V3:
1987 case AV_CODEC_ID_WMV1:
1988 if (CONFIG_MSMPEG4_ENCODER)
1989 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
1991 case AV_CODEC_ID_WMV2:
1992 if (CONFIG_WMV2_ENCODER)
1993 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
1995 case AV_CODEC_ID_H261:
1996 if (CONFIG_H261_ENCODER)
1997 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
1999 case AV_CODEC_ID_H263:
2000 case AV_CODEC_ID_H263P:
2001 case AV_CODEC_ID_FLV1:
2002 case AV_CODEC_ID_RV10:
2003 case AV_CODEC_ID_RV20:
2004 if (CONFIG_H263_ENCODER)
2005 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2007 case AV_CODEC_ID_MJPEG:
2008 if (CONFIG_MJPEG_ENCODER)
2009 ff_mjpeg_encode_mb(s, s->block);
/* Thin dispatcher: instantiate encode_mb_internal for 4:2:0 (8-line
 * chroma blocks, 6 blocks/MB) or 4:2:2 (16-line chroma, 8 blocks/MB). */
2016 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2018 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 6);
2019 else encode_mb_internal(s, motion_x, motion_y, 16, 8);
/* Snapshot the encoder state that a trial MB encode can clobber, so it
 * can be restored before trying the next candidate MB type (used by the
 * RD MB-decision loop in encode_mb_hq()). */
2022 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2025 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2028 d->mb_skip_run= s->mb_skip_run;
2030 d->last_dc[i] = s->last_dc[i];
/* Per-category bit counters (restored so each trial starts equal). */
2033 d->mv_bits= s->mv_bits;
2034 d->i_tex_bits= s->i_tex_bits;
2035 d->p_tex_bits= s->p_tex_bits;
2036 d->i_count= s->i_count;
2037 d->f_count= s->f_count;
2038 d->b_count= s->b_count;
2039 d->skip_count= s->skip_count;
2040 d->misc_bits= s->misc_bits;
2044 d->qscale= s->qscale;
2045 d->dquant= s->dquant;
2047 d->esc3_level_length= s->esc3_level_length;
/* Counterpart of copy_context_before_encode(): commit the state of the
 * winning trial encode back into the best-state context — including the
 * results (MVs, mb_intra, block_last_index, interlaced_dct) that the
 * "before" copy does not carry. */
2050 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2053 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2054 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2057 d->mb_skip_run= s->mb_skip_run;
2059 d->last_dc[i] = s->last_dc[i];
/* Per-category bit counters after the trial. */
2062 d->mv_bits= s->mv_bits;
2063 d->i_tex_bits= s->i_tex_bits;
2064 d->p_tex_bits= s->p_tex_bits;
2065 d->i_count= s->i_count;
2066 d->f_count= s->f_count;
2067 d->b_count= s->b_count;
2068 d->skip_count= s->skip_count;
2069 d->misc_bits= s->misc_bits;
/* MB-type decision outputs. */
2071 d->mb_intra= s->mb_intra;
2072 d->mb_skipped= s->mb_skipped;
2073 d->mv_type= s->mv_type;
2074 d->mv_dir= s->mv_dir;
2076 if(s->data_partitioning){
2078 d->tex_pb= s->tex_pb;
2082 d->block_last_index[i]= s->block_last_index[i];
2083 d->interlaced_dct= s->interlaced_dct;
2084 d->qscale= s->qscale;
2086 d->esc3_level_length= s->esc3_level_length;
/* Trial-encode one MB candidate type into a scratch bitstream and keep
 * it if its rate (or rate-distortion, for FF_MB_DECISION_RD) score beats
 * the current best.  Double-buffered via *next_block so the best trial's
 * bits survive until the final commit. */
2089 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2090 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2091 int *dmin, int *next_block, int motion_x, int motion_y)
2094 uint8_t *dest_backup[3];
/* Restore the pre-trial state captured by the caller. */
2096 copy_context_before_encode(s, backup, type);
2098 s->block= s->blocks[*next_block];
2099 s->pb= pb[*next_block];
2100 if(s->data_partitioning){
2101 s->pb2 = pb2 [*next_block];
2102 s->tex_pb= tex_pb[*next_block];
/* For RD decision, reconstruct into a scratchpad instead of the real
 * destination so losing trials leave the picture untouched. */
2106 memcpy(dest_backup, s->dest, sizeof(s->dest));
2107 s->dest[0] = s->rd_scratchpad;
2108 s->dest[1] = s->rd_scratchpad + 16*s->linesize;
2109 s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
2110 assert(s->linesize >= 32); //FIXME
2113 encode_mb(s, motion_x, motion_y);
/* Score = bits spent (all partitions), optionally converted to an RD
 * cost: bits*lambda2 + SSE << FF_LAMBDA_SHIFT. */
2115 score= put_bits_count(&s->pb);
2116 if(s->data_partitioning){
2117 score+= put_bits_count(&s->pb2);
2118 score+= put_bits_count(&s->tex_pb);
2121 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2122 ff_MPV_decode_mb(s, s->block);
2124 score *= s->lambda2;
2125 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2129 memcpy(s->dest, dest_backup, sizeof(s->dest));
/* New best: commit this trial's state. */
2136 copy_context_after_encode(best, s, type);
/* Sum of squared errors between two w x h pixel regions.  Uses the DSP
 * fast paths for the common 16x16 and 8x8 sizes, otherwise a generic
 * loop over the square table (for edge-clipped partial blocks). */
2140 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
/* ff_squareTbl is offset by 256 so negative differences index correctly. */
2141 uint32_t *sq = ff_squareTbl + 256;
2146 return s->dsp.sse[0](NULL, src1, src2, stride, 16);
2147 else if(w==8 && h==8)
2148 return s->dsp.sse[1](NULL, src1, src2, stride, 8);
2152 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/* Distortion of the current MB: compare the reconstructed MB (s->dest)
 * against the source picture over Y + Cb + Cr.  Full-size MBs use the
 * DSP sse/nsse functions (NSSE when mb_cmp selects it); MBs clipped by
 * the picture edge fall back to the generic sse() on the actual w x h. */
2161 static int sse_mb(MpegEncContext *s){
/* Clamp the comparison area at the right/bottom picture border. */
2165 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2166 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2169 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2170 return s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2171 +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2172 +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2174 return s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2175 +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2176 +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
/* Edge MB: chroma is compared at half resolution (w>>1, h>>1). */
2179 return sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2180 +sse(s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2181 +sse(s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/* Slice-thread worker: coarse pre-pass motion estimation, scanning MBs
 * bottom-up / right-to-left with the pre-pass diamond size. */
2184 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2185 MpegEncContext *s= *(void**)arg;
2189 s->me.dia_size= s->avctx->pre_dia_size;
2190 s->first_slice_line=1;
/* Reverse scan order (vs. the main ME pass). */
2191 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2192 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2193 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2195 s->first_slice_line=0;
/* Slice-thread worker: main motion-estimation pass.  Walks the thread's
 * MB rows in raster order, keeping block_index up to date, and runs
 * B- or P-frame motion estimation per MB (results stored in context). */
2203 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2204 MpegEncContext *s= *(void**)arg;
2206 ff_check_alignment();
2208 s->me.dia_size= s->avctx->dia_size;
2209 s->first_slice_line=1;
2210 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2211 s->mb_x=0; //for block init below
2212 ff_init_block_index(s);
2213 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
/* Advance the four luma block indices by one MB (2 blocks wide). */
2214 s->block_index[0]+=2;
2215 s->block_index[1]+=2;
2216 s->block_index[2]+=2;
2217 s->block_index[3]+=2;
2219 /* compute motion vector & mb_type and store in context */
2220 if(s->pict_type==AV_PICTURE_TYPE_B)
2221 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2223 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2225 s->first_slice_line=0;
/* Slice-thread worker: per-MB luma variance and mean for the rate
 * control / adaptive quantization pass (intra complexity measure). */
2230 static int mb_var_thread(AVCodecContext *c, void *arg){
2231 MpegEncContext *s= *(void**)arg;
2234 ff_check_alignment();
2236 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2237 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2240 uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
2242 int sum = s->dsp.pix_sum(pix, s->linesize);
/* variance = E[x^2] - E[x]^2 over 256 pixels, with rounding offsets. */
2244 varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
2246 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2247 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2248 s->me.mb_var_sum_temp += varc;
/* Finish the current slice: merge MPEG-4 data partitions and emit
 * stuffing, or MJPEG stuffing, then byte-align and flush the bitstream.
 * Misc-bit accounting is updated for two-pass rate control. */
2254 static void write_slice_end(MpegEncContext *s){
2255 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2256 if(s->partitioned_frame){
2257 ff_mpeg4_merge_partitions(s);
2260 ff_mpeg4_stuffing(&s->pb);
2261 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2262 ff_mjpeg_encode_stuffing(&s->pb);
2265 avpriv_align_put_bits(&s->pb);
2266 flush_put_bits(&s->pb);
2268 if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
2269 s->misc_bits+= get_bits_diff(s);
/* Append one 12-byte AV_PKT_DATA_H263_MB_INFO record for the current MB:
 * bit offset, qscale, GOB number, MB address, and the H.263 motion
 * vector predictors (4MV predictors are written as zero). */
2272 static void write_mb_info(MpegEncContext *s)
/* mb_info_size was already advanced by update_mb_info(); write into the
 * last reserved 12-byte slot. */
2274 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2275 int offset = put_bits_count(&s->pb);
2276 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2277 int gobn = s->mb_y / s->gob_index;
2279 if (CONFIG_H263_ENCODER)
2280 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2281 bytestream_put_le32(&ptr, offset);
2282 bytestream_put_byte(&ptr, s->qscale);
2283 bytestream_put_byte(&ptr, gobn);
2284 bytestream_put_le16(&ptr, mba);
2285 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2286 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2287 /* 4MV not implemented */
2288 bytestream_put_byte(&ptr, 0); /* hmv2 */
2289 bytestream_put_byte(&ptr, 0); /* vmv2 */
/* Track when a new MB-info record should be emitted.  Called both per-MB
 * (startcode=0) and when a resync/start code is written (startcode=1):
 * a slot is reserved every s->mb_info bytes of output, and start codes
 * force the record boundary to the code's byte position. */
2292 static void update_mb_info(MpegEncContext *s, int startcode)
2296 if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2297 s->mb_info_size += 12;
2298 s->prev_mb_info = s->last_mb_info;
2301 s->prev_mb_info = put_bits_count(&s->pb)/8;
2302 /* This might have incremented mb_info_size above, and we return without
2303 * actually writing any info into that slot yet. But in that case,
2304 * this will be called again at the start of the after writing the
2305 * start code, actually writing the mb info. */
2309 s->last_mb_info = put_bits_count(&s->pb)/8;
2310 if (!s->mb_info_size)
2311 s->mb_info_size += 12;
2315 static int encode_thread(AVCodecContext *c, void *arg){
2316 MpegEncContext *s= *(void**)arg;
2317 int mb_x, mb_y, pdif = 0;
2318 int chr_h= 16>>s->chroma_y_shift;
2320 MpegEncContext best_s, backup_s;
2321 uint8_t bit_buf[2][MAX_MB_BYTES];
2322 uint8_t bit_buf2[2][MAX_MB_BYTES];
2323 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2324 PutBitContext pb[2], pb2[2], tex_pb[2];
2326 ff_check_alignment();
2329 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2330 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2331 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2334 s->last_bits= put_bits_count(&s->pb);
2345 /* init last dc values */
2346 /* note: quant matrix value (8) is implied here */
2347 s->last_dc[i] = 128 << s->intra_dc_precision;
2349 s->current_picture.f.error[i] = 0;
2352 memset(s->last_mv, 0, sizeof(s->last_mv));
2356 switch(s->codec_id){
2357 case AV_CODEC_ID_H263:
2358 case AV_CODEC_ID_H263P:
2359 case AV_CODEC_ID_FLV1:
2360 if (CONFIG_H263_ENCODER)
2361 s->gob_index = ff_h263_get_gob_height(s);
2363 case AV_CODEC_ID_MPEG4:
2364 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2365 ff_mpeg4_init_partitions(s);
2371 s->first_slice_line = 1;
2372 s->ptr_lastgob = s->pb.buf;
2373 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2377 ff_set_qscale(s, s->qscale);
2378 ff_init_block_index(s);
2380 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2381 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2382 int mb_type= s->mb_type[xy];
2387 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2388 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2391 if(s->data_partitioning){
2392 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
2393 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2394 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2400 s->mb_y = mb_y; // moved into loop, can get changed by H.261
2401 ff_update_block_index(s);
2403 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2404 ff_h261_reorder_mb_index(s);
2405 xy= s->mb_y*s->mb_stride + s->mb_x;
2406 mb_type= s->mb_type[xy];
2409 /* write gob / video packet header */
2411 int current_packet_size, is_gob_start;
2413 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2415 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2417 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2419 switch(s->codec_id){
2420 case AV_CODEC_ID_H263:
2421 case AV_CODEC_ID_H263P:
2422 if(!s->h263_slice_structured)
2423 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2425 case AV_CODEC_ID_MPEG2VIDEO:
2426 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2427 case AV_CODEC_ID_MPEG1VIDEO:
2428 if(s->mb_skip_run) is_gob_start=0;
2433 if(s->start_mb_y != mb_y || mb_x!=0){
2436 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2437 ff_mpeg4_init_partitions(s);
2441 assert((put_bits_count(&s->pb)&7) == 0);
2442 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2444 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
2445 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2446 int d = 100 / s->error_rate;
2448 current_packet_size=0;
2449 s->pb.buf_ptr= s->ptr_lastgob;
2450 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2454 if (s->avctx->rtp_callback){
2455 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2456 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2458 update_mb_info(s, 1);
2460 switch(s->codec_id){
2461 case AV_CODEC_ID_MPEG4:
2462 if (CONFIG_MPEG4_ENCODER) {
2463 ff_mpeg4_encode_video_packet_header(s);
2464 ff_mpeg4_clean_buffers(s);
2467 case AV_CODEC_ID_MPEG1VIDEO:
2468 case AV_CODEC_ID_MPEG2VIDEO:
2469 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2470 ff_mpeg1_encode_slice_header(s);
2471 ff_mpeg1_clean_buffers(s);
2474 case AV_CODEC_ID_H263:
2475 case AV_CODEC_ID_H263P:
2476 if (CONFIG_H263_ENCODER)
2477 ff_h263_encode_gob_header(s, mb_y);
2481 if(s->flags&CODEC_FLAG_PASS1){
2482 int bits= put_bits_count(&s->pb);
2483 s->misc_bits+= bits - s->last_bits;
2487 s->ptr_lastgob += current_packet_size;
2488 s->first_slice_line=1;
2489 s->resync_mb_x=mb_x;
2490 s->resync_mb_y=mb_y;
2494 if( (s->resync_mb_x == s->mb_x)
2495 && s->resync_mb_y+1 == s->mb_y){
2496 s->first_slice_line=0;
2500 s->dquant=0; //only for QP_RD
2502 update_mb_info(s, 0);
2504 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2506 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2508 copy_context_before_encode(&backup_s, s, -1);
2510 best_s.data_partitioning= s->data_partitioning;
2511 best_s.partitioned_frame= s->partitioned_frame;
2512 if(s->data_partitioning){
2513 backup_s.pb2= s->pb2;
2514 backup_s.tex_pb= s->tex_pb;
2517 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2518 s->mv_dir = MV_DIR_FORWARD;
2519 s->mv_type = MV_TYPE_16X16;
2521 s->mv[0][0][0] = s->p_mv_table[xy][0];
2522 s->mv[0][0][1] = s->p_mv_table[xy][1];
2523 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2524 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2526 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2527 s->mv_dir = MV_DIR_FORWARD;
2528 s->mv_type = MV_TYPE_FIELD;
2531 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2532 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2533 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2535 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2536 &dmin, &next_block, 0, 0);
2538 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2539 s->mv_dir = MV_DIR_FORWARD;
2540 s->mv_type = MV_TYPE_16X16;
2544 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2545 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2547 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2548 s->mv_dir = MV_DIR_FORWARD;
2549 s->mv_type = MV_TYPE_8X8;
2552 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2553 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2555 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2556 &dmin, &next_block, 0, 0);
2558 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2559 s->mv_dir = MV_DIR_FORWARD;
2560 s->mv_type = MV_TYPE_16X16;
2562 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2563 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2564 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
2565 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2567 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
2568 s->mv_dir = MV_DIR_BACKWARD;
2569 s->mv_type = MV_TYPE_16X16;
2571 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2572 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2573 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2574 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2576 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
2577 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2578 s->mv_type = MV_TYPE_16X16;
2580 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2581 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2582 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2583 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2584 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
2585 &dmin, &next_block, 0, 0);
2587 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
2588 s->mv_dir = MV_DIR_FORWARD;
2589 s->mv_type = MV_TYPE_FIELD;
2592 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2593 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2594 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2596 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
2597 &dmin, &next_block, 0, 0);
2599 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
2600 s->mv_dir = MV_DIR_BACKWARD;
2601 s->mv_type = MV_TYPE_FIELD;
2604 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2605 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2606 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2608 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
2609 &dmin, &next_block, 0, 0);
2611 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
2612 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2613 s->mv_type = MV_TYPE_FIELD;
2615 for(dir=0; dir<2; dir++){
2617 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2618 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2619 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2622 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
2623 &dmin, &next_block, 0, 0);
2625 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
2627 s->mv_type = MV_TYPE_16X16;
2631 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
2632 &dmin, &next_block, 0, 0);
2633 if(s->h263_pred || s->h263_aic){
2635 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
2637 ff_clean_intra_table_entries(s); //old mode?
2641 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
2642 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
2643 const int last_qp= backup_s.qscale;
2646 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
2647 static const int dquant_tab[4]={-1,1,-2,2};
2649 assert(backup_s.dquant == 0);
2652 s->mv_dir= best_s.mv_dir;
2653 s->mv_type = MV_TYPE_16X16;
2654 s->mb_intra= best_s.mb_intra;
2655 s->mv[0][0][0] = best_s.mv[0][0][0];
2656 s->mv[0][0][1] = best_s.mv[0][0][1];
2657 s->mv[1][0][0] = best_s.mv[1][0][0];
2658 s->mv[1][0][1] = best_s.mv[1][0][1];
2660 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
2661 for(; qpi<4; qpi++){
2662 int dquant= dquant_tab[qpi];
2663 qp= last_qp + dquant;
2664 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
2666 backup_s.dquant= dquant;
2667 if(s->mb_intra && s->dc_val[0]){
2669 dc[i]= s->dc_val[0][ s->block_index[i] ];
2670 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
2674 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2675 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
2676 if(best_s.qscale != qp){
2677 if(s->mb_intra && s->dc_val[0]){
2679 s->dc_val[0][ s->block_index[i] ]= dc[i];
2680 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
2687 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
2688 int mx= s->b_direct_mv_table[xy][0];
2689 int my= s->b_direct_mv_table[xy][1];
2691 backup_s.dquant = 0;
2692 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2694 ff_mpeg4_set_direct_mv(s, mx, my);
2695 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2696 &dmin, &next_block, mx, my);
2698 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
2699 backup_s.dquant = 0;
2700 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2702 ff_mpeg4_set_direct_mv(s, 0, 0);
2703 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2704 &dmin, &next_block, 0, 0);
2706 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
2709 coded |= s->block_last_index[i];
2712 memcpy(s->mv, best_s.mv, sizeof(s->mv));
2713 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
2714 mx=my=0; //FIXME find the one we actually used
2715 ff_mpeg4_set_direct_mv(s, mx, my);
2716 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
2724 s->mv_dir= best_s.mv_dir;
2725 s->mv_type = best_s.mv_type;
2727 /* s->mv[0][0][0] = best_s.mv[0][0][0];
2728 s->mv[0][0][1] = best_s.mv[0][0][1];
2729 s->mv[1][0][0] = best_s.mv[1][0][0];
2730 s->mv[1][0][1] = best_s.mv[1][0][1];*/
2733 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2734 &dmin, &next_block, mx, my);
2739 s->current_picture.qscale_table[xy] = best_s.qscale;
2741 copy_context_after_encode(s, &best_s, -1);
2743 pb_bits_count= put_bits_count(&s->pb);
2744 flush_put_bits(&s->pb);
2745 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
2748 if(s->data_partitioning){
2749 pb2_bits_count= put_bits_count(&s->pb2);
2750 flush_put_bits(&s->pb2);
2751 avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
2752 s->pb2= backup_s.pb2;
2754 tex_pb_bits_count= put_bits_count(&s->tex_pb);
2755 flush_put_bits(&s->tex_pb);
2756 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
2757 s->tex_pb= backup_s.tex_pb;
2759 s->last_bits= put_bits_count(&s->pb);
2761 if (CONFIG_H263_ENCODER &&
2762 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2763 ff_h263_update_motion_val(s);
2765 if(next_block==0){ //FIXME 16 vs linesize16
2766 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad , s->linesize ,16);
2767 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
2768 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
2771 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
2772 ff_MPV_decode_mb(s, s->block);
2774 int motion_x = 0, motion_y = 0;
2775 s->mv_type=MV_TYPE_16X16;
2776 // only one MB-Type possible
2779 case CANDIDATE_MB_TYPE_INTRA:
2782 motion_x= s->mv[0][0][0] = 0;
2783 motion_y= s->mv[0][0][1] = 0;
2785 case CANDIDATE_MB_TYPE_INTER:
2786 s->mv_dir = MV_DIR_FORWARD;
2788 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
2789 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
2791 case CANDIDATE_MB_TYPE_INTER_I:
2792 s->mv_dir = MV_DIR_FORWARD;
2793 s->mv_type = MV_TYPE_FIELD;
2796 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2797 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2798 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2801 case CANDIDATE_MB_TYPE_INTER4V:
2802 s->mv_dir = MV_DIR_FORWARD;
2803 s->mv_type = MV_TYPE_8X8;
2806 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2807 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2810 case CANDIDATE_MB_TYPE_DIRECT:
2811 if (CONFIG_MPEG4_ENCODER) {
2812 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2814 motion_x=s->b_direct_mv_table[xy][0];
2815 motion_y=s->b_direct_mv_table[xy][1];
2816 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
2819 case CANDIDATE_MB_TYPE_DIRECT0:
2820 if (CONFIG_MPEG4_ENCODER) {
2821 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2823 ff_mpeg4_set_direct_mv(s, 0, 0);
2826 case CANDIDATE_MB_TYPE_BIDIR:
2827 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2829 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2830 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2831 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2832 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2834 case CANDIDATE_MB_TYPE_BACKWARD:
2835 s->mv_dir = MV_DIR_BACKWARD;
2837 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2838 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2840 case CANDIDATE_MB_TYPE_FORWARD:
2841 s->mv_dir = MV_DIR_FORWARD;
2843 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2844 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2846 case CANDIDATE_MB_TYPE_FORWARD_I:
2847 s->mv_dir = MV_DIR_FORWARD;
2848 s->mv_type = MV_TYPE_FIELD;
2851 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2852 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2853 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2856 case CANDIDATE_MB_TYPE_BACKWARD_I:
2857 s->mv_dir = MV_DIR_BACKWARD;
2858 s->mv_type = MV_TYPE_FIELD;
2861 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2862 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2863 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2866 case CANDIDATE_MB_TYPE_BIDIR_I:
2867 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2868 s->mv_type = MV_TYPE_FIELD;
2870 for(dir=0; dir<2; dir++){
2872 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2873 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2874 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2879 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
2882 encode_mb(s, motion_x, motion_y);
2884 // RAL: Update last macroblock type
2885 s->last_mv_dir = s->mv_dir;
2887 if (CONFIG_H263_ENCODER &&
2888 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2889 ff_h263_update_motion_val(s);
2891 ff_MPV_decode_mb(s, s->block);
2894 /* clean the MV table in IPS frames for direct mode in B frames */
2895 if(s->mb_intra /* && I,P,S_TYPE */){
2896 s->p_mv_table[xy][0]=0;
2897 s->p_mv_table[xy][1]=0;
2900 if(s->flags&CODEC_FLAG_PSNR){
2904 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2905 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2907 s->current_picture.f.error[0] += sse(
2908 s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
2909 s->dest[0], w, h, s->linesize);
2910 s->current_picture.f.error[1] += sse(
2911 s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
2912 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2913 s->current_picture.f.error[2] += sse(
2914 s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
2915 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2918 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
2919 ff_h263_loop_filter(s);
2921 av_dlog(s->avctx, "MB %d %d bits\n",
2922 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
2926 //not beautiful here but we must write it before flushing so it has to be here
2927 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
2928 ff_msmpeg4_encode_ext_header(s);
2932 /* Send the last GOB if RTP */
2933 if (s->avctx->rtp_callback) {
2934 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
2935 pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
2936 /* Call the RTP callback to send the last GOB */
2938 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
/* Merge helper: accumulate src->field into dst->field and zero the source,
 * so a repeated merge cannot double-count the same statistic.
 * NOTE(review): expands to two statements with no do{}while(0) wrapper —
 * only safe while it is never used as the sole body of an unbraced if/else. */
2944 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold the motion-estimation statistics gathered by one slice-thread
 * context back into the main context (scene-change score and the two
 * macroblock-variance accumulators). */
2945 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
2946 MERGE(me.scene_change_score);
2947 MERGE(me.mc_mb_var_sum_temp);
2948 MERGE(me.mb_var_sum_temp);
/* Fold per-slice encoding statistics from a worker thread context into the
 * main context, then append the worker's byte-aligned bitstream onto the
 * main PutBitContext.  (Gaps in the embedded line numbers below are lines
 * elided from this sampled listing.) */
2951 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
/* DCT statistics used by the noise-reduction filter */
2954 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
2955 MERGE(dct_count[1]);
/* error-resilience counter and PSNR error accumulators */
2964 MERGE(er.error_count);
2965 MERGE(padding_bug_score);
2966 MERGE(current_picture.f.error[0]);
2967 MERGE(current_picture.f.error[1]);
2968 MERGE(current_picture.f.error[2]);
2970 if(dst->avctx->noise_reduction){
2971 for(i=0; i<64; i++){
2972 MERGE(dct_error_sum[0][i]);
2973 MERGE(dct_error_sum[1][i]);
/* both bitstreams must be byte aligned before the raw bit copy below */
2977 assert(put_bits_count(&src->pb) % 8 ==0);
2978 assert(put_bits_count(&dst->pb) % 8 ==0);
2979 avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
2980 flush_put_bits(&dst->pb);
/* Choose the quality (lambda / qscale) for the current picture.
 * Priority: an explicitly queued s->next_lambda; otherwise the rate
 * controller (unless a fixed qscale is in use).  With adaptive
 * quantization the per-MB qscale tables are cleaned up per codec family
 * and lambda is taken from lambda_table[0].
 * dry_run: probe only — do not consume s->next_lambda.
 * A negative rate-control quality is checked at line 2991; the bail-out it
 * guards is elided from this sampled listing (gaps in the embedded line
 * numbers mark elided lines). */
2983 static int estimate_qp(MpegEncContext *s, int dry_run){
2984 if (s->next_lambda){
/* a lambda was queued for this frame: use it once */
2985 s->current_picture_ptr->f.quality =
2986 s->current_picture.f.quality = s->next_lambda;
2987 if(!dry_run) s->next_lambda= 0;
2988 } else if (!s->fixed_qscale) {
/* ask the rate controller for this frame's quality */
2989 s->current_picture_ptr->f.quality =
2990 s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
2991 if (s->current_picture.f.quality < 0)
2995 if(s->adaptive_quant){
/* smooth/clean the per-MB qscale table according to codec constraints */
2996 switch(s->codec_id){
2997 case AV_CODEC_ID_MPEG4:
2998 if (CONFIG_MPEG4_ENCODER)
2999 ff_clean_mpeg4_qscales(s);
3001 case AV_CODEC_ID_H263:
3002 case AV_CODEC_ID_H263P:
3003 case AV_CODEC_ID_FLV1:
3004 if (CONFIG_H263_ENCODER)
3005 ff_clean_h263_qscales(s);
3008 ff_init_qscale_tab(s);
3011 s->lambda= s->lambda_table[0];
3014 s->lambda = s->current_picture.f.quality;
3019 /* must be called before writing the header */
/* Derive temporal distances for the current frame from its pts:
 * pp_time = distance between the two most recent non-B frames,
 * pb_time = distance from the previous non-B frame to this B frame.
 * Requires a valid pts (asserted below). */
3020 static void set_frame_distances(MpegEncContext * s){
3021 assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
3022 s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
3024 if(s->pict_type==AV_PICTURE_TYPE_B){
/* a B-frame must lie strictly between the surrounding non-B frames */
3025 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3026 assert(s->pb_time > 0 && s->pb_time < s->pp_time);
/* non-B frame: advance the reference-to-reference distance */
3028 s->pp_time= s->time - s->last_non_b_time;
3029 s->last_non_b_time= s->time;
3030 assert(s->picture_number==0 || s->pp_time > 0);
/* Encode one picture: initialize frame timing and rounding mode, estimate
 * qp, run motion estimation across the slice-thread contexts, pick and
 * clamp f_code/b_code motion-vector ranges, write the codec-specific
 * picture header, then encode the slices in parallel and merge the worker
 * contexts back.  (Gaps in the embedded line numbers are lines elided
 * from this sampled listing, including most closing braces.) */
3034 static int encode_picture(MpegEncContext *s, int picture_number)
3038 int context_count = s->slice_context_count;
3040 s->picture_number = picture_number;
3042 /* Reset the average MB variance */
3043 s->me.mb_var_sum_temp =
3044 s->me.mc_mb_var_sum_temp = 0;
3046 /* we need to initialize some time vars before we can encode b-frames */
3047 // RAL: Condition added for MPEG1VIDEO
3048 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3049 set_frame_distances(s);
3050 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3051 ff_set_mpeg4_time(s);
3053 s->me.scene_change_score=0;
3055 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* rounding control: I-frames reset it, P/S frames may toggle it each frame */
3057 if(s->pict_type==AV_PICTURE_TYPE_I){
3058 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3059 else s->no_rounding=0;
3060 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3061 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3062 s->no_rounding ^= 1;
/* pass-2 ratecontrol probes qp and fetches fcodes from the stats file;
 * otherwise seed lambda for ME from the last frame of the same kind */
3065 if(s->flags & CODEC_FLAG_PASS2){
3066 if (estimate_qp(s,1) < 0)
3068 ff_get_2pass_fcode(s);
3069 }else if(!(s->flags & CODEC_FLAG_QSCALE)){
3070 if(s->pict_type==AV_PICTURE_TYPE_B)
3071 s->lambda= s->last_lambda_for[s->pict_type];
3073 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3077 s->mb_intra=0; //for the rate distortion & bit compare functions
/* propagate the main context's state to the slice-thread contexts */
3078 for(i=1; i<context_count; i++){
3079 ret = ff_update_duplicate_context(s->thread_context[i], s);
3087 /* Estimate motion for every MB */
3088 if(s->pict_type != AV_PICTURE_TYPE_I){
/* scale lambda by the user's ME penalty compensation */
3089 s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3090 s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3091 if (s->pict_type != AV_PICTURE_TYPE_B) {
3092 if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3093 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3097 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3098 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
/* intra frame: mark every MB intra, optionally measure complexity for RC */
3100 for(i=0; i<s->mb_stride*s->mb_height; i++)
3101 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3103 if(!s->fixed_qscale){
3104 /* finding spatial complexity for I-frame rate control */
3105 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
/* collect ME statistics from the worker contexts */
3108 for(i=1; i<context_count; i++){
3109 merge_context_after_me(s, s->thread_context[i]);
3111 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3112 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* scene-change detection: promote this P frame to an I frame */
3115 if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3116 s->pict_type= AV_PICTURE_TYPE_I;
3117 for(i=0; i<s->mb_stride*s->mb_height; i++)
3118 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
/* NOTE(review): av_dlog() is passed 's' here while every other log call in
 * view passes 's->avctx' (e.g. the per-MB trace in encode_thread) — this
 * looks like it should be s->avctx; confirm against the av_log context
 * contract (first argument must carry an AVClass pointer). */
3119 av_dlog(s, "Scene change detected, encoding as I Frame %d %d\n",
3120 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* P/S frames: choose f_code from the MV histogram, then clamp long MVs */
3124 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3125 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3127 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3129 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3130 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3131 s->f_code= FFMAX3(s->f_code, a, b);
3134 ff_fix_long_p_mvs(s);
3135 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3136 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3140 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3141 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
/* B frames: f_code from forward tables, b_code from backward tables */
3146 if(s->pict_type==AV_PICTURE_TYPE_B){
3149 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3150 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3151 s->f_code = FFMAX(a, b);
3153 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3154 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3155 s->b_code = FFMAX(a, b);
3157 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3158 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3159 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3160 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3161 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3163 for(dir=0; dir<2; dir++){
3166 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3167 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3168 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3169 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* final (non-dry-run) qp estimation for the frame */
3177 if (estimate_qp(s, 0) < 0)
3180 if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
3181 s->qscale= 3; //reduce clipping problems
3183 if (s->out_format == FMT_MJPEG) {
3184 /* for mjpeg, we do include qscale in the matrix */
3186 int j= s->dsp.idct_permutation[i];
3188 s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3190 s->y_dc_scale_table=
3191 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3192 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3193 ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
3194 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3198 //FIXME var duplication
/* mirror key-frame / picture-type info into both picture structs */
3199 s->current_picture_ptr->f.key_frame =
3200 s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3201 s->current_picture_ptr->f.pict_type =
3202 s->current_picture.f.pict_type = s->pict_type;
3204 if (s->current_picture.f.key_frame)
3205 s->picture_in_gop_number=0;
/* write the codec-specific picture header and account its bits */
3207 s->last_bits= put_bits_count(&s->pb);
3208 switch(s->out_format) {
3210 if (CONFIG_MJPEG_ENCODER)
3211 ff_mjpeg_encode_picture_header(s);
3214 if (CONFIG_H261_ENCODER)
3215 ff_h261_encode_picture_header(s, picture_number);
3218 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3219 ff_wmv2_encode_picture_header(s, picture_number);
3220 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3221 ff_msmpeg4_encode_picture_header(s, picture_number);
3222 else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
3223 ff_mpeg4_encode_picture_header(s, picture_number);
3224 else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10)
3225 ff_rv10_encode_picture_header(s, picture_number);
3226 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3227 ff_rv20_encode_picture_header(s, picture_number);
3228 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3229 ff_flv_encode_picture_header(s, picture_number);
3230 else if (CONFIG_H263_ENCODER)
3231 ff_h263_encode_picture_header(s, picture_number);
3234 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3235 ff_mpeg1_encode_picture_header(s, picture_number);
3240 bits= put_bits_count(&s->pb);
3241 s->header_bits= bits - s->last_bits;
/* encode all slices in parallel, then merge worker state and bitstreams */
3243 for(i=1; i<context_count; i++){
3244 update_duplicate_context_after_me(s->thread_context[i], s);
3246 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3247 for(i=1; i<context_count; i++){
3248 merge_context_after_encode(s, s->thread_context[i]);
/* Noise-reduction filter for a freshly transformed DCT block: accumulates
 * each coefficient's magnitude into dct_error_sum[] and shrinks the
 * coefficient toward zero by dct_offset[], clamping at zero so the sign
 * never flips.  Statistics are kept separately for intra vs inter blocks.
 * (The positive/negative branch structure is only partially visible —
 * several lines are elided from this sampled listing.) */
3254 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3255 const int intra= s->mb_intra;
3258 s->dct_count[intra]++;
3260 for(i=0; i<64; i++){
3261 int level= block[i];
/* positive coefficient: record magnitude, subtract offset, clamp at 0 */
3265 s->dct_error_sum[intra][i] += level;
3266 level -= s->dct_offset[intra][i];
3267 if(level<0) level=0;
/* negative coefficient: mirror of the branch above */
3269 s->dct_error_sum[intra][i] -= level;
3270 level += s->dct_offset[intra][i];
3271 if(level>0) level=0;
/* Trellis (dynamic-programming) quantization of one 8x8 block for
 * rate-distortion optimality.  For each scan position up to the last
 * significant coefficient it considers the two nearest quantizer levels
 * (coeff[0]/coeff[1]) plus run-length alternatives, scoring
 * VLC-bits * lambda + squared dequantization error, while pruning the set
 * of "survivor" start positions.  Returns the index of the last non-zero
 * coefficient (negative if the block quantizes to nothing) and reports
 * saturation through *overflow.  (Gaps in the embedded line numbers are
 * lines elided from this sampled listing.) */
3278 static int dct_quantize_trellis_c(MpegEncContext *s,
3279 int16_t *block, int n,
3280 int qscale, int *overflow){
3282 const uint8_t *scantable= s->intra_scantable.scantable;
3283 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3285 unsigned int threshold1, threshold2;
3297 int coeff_count[64];
3298 int qmul, qadd, start_i, last_non_zero, i, dc;
3299 const int esc_length= s->ac_esc_length;
3301 uint8_t * last_length;
3302 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
/* forward transform, then optional noise shaping */
3304 s->dsp.fdct (block);
3306 if(s->dct_error_sum)
3307 s->denoise_dct(s, block);
3309 qadd= ((qscale-1)|1)*8;
3320 /* For AIC we skip quant/dequant of INTRADC */
3325 /* note: block[0] is assumed to be positive */
3326 block[0] = (block[0] + (q >> 1)) / q;
/* intra path: matrix, MPEG-style rounding bias, intra AC VLC tables */
3329 qmat = s->q_intra_matrix[qscale];
3330 if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3331 bias= 1<<(QMAT_SHIFT-1);
3332 length = s->intra_ac_vlc_length;
3333 last_length= s->intra_ac_vlc_last_length;
/* inter path: inter matrix and inter AC VLC tables */
3337 qmat = s->q_inter_matrix[qscale];
3338 length = s->inter_ac_vlc_length;
3339 last_length= s->inter_ac_vlc_last_length;
3343 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3344 threshold2= (threshold1<<1);
/* scan backwards to find the last coefficient that survives quantization */
3346 for(i=63; i>=start_i; i--) {
3347 const int j = scantable[i];
3348 int level = block[j] * qmat[j];
3350 if(((unsigned)(level+threshold1))>threshold2){
/* build the candidate level pairs (level, level-1) per scan position */
3356 for(i=start_i; i<=last_non_zero; i++) {
3357 const int j = scantable[i];
3358 int level = block[j] * qmat[j];
3360 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3361 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
3362 if(((unsigned)(level+threshold1))>threshold2){
3364 level= (bias + level)>>QMAT_SHIFT;
3366 coeff[1][i]= level-1;
3367 // coeff[2][k]= level-2;
3369 level= (bias - level)>>QMAT_SHIFT;
3370 coeff[0][i]= -level;
3371 coeff[1][i]= -level+1;
3372 // coeff[2][k]= -level+2;
3374 coeff_count[i]= FFMIN(level, 2);
3375 assert(coeff_count[i]);
3378 coeff[0][i]= (level>>31)|1;
3383 *overflow= s->max_qcoeff < max; //overflow might have happened
/* nothing survived: clear the block and bail out early */
3385 if(last_non_zero < start_i){
3386 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3387 return last_non_zero;
3390 score_tab[start_i]= 0;
3391 survivor[0]= start_i;
/* main DP loop: one trellis stage per scan position */
3394 for(i=start_i; i<=last_non_zero; i++){
3395 int level_index, j, zero_distortion;
3396 int dct_coeff= FFABS(block[ scantable[i] ]);
3397 int best_score=256*256*256*120;
/* ifast fdct scales its output; undo via the inverse AAN table */
3399 if (s->dsp.fdct == ff_fdct_ifast)
3400 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
3401 zero_distortion= dct_coeff*dct_coeff;
3403 for(level_index=0; level_index < coeff_count[i]; level_index++){
3405 int level= coeff[level_index][i];
3406 const int alevel= FFABS(level);
/* dequantize per output format to measure true reconstruction error */
3411 if(s->out_format == FMT_H263){
3412 unquant_coeff= alevel*qmul + qadd;
3414 j= s->dsp.idct_permutation[ scantable[i] ]; //FIXME optimize
3416 unquant_coeff = (int)( alevel * qscale * s->intra_matrix[j]) >> 3;
3417 unquant_coeff = (unquant_coeff - 1) | 1;
3419 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
3420 unquant_coeff = (unquant_coeff - 1) | 1;
3425 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* non-escape levels: try each survivor as the start of the current run */
3427 if((level&(~127)) == 0){
3428 for(j=survivor_count-1; j>=0; j--){
3429 int run= i - survivor[j];
3430 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3431 score += score_tab[i-run];
3433 if(score < best_score){
3436 level_tab[i+1]= level-64;
/* H.263 has a distinct "last coefficient" VLC: track it separately */
3440 if(s->out_format == FMT_H263){
3441 for(j=survivor_count-1; j>=0; j--){
3442 int run= i - survivor[j];
3443 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3444 score += score_tab[i-run];
3445 if(score < last_score){
3448 last_level= level-64;
/* escape-coded levels: fixed escape length instead of the VLC table */
3454 distortion += esc_length*lambda;
3455 for(j=survivor_count-1; j>=0; j--){
3456 int run= i - survivor[j];
3457 int score= distortion + score_tab[i-run];
3459 if(score < best_score){
3462 level_tab[i+1]= level-64;
3466 if(s->out_format == FMT_H263){
3467 for(j=survivor_count-1; j>=0; j--){
3468 int run= i - survivor[j];
3469 int score= distortion + score_tab[i-run];
3470 if(score < last_score){
3473 last_level= level-64;
3481 score_tab[i+1]= best_score;
3483 //Note: there is a vlc code in mpeg4 which is 1 bit shorter then another one with a shorter run and the same level
/* prune survivors that can no longer lead to a better path */
3484 if(last_non_zero <= 27){
3485 for(; survivor_count; survivor_count--){
3486 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
3490 for(; survivor_count; survivor_count--){
3491 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
3496 survivor[ survivor_count++ ]= i+1;
/* non-H.263 formats: pick the best truncation point after the DP pass */
3499 if(s->out_format != FMT_H263){
3500 last_score= 256*256*256*120;
3501 for(i= survivor[0]; i<=last_non_zero + 1; i++){
3502 int score= score_tab[i];
3503 if(i) score += lambda*2; //FIXME exacter?
3505 if(score < last_score){
3508 last_level= level_tab[i];
3509 last_run= run_tab[i];
3514 s->coded_score[n] = last_score;
3516 dc= FFABS(block[0]);
3517 last_non_zero= last_i - 1;
3518 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3520 if(last_non_zero < start_i)
3521 return last_non_zero;
/* special case: only the DC coefficient remains — re-decide it alone */
3523 if(last_non_zero == 0 && start_i == 0){
3525 int best_score= dc * dc;
3527 for(i=0; i<coeff_count[0]; i++){
3528 int level= coeff[i][0];
3529 int alevel= FFABS(level);
3530 int unquant_coeff, score, distortion;
3532 if(s->out_format == FMT_H263){
3533 unquant_coeff= (alevel*qmul + qadd)>>3;
3535 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
3536 unquant_coeff = (unquant_coeff - 1) | 1;
3538 unquant_coeff = (unquant_coeff + 4) >> 3;
3539 unquant_coeff<<= 3 + 3;
3541 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
3543 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
3544 else score= distortion + esc_length*lambda;
3546 if(score < best_score){
3548 best_level= level - 64;
3551 block[0]= best_level;
3552 s->coded_score[n] = best_score - dc*dc;
3553 if(best_level == 0) return -1;
3554 else return last_non_zero;
/* traceback: rebuild the block from the chosen levels and runs */
3560 block[ perm_scantable[last_non_zero] ]= last_level;
3563 for(; i>start_i; i -= run_tab[i] + 1){
3564 block[ perm_scantable[i-1] ]= level_tab[i];
3567 return last_non_zero;
3570 //#define REFINE_STATS 1
/* 8x8 DCT basis functions, indexed by (IDCT-permuted) coefficient and then
 * by pixel, scaled by BASIS_SHIFT; filled lazily by build_basis() and used
 * by dct_quantize_refine(). */
3571 static int16_t basis[64][64];
/* Precompute the scaled 2-D DCT basis table under the given IDCT coefficient
 * permutation.  The enclosing i/j/x/y loops are elided from this sampled
 * listing (gaps in the embedded line numbers); the sqrt(0.5) factors are the
 * standard DCT normalization of the first row/column. */
3573 static void build_basis(uint8_t *perm){
3580 double s= 0.25*(1<<BASIS_SHIFT);
3582 int perm_index= perm[index];
3583 if(i==0) s*= sqrt(0.5);
3584 if(j==0) s*= sqrt(0.5);
3585 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Iteratively refine an already-quantized 8x8 block for a better
 * rate/distortion trade-off: each pass tries +-1 changes to coefficient
 * levels (including the intra DC term), scoring the rate delta with the
 * AC VLC length tables and the distortion delta with dsp.try_8x8basis()
 * on the reconstruction residual rem[], then applies the single best
 * change with dsp.add_8x8basis().  Returns the (possibly changed) index
 * of the last non-zero coefficient.
 * NOTE(review): this extraction is decimated -- many interior lines
 * (brace/loop structure, several declarations, the outer iteration loop)
 * are missing; the comments below describe only what the visible lines
 * establish. */
3592 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
3593 int16_t *block, int16_t *weight, int16_t *orig,
3596 LOCAL_ALIGNED_16(int16_t, d1, [64]);
3597 const uint8_t *scantable= s->intra_scantable.scantable;
3598 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3599 // unsigned int threshold1, threshold2;
3604 int qmul, qadd, start_i, last_non_zero, i, dc;
3606 uint8_t * last_length;
3608 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* REFINE_STATS counters (static: accumulated across all calls) */
3611 static int after_last=0;
3612 static int to_zero=0;
3613 static int from_zero=0;
3616 static int messed_sign=0;
/* lazily build the DCT basis table on first use */
3619 if(basis[0][0] == 0)
3620 build_basis(s->dsp.idct_permutation);
3631 /* For AIC we skip quant/dequant of INTRADC */
3635 q <<= RECON_SHIFT-3;
3636 /* note: block[0] is assumed to be positive */
3638 // block[0] = (block[0] + (q >> 1)) / q;
3640 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3641 // bias= 1<<(QMAT_SHIFT-1);
/* pick the intra vs. inter AC VLC bit-length tables for rate estimation */
3642 length = s->intra_ac_vlc_length;
3643 last_length= s->intra_ac_vlc_last_length;
3647 length = s->inter_ac_vlc_length;
3648 last_length= s->inter_ac_vlc_last_length;
3650 last_non_zero = s->block_last_index[n];
/* initialize the fixed-point (RECON_SHIFT) residual rem[] from the DC
 * value and the original pixels */
3655 dc += (1<<(RECON_SHIFT-1));
3656 for(i=0; i<64; i++){
3657 rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig dirrectly instead of copying to rem[]
3660 STOP_TIMER("memset rem[]")}
/* derive per-coefficient noise-shaping weights in the 16..63 range */
3663 for(i=0; i<64; i++){
3668 w= FFABS(weight[i]) + qns*one;
3669 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
3672 // w=weight[i] = (63*qns + (w/2)) / w;
3678 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* run-length-encode the current coefficients into run_tab[] and add each
 * dequantized coefficient's contribution to rem[] */
3684 for(i=start_i; i<=last_non_zero; i++){
3685 int j= perm_scantable[i];
3686 const int level= block[j];
3690 if(level<0) coeff= qmul*level - qadd;
3691 else coeff= qmul*level + qadd;
3692 run_tab[rle_index++]=run;
3695 s->dsp.add_8x8basis(rem, basis[j], coeff);
3701 if(last_non_zero>0){
3702 STOP_TIMER("init rem[]")
/* --- iterative search: score of "change nothing" is the baseline --- */
3709 int best_score=s->dsp.try_8x8basis(rem, weight, basis[0], 0);
3712 int run2, best_unquant_change=0, analyze_gradient;
3716 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
/* precompute weighted gradient d1[] used to reject changes that would
 * move a coefficient against the residual's direction */
3718 if(analyze_gradient){
3722 for(i=0; i<64; i++){
3725 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
3728 STOP_TIMER("rem*w*w")}
/* try +-1 on the intra DC coefficient (rate is unaffected; only the
 * distortion delta via basis[0] is scored) */
3738 const int level= block[0];
3739 int change, old_coeff;
3741 assert(s->mb_intra);
3745 for(change=-1; change<=1; change+=2){
3746 int new_level= level + change;
3747 int score, new_coeff;
3749 new_coeff= q*new_level;
3750 if(new_coeff >= 2048 || new_coeff < 0)
3753 score= s->dsp.try_8x8basis(rem, weight, basis[0], new_coeff - old_coeff);
3754 if(score<best_score){
3757 best_change= change;
3758 best_unquant_change= new_coeff - old_coeff;
3765 run2= run_tab[rle_index++];
/* try +-1 on each AC coefficient, scoring VLC rate delta + distortion */
3769 for(i=start_i; i<64; i++){
3770 int j= perm_scantable[i];
3771 const int level= block[j];
3772 int change, old_coeff;
3774 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
3778 if(level<0) old_coeff= qmul*level - qadd;
3779 else old_coeff= qmul*level + qadd;
3780 run2= run_tab[rle_index++]; //FIXME ! maybe after last
3784 assert(run2>=0 || i >= last_non_zero );
3787 for(change=-1; change<=1; change+=2){
3788 int new_level= level + change;
3789 int score, new_coeff, unquant_change;
3792 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
3796 if(new_level<0) new_coeff= qmul*new_level - qadd;
3797 else new_coeff= qmul*new_level + qadd;
3798 if(new_coeff >= 2048 || new_coeff <= -2048)
3800 //FIXME check for overflow
/* nonzero -> nonzero: rate delta is just the level change at same run */
3803 if(level < 63 && level > -63){
3804 if(i < last_non_zero)
3805 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
3806 - length[UNI_AC_ENC_INDEX(run, level+64)];
3808 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
3809 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* zero -> +-1: inserting a coefficient splits the following run */
3812 assert(FFABS(new_level)==1);
3814 if(analyze_gradient){
3815 int g= d1[ scantable[i] ];
3816 if(g && (g^new_level) >= 0)
3820 if(i < last_non_zero){
3821 int next_i= i + run2 + 1;
3822 int next_level= block[ perm_scantable[next_i] ] + 64;
3824 if(next_level&(~127))
3827 if(next_i < last_non_zero)
3828 score += length[UNI_AC_ENC_INDEX(run, 65)]
3829 + length[UNI_AC_ENC_INDEX(run2, next_level)]
3830 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3832 score += length[UNI_AC_ENC_INDEX(run, 65)]
3833 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3834 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3836 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
3838 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3839 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* +-1 -> zero: removing a coefficient merges the adjacent runs */
3845 assert(FFABS(level)==1);
3847 if(i < last_non_zero){
3848 int next_i= i + run2 + 1;
3849 int next_level= block[ perm_scantable[next_i] ] + 64;
3851 if(next_level&(~127))
3854 if(next_i < last_non_zero)
3855 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3856 - length[UNI_AC_ENC_INDEX(run2, next_level)]
3857 - length[UNI_AC_ENC_INDEX(run, 65)];
3859 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3860 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3861 - length[UNI_AC_ENC_INDEX(run, 65)];
3863 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
3865 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3866 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* add the distortion delta and keep the best candidate so far */
3873 unquant_change= new_coeff - old_coeff;
3874 assert((score < 100*lambda && score > -100*lambda) || lambda==0);
3876 score+= s->dsp.try_8x8basis(rem, weight, basis[j], unquant_change);
3877 if(score<best_score){
3880 best_change= change;
3881 best_unquant_change= unquant_change;
3885 prev_level= level + 64;
3886 if(prev_level&(~127))
3895 STOP_TIMER("iterative step")}
/* apply the best change found in this pass */
3899 int j= perm_scantable[ best_coeff ];
3901 block[j] += best_change;
3903 if(best_coeff > last_non_zero){
3904 last_non_zero= best_coeff;
/* REFINE_STATS bookkeeping: classify the applied change */
3912 if(block[j] - best_change){
3913 if(FFABS(block[j]) > FFABS(block[j] - best_change)){
/* re-scan backwards for the new last non-zero coefficient */
3925 for(; last_non_zero>=start_i; last_non_zero--){
3926 if(block[perm_scantable[last_non_zero]])
3932 if(256*256*256*64 % count == 0){
3933 printf("after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
/* rebuild run_tab[] and fold the applied change into rem[] so the next
 * iteration scores against the updated residual */
3938 for(i=start_i; i<=last_non_zero; i++){
3939 int j= perm_scantable[i];
3940 const int level= block[j];
3943 run_tab[rle_index++]=run;
3950 s->dsp.add_8x8basis(rem, basis[j], best_unquant_change);
3956 if(last_non_zero>0){
3957 STOP_TIMER("iterative search")
3962 return last_non_zero;
/* Reference C implementation of forward DCT + quantization for one 8x8
 * block.  Runs dsp.fdct, optional DCT-domain denoising, then biased
 * quantization along the zig-zag scan; reports coefficient overflow via
 * *overflow and permutes the non-zero coefficients to match the active
 * IDCT permutation.  Returns the index of the last non-zero coefficient.
 * NOTE(review): this extraction is decimated -- several interior lines
 * (intra/inter branch structure, j/max bookkeeping, loop closings) are
 * missing from the visible lines. */
3965 int ff_dct_quantize_c(MpegEncContext *s,
3966 int16_t *block, int n,
3967 int qscale, int *overflow)
3969 int i, j, level, last_non_zero, q, start_i;
3971 const uint8_t *scantable= s->intra_scantable.scantable;
3974 unsigned int threshold1, threshold2;
3976 s->dsp.fdct (block);   /* forward DCT in place */
/* optional DCT-domain noise reduction */
3978 if(s->dct_error_sum)
3979 s->denoise_dct(s, block);
3989 /* For AIC we skip quant/dequant of INTRADC */
3992 /* note: block[0] is assumed to be positive */
3993 block[0] = (block[0] + (q >> 1)) / q;
/* select quant matrix and rounding bias for intra vs. inter blocks */
3996 qmat = s->q_intra_matrix[qscale];
3997 bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
4001 qmat = s->q_inter_matrix[qscale];
4002 bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
4004 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4005 threshold2= (threshold1<<1);
/* scan backwards to find the last coefficient outside the dead zone
 * (the unsigned compare folds the |level| > threshold test into one) */
4006 for(i=63;i>=start_i;i--) {
4008 level = block[j] * qmat[j];
4010 if(((unsigned)(level+threshold1))>threshold2){
/* forward pass: quantize every surviving coefficient */
4017 for(i=start_i; i<=last_non_zero; i++) {
4019 level = block[j] * qmat[j];
4021 // if( bias+level >= (1<<QMAT_SHIFT)
4022 // || bias-level >= (1<<QMAT_SHIFT)){
4023 if(((unsigned)(level+threshold1))>threshold2){
4025 level= (bias + level)>>QMAT_SHIFT;
4028 level= (bias - level)>>QMAT_SHIFT;
4036 *overflow= s->max_qcoeff < max; //overflow might have happened
4038 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4039 if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
4040 ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
4042 return last_non_zero;
/* Helper macros for the per-codec AVOption tables below: OFFSET locates a
 * field inside MpegEncContext; VE marks an option as a video encoding
 * parameter. */
4045 #define OFFSET(x) offsetof(MpegEncContext, x)
4046 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options of the plain H.263 encoder.
 * NOTE(review): the terminating { NULL } sentinel is not visible in this
 * decimated extraction -- confirm it is present in the original file. */
4047 static const AVOption h263_options[] = {
4048 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4049 { "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4050 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass exposing h263_options through the generic AVOption API. */
4055 static const AVClass h263_class = {
4056 .class_name = "H.263 encoder",
4057 .item_name = av_default_item_name,
4058 .option = h263_options,
4059 .version = LIBAVUTIL_VERSION_INT,
/* H.263 encoder registration; all entry points are the shared MPV
 * (mpegvideo) encoder implementations.
 * NOTE(review): the .name field and closing "};" are not visible in this
 * decimated extraction. */
4062 AVCodec ff_h263_encoder = {
4064 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4065 .type = AVMEDIA_TYPE_VIDEO,
4066 .id = AV_CODEC_ID_H263,
4067 .priv_data_size = sizeof(MpegEncContext),
4068 .init = ff_MPV_encode_init,
4069 .encode2 = ff_MPV_encode_picture,
4070 .close = ff_MPV_encode_end,
4071 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4072 .priv_class = &h263_class,
/* Private options of the H.263+ (H.263 version 2) encoder.
 * NOTE(review): the terminating { NULL } sentinel is not visible in this
 * decimated extraction. */
4075 static const AVOption h263p_options[] = {
4076 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4077 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4078 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4079 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
/* AVClass exposing h263p_options through the generic AVOption API. */
4083 static const AVClass h263p_class = {
4084 .class_name = "H.263p encoder",
4085 .item_name = av_default_item_name,
4086 .option = h263p_options,
4087 .version = LIBAVUTIL_VERSION_INT,
/* H.263+ encoder registration; shares the MPV entry points and
 * additionally advertises slice-threading capability.
 * NOTE(review): the .name field and closing "};" are not visible in this
 * decimated extraction. */
4090 AVCodec ff_h263p_encoder = {
4092 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4093 .type = AVMEDIA_TYPE_VIDEO,
4094 .id = AV_CODEC_ID_H263P,
4095 .priv_data_size = sizeof(MpegEncContext),
4096 .init = ff_MPV_encode_init,
4097 .encode2 = ff_MPV_encode_picture,
4098 .close = ff_MPV_encode_end,
4099 .capabilities = CODEC_CAP_SLICE_THREADS,
4100 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4101 .priv_class = &h263p_class,
/* FF_MPV_GENERIC_CLASS expands to an AVClass (msmpeg4v2_class) built on
 * the generic MPV option table. */
4104 FF_MPV_GENERIC_CLASS(msmpeg4v2)
/* MS-MPEG4 v2 encoder registration; shares the MPV entry points.
 * NOTE(review): the closing "};" is not visible in this decimated
 * extraction. */
4106 AVCodec ff_msmpeg4v2_encoder = {
4107 .name = "msmpeg4v2",
4108 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4109 .type = AVMEDIA_TYPE_VIDEO,
4110 .id = AV_CODEC_ID_MSMPEG4V2,
4111 .priv_data_size = sizeof(MpegEncContext),
4112 .init = ff_MPV_encode_init,
4113 .encode2 = ff_MPV_encode_picture,
4114 .close = ff_MPV_encode_end,
4115 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4116 .priv_class = &msmpeg4v2_class,
/* FF_MPV_GENERIC_CLASS expands to an AVClass (msmpeg4v3_class) built on
 * the generic MPV option table. */
4119 FF_MPV_GENERIC_CLASS(msmpeg4v3)
/* MS-MPEG4 v3 encoder registration; shares the MPV entry points.
 * NOTE(review): the .name field and closing "};" are not visible in this
 * decimated extraction. */
4121 AVCodec ff_msmpeg4v3_encoder = {
4123 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4124 .type = AVMEDIA_TYPE_VIDEO,
4125 .id = AV_CODEC_ID_MSMPEG4V3,
4126 .priv_data_size = sizeof(MpegEncContext),
4127 .init = ff_MPV_encode_init,
4128 .encode2 = ff_MPV_encode_picture,
4129 .close = ff_MPV_encode_end,
4130 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4131 .priv_class = &msmpeg4v3_class,
4134 FF_MPV_GENERIC_CLASS(wmv1)
4136 AVCodec ff_wmv1_encoder = {
4138 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4139 .type = AVMEDIA_TYPE_VIDEO,
4140 .id = AV_CODEC_ID_WMV1,
4141 .priv_data_size = sizeof(MpegEncContext),
4142 .init = ff_MPV_encode_init,
4143 .encode2 = ff_MPV_encode_picture,
4144 .close = ff_MPV_encode_end,
4145 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4146 .priv_class = &wmv1_class,